code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline df = pd.read_csv('motorcycle-crash-data.csv', low_memory=False) headerlist = df.columns.tolist() headerlist road_type = df[['Interstate', 'State Road', 'Local Road Only', 'Turnpike']] road_type = road_type.replace(['Yes', 'No'], [1, 0]) road_type road_type.sum() plt.figure(figsize=(20,12)) sns.barplot(data=road_type) age_group = df[['16 Year Old Driver Count', '17 Year Old Driver Count', '18 Year Old Driver Count', '19 Year Old Driver Count', '20 Year Old Driver Count', '50-64 Year Old Driver Count', '65-74 Year Old Driver Count', '75 Plus Year Old Driver Count']] age_group plt.figure(figsize=(20,8)) sns.barplot(data=age_group) plt.tight_layout by_county = df['County Name'].unique() by_county by_county = df.groupby(df['County Name'])['Crash Record Number'].count() by_county.head(50) plt.figure(figsize=(15,10)) sns.barplot(x='Crash Record Number', y='County Name', data=by_county) plt.tight_layout by_county_df = pd.DataFrame(by_county) plt.figure(figsize=(15,10)) sns.barplot(data=by_county_df) plt.tight_layout by_county_df by_county_df.columns by_county_df['County Name'] = by_county_df.index by_county_df sns.set_style('darkgrid') plt.figure(figsize=(30,60)) sns.barplot(x='Crash Record Number', y='County Name', data=by_county_df) plt.tight_layout() plt.tick_params(labelsize=20) by_county_df['Crash Record Number'].sum() crash_records_sum = by_county_df['Crash Record Number'].sum() by_county_df['% of Crashes'] = by_county_df['Crash Record Number'].apply(lambda x: x/crash_records_sum) by_county_df county_size_df = pd.read_csv('pa-county-pop-data.csv') county_size_df county_size_df.iloc[0] county_size_df.drop('GrowthRate', axis=1, inplace=True) county_size_df 
county_size_df['CTYNAME'] = county_size_df['CTYNAME'].apply(lambda x: x.split(' ')[0]) county_size_df pop_sum = county_size_df['Pop'].sum() pop_sum county_size_df['%Pop'] = county_size_df['Pop'].apply(lambda x: x/pop_sum) county_size_df by_county_df.rename(columns={'Crash Record Number': 'CrashRecordCount', 'County Name': 'CTYNAME', '% of Crashes': '%Crashes'}, inplace=True) by_county_df.reset_index(drop=True) by_county_df.loc[(by_county_df.CTYNAME == 'Mckean'), 'CTYNAME'] = 'McKean' county_data = pd.merge(by_county_df, county_size_df, on='CTYNAME') county_data county_data.head(60) county_data.tail(6) county_data_sub1 = county_data[['CTYNAME', '%Crashes']] county_data_sub1 county_data_sub2 = county_data[['CTYNAME', '%Pop']] county_data_sub2 county_data_sub1['Metric'] = '%Crash' county_data_sub1 county_data_sub2['Metric'] = '%Pop' county_data_sub1.rename(columns={'%Crashes': '%'}, inplace=True) county_data_sub2.rename(columns={'%Pop': '%'}, inplace = True) bypercent_county = pd.concat([county_data_sub1, county_data_sub2], ignore_index=True) bypercent_county plt.figure(figsize=(15,30)) sns.set_style('darkgrid') sns.barplot(x='%', y='CTYNAME', data=bypercent_county, hue='Metric') plt.tick_params(labelsize=20) county_data county_data['PopCrash-Idx'] = (county_data['%Pop'] - county_data['%Crashes']) / county_data['%Pop'] county_data plt.figure(figsize=(20,15)) sns.set_context('poster') sns.barplot(x='PopCrash-Idx', y='CTYNAME', data=county_data.sort_values(by='PopCrash-Idx', ascending=False)) plt.tick_params(labelsize=10) county_list = df['County Name'].unique().tolist() couty_list county_list sullivan_county = df[df['County Name'] == 'Sullivan'] for i in sullivan_county.columns: print(i)
projects/ate-252/workspace/project_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R [conda env:singlecell] # language: R # name: conda-env-singlecell-r # --- library(SCORPIUS) cluster2 <- read.delim("/home/spuccio/data/cluster2.csv", row.names=1) cluster2_meta <- read.delim("/home/spuccio/data//cluster2_meta.csv", header=FALSE) tf <- read.delim("/home/spuccio/data/hg38_tf.txt", header=FALSE) space <- reduce_dimensionality(as.matrix(cluster2), "spearman", ndim = 3) draw_trajectory_plot(space, progression_group = as.factor(cluster2_meta$V2), contour = TRUE) traj <- infer_trajectory(space) gimp <- gene_importances(as.matrix(cluster2), traj$time, num_permutations = 0, num_threads = 8) gene_sel <- gimp[1:50,] # + #tf[1:100,] # + #expr_sel <- as.matrix(cluster2)[,tf$V1] # + #options(repr.plot.width=12, repr.plot.height=11) #draw_trajectory_heatmap(expr_sel, traj$time, as.factor(cluster2_meta$V2)) # - newDF<- as.data.frame(cluster2)[ ,which((names( as.data.frame(cluster2)) %in% tf$V1)==TRUE)] newDF2 <- newDF[, colSums(newDF != 0) > 0] # + #newDF2[1:50,] # + #options(repr.plot.width=12, repr.plot.height=11) #draw_trajectory_heatmap(newDF2, traj$time, as.factor(cluster2_meta$V2)) # + #modules <- extract_modules(scale_quantile(newDF2), traj$time, verbose = FALSE) #options(repr.plot.width=22, repr.plot.height=21) #draw_trajectory_heatmap(newDF2, traj$time, as.factor(cluster2_meta$V2), modules,show_labels_row = TRUE) # - gimp <- gene_importances(newDF2, traj$time, num_permutations = 0, ntree = 1000,mtry = ncol(newDF2) * 0.01) gene_sel2 <- gimp[1:100,] expr_sel2 <- newDF2[,gene_sel2$gene] modules2 <- extract_modules(scale_quantile(expr_sel2)) draw_trajectory_heatmap(expr_sel2, traj$time, as.factor(cluster2_meta$V2), modules2,show_labels_row = TRUE) write.csv(modules2,"/home/spuccio/data/modules.txt",row.names = F)
Cariplo/Scorpius.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import time import matplotlib.pyplot as plt import random file_list = open('index.txt', 'r').read().split('\n')[:-1] # + def deletecomment(string): if '/*' not in string: return string comment_start = string.find('/*') comment_end = string.find('*/') return deletecomment(string[:comment_start] + string[comment_end + 2:]) def findline(lines, target_string, num_line = 0): if num_line == len(lines): return -1 if target_string in lines[num_line]: return num_line else: return findline(lines, target_string, num_line + 1) def findfunctionend(lines, count, num_line): count += lines[num_line].count('{') count -= lines[num_line].count('}') if count == 0: return num_line return findfunctionend(lines, count, num_line + 1) def extractfunction(lines, start_line): end_line = findfunctionend(lines, 0, start_line) return lines[start_line + 1 : end_line] def cleanupfunction(function): function = [line.lstrip() for line in function] function = ''.join(function) signs = ['=', '+', '-', '*', '/', '!=', '=='] for sign in signs: function = function.replace(' ' + sign + ' ', sign) function = function.replace('if ', 'if') function = function.replace(' (', '(') function = function.replace(' )', ')') function = function.replace(', ', ',') function = function.replace(' }', '}') return function def findallline(lines, target_string, current_line = 0, num_lines = []): if current_line == len(lines): return num_lines if target_string in lines[current_line]: num_lines.append(current_line) return findallline(lines, target_string, current_line + 1, num_lines) # - def case1(f, good_start): #case when there is a function of "public void good" good_list = extractfunction(f, good_start) if len(good_list) > 0: good_name = [] for line in good_list: name_end = 
line.find('(') name_start = line.find('g') good_name.append(line[name_start:name_end]) good_funcs = [] for name in good_name: start = findline(f, 'private void ' + name) + 1 good_funcs.append(extractfunction(f, start)) good_cleaned = [] for func in good_funcs: func = cleanupfunction(func) good_cleaned.append(func) return good_cleaned # + s_file = open('src_600.txt', 'w+') t_file = open('tgt_600.txt', 'w+') s_train = open('src_train.txt', 'w+') s_test = open('src_test.txt', 'w+') s_val = open('src_val.txt', 'w+') t_train = open('tgt_train.txt', 'w+') t_test = open('tgt_test.txt', 'w+') t_val = open('tgt_val.txt', 'w+') pairs = 0 good_files = 0 src_lengths = [] tgt_lengths = [] start_time = time.time() for i in range(len(file_list)): print('now processing [%d/%d]'%(i, len(file_list)), end = '\r') file = open(file_list[i], 'r') f = file.read() f = deletecomment(f) f = f.split('\n') good_start = findline(f, 'public void good(') + 1 bad_start = findline(f, 'public void bad') + 1 if bad_start > 0: bad_function = extractfunction(f, bad_start) bad_function = cleanupfunction(bad_function) ins = toWordLevel(bad_function) if len(ins) < 600: if good_start > 0: rnd = random.random() good_files += 1 good_cleaned = case1(f, good_start) for j in range(len(good_cleaned)): ins = toWordLevel(good_cleaned[j]) if len(ins) < 600: print(bad_function, file = s_file) print(good_cleaned[j], file = t_file) pairs += 1 if rnd < 0.9: print(bad_function, file = s_train) print(good_cleaned[j], file = t_train) elif rnd >= 0.9 and rnd < 0.95: print(bad_function, file = s_test) print(good_cleaned[j], file = t_test) else: print(bad_function, file = s_val) print(good_cleaned[j], file = t_val) file.close() print() print('time elapsed %.2f'%(time.time() - start_time)) print('good files: [%d/%d]'%(good_files, len(file_list))) print('number of pairs: %d'%pairs) s_file.close() t_file.close() s_train.close() t_train.close() s_test.close() t_test.close() s_val.close() t_val.close() # - plt.hist(src_lengths, 
bins = 100) plt.xlim(0,2000) plt.show() plt.hist(tgt_lengths, bins = 100) plt.xlim(0,2000) plt.show() bad_function def toWordLevel(instance): instance = ' '.join(instance.split()) parser_list_lv1 = ['==', '!=', '&&', '||', '<=', '>=', '__'] parser_list_lv2 = ['!', ';', '=', '+', '-', '&', '%', '*', ':', '.', '|', '/', '(', ')', '{', '}', '[', ']', '<', '>', '\'', '\"', ',', '_', ' '] parselv1 = [] while len(instance) > 2: i = 0 while True: if instance[i:i+2] in parser_list_lv1: if i != 0: parselv1.append(instance[:i]) parselv1.append(instance[i:i+2]) instance = instance[i+2:] break if i == len(instance): parselv1.append(instance) instance = '' break i += 1 parselv2 = [] for st in parselv1: if st not in parser_list_lv1: while len(st) > 0: i = 0 while True: if i == len(st): parselv2.append(st) st = '' break if st[i] in parser_list_lv2: if i != 0: parselv2.append(st[:i]) parselv2.append(st[i]) st = st[i+1:] break i += 1 else: parselv2.append(st) return parselv2 src = open('src.txt', 'r').read().split('\n')[:-1] src_lengths = [] for i in range(len(src)): print('[%d/%d]'%(i, len(src)), end = '\r') ins = toWordLevel(src[i]) src_lengths.append(len(ins)) tgt = open('tgt.txt', 'r').read().split('\n')[:-1] tgt_lengths = [] for i in range(len(tgt)): print('[%d/%d]'%(i, len(tgt)), end = '\r') ins = toWordLevel(tgt[i]) tgt_lengths.append(len(ins)) plt.hist(src_lengths, bins = 100) plt.show() plt.hist(tgt_lengths, bins = 100) plt.show() # + s_file = open('src_600.txt', 'w+') t_file = open('tgt_600.txt', 'w+') s_train = open('src_train.txt', 'w+') s_test = open('src_test.txt', 'w+') s_val = open('src_val.txt', 'w+') t_train = open('tgt_train.txt', 'w+') t_test = open('tgt_test.txt', 'w+') t_val = open('tgt_val.txt', 'w+') for i in range(len(src)): if src_lengths[i] < 600 and tgt_lengths[i] < 600: print(src[i], file = s_file) print(tgt[i], file = t_file) rnd = random.random() if rnd < 0.9: print(src[i], file = s_train) print(tgt[i], file = t_train) elif rnd > 0.9 and rnd < 0.95: 
print(src[i], file = s_test) print(tgt[i], file = t_test) else: print(src[i], file = s_val) print(tgt[i], file = t_val) s_file.close() t_file.close() s_train.close() s_test.close() s_val.close() t_train.close() t_test.close() t_val.close() # + src = open('src_600.txt', 'r').read().split('\n')[:-1] tgt = open('tgt_600.txt', 'r').read().split('\n')[:-1] vocab_file = open('vocab.txt', 'w+') vocab = {} for i in range(len(src)): print('[%d/%d] processed!'%(i, len(src)), end = '\r') ins = toWordLevel(src[i]) for word in ins: if word in vocab: vocab[word] += 1 else: vocab[word] = 0 ins = toWordLevel(tgt[i]) for word in ins: if word in vocab: vocab[word] += 1 else: vocab[word] = 0 # - for key in vocab.keys(): print(key, file = vocab_file) vocab_file.close() src = open('src_600.txt').read().split('\n') tgt = open('tgt_600.txt').read().split('\n') src_len = [] tgt_len = [] for i in range(len(src)): src_len.append(len(src[i])) tgt_len.append(len(tgt[i])) plt.hist(src_len, bins = 100, alpha = 0.5) plt.hist(tgt_len, bins = 100, alpha = 0.5)
Data/Juliet-Java/Juliet-Java-v103/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NbConvert # ## Command line usage # `NbConvert` is the library, and the command line tool that allow to convert from notebook to other formats. # It is a technological preview in 1.0 but is already usable and highly configurable. # It ships already with many default available formats : `html`, `latex`, `markdown`, `python`, `rst` and `slides` # which are fully base on Jinja templating engine, so writing a converter for your custom format or tweeking the existing # one should be extra simple. # You can invoke nbconvert by doing # # ```bash # $ ipython nbconvert <options and arguments> # ``` # # Call `ipython nbconvert` with the `--help` flag or no aruments to get basic help on how to use it. # For more information about configuration use the `--help-all` flag # ### Basic export # We will be converting `Converting Notebooks With nbconvert.ipynb'`. # Be sure to have runed some of the cells in it to have output otherwise you will only see input in nbconvert. # Nbconvert **do not execute the code** in the notebook files, it only converts what is inside. # + jupyter={"outputs_hidden": false} language="bash" # jupyter nbconvert 'Converting Notebooks With nbconvert.ipynb' # - # Html is the default value (that can be configured) , so the verbose form would be # + jupyter={"outputs_hidden": false} language="bash" # jupyter nbconvert --to=html 'Converting Notebooks With nbconvert.ipynb' # - # You can also convert to latex, which will take care of extractin the embeded base64 encoded png, or the svg and call inkscape to convert those svg to pdf if necessary : # + jupyter={"outputs_hidden": false} language="bash" # jupyter nbconvert --to=latex 'Converting Notebooks With nbconvert.ipynb' # - # You should just have to compile the generated `.tex` file. 
If you get the required packages installed, if should compile out of the box. # # + jupyter={"outputs_hidden": false} language="bash" # jupyter nbconvert --to=pdf 'Index.ipynb' # - # Have a look at `04 - Custom Display Logic.pdf`, toward the end, where we compared `display()` vs `display_html()` and returning the object. # See how the cell where we use `display_html` was not able to display the circle, whereas the two other ones were able to select one of the oher representation they know how to display. # ### Customizing template # let's look at the first 20 lines of the `python` exporter # + jupyter={"outputs_hidden": false} # pyfile = !jupyter nbconvert --to python 'Converting Notebooks With nbconvert.ipynb' --stdout for l in pyfile[20:40]: print(l) # - # We see that the non-code cell are exported to the file. To have a cleaner script, we will export only the code contained in the code cells. # # To do so, we will inherit the python template, and overwrite the markdown blocks to be empty. # + jupyter={"outputs_hidden": false} # %%writefile simplepython.tpl {% extends 'python.tpl'%} {% block markdowncell -%} {% endblock markdowncell %} ## we also want to get rig of header cell {% block headingcell -%} {% endblock headingcell %} ## and let's change the appearance of input prompt {% block in_prompt %} # This was input cell with prompt number : {{ cell.prompt_number if cell.prompt_number else ' ' }} {%- endblock in_prompt %} # + jupyter={"outputs_hidden": false} # pyfile = !jupyter nbconvert --to python 'Converting Notebooks With nbconvert.ipynb' --stdout --template=simplepython.tpl for l in pyfile[4:40]: print(l) print('...') # - # I'll let you read Jinja manual for the exact syntax of the template. # ## Template that use cells metadata # Notebook fileformat support attaching arbitrary JSON metadata to each cell of a notebook. In this part we will use those metadata. 
# First you need to choose another notebook you want to convert to html, and tag some of the cell with metadata. # You can see the file `soln/celldiff.js` for a solution on how to tag, or follow the javascript tutorial to see how to do that. Use what we have written there to tag cells of some notebooks to `Easy`|`Medium`|`Hard`|`<None>`, and convert this notebook using your template. # you might need the following : # ``` # {% extends 'html_full.tpl'%} # {% block any_cell %} # {{ super() }} # <div style="background-color:red"> # <div style='background-color:orange'> # ``` # # `metadata` might not exist, be sure to : # # `cell['metadata'].get('example',{}).get('difficulty','')` # # tip: use `%%writefile` to edit the template in the notebook :-) # + jupyter={"outputs_hidden": false} language="bash" # # ipython nbconvert --to=html <your chosen notebook.ipynb> --template=<your template file> # + jupyter={"outputs_hidden": false} active="" # # %load ../../exercises/soln/coloreddiff.tpl # {% extends 'html_full.tpl'%} # # {% block any_cell %} # {% if cell['metadata'].get('example',{}).get('difficulty','') == 'Hard' -%} # <div style="background-color:red"> # {{ super() }} # </div> # {% elif cell['metadata'].get('example',{}).get('difficulty','') == 'Medium' %} # <div style='background-color:orange'> # {{ super() }} # </div> # {% else %} # {{ super() }} # {% endif %} # {%- endblock any_cell %} # + jupyter={"outputs_hidden": false} # #%%bash #jupyter nbconvert --to=html 'Converting Notebooks With nbconvert.ipynb' --template=../../exercises/soln/coloreddiff.tpl # - # ### Get rid of all command line flags. # As of all of IPython nbconvert can be configured using profiles and passing the `--profile` flag. # Moreover if a `config.py` file exist in current working directory nbconvert will use that, or read the config file you give to it with the `--config=<file>` flag. 
# # In the end, if you are often running nbconvert on the sam project, `$ ipython nbconvert` should be enough to get you up and ready. # ## Cleanup # !rm -f 'Converting Notebooks With nbconvert.html' # !rm -f 'Converting Notebooks With nbconvert.tex' # !rm -f 'Converting Notebooks With nbconvert.pdf' # !rm -f 'Index.pdf' # !rm -f simplepython.tpl
001-Jupyter/001-Tutorials/003-IPython-in-Depth/examples/Notebook/Converting Notebooks With nbconvert_skip.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + # default_exp models.positional_encoders # - # # Positional encoders # # > This includes some variations of positional encoders used with Transformers. # ## Imports #export from tsai.imports import * from tsai.utils import * # ## Positional encoders # + #export def PositionalEncoding(q_len, d_model, normalize=True): pe = torch.zeros(q_len, d_model) position = torch.arange(0, q_len).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) if normalize: pe = pe - pe.mean() pe = pe / (pe.std() * 10) return pe SinCosPosEncoding = PositionalEncoding # - pe = PositionalEncoding(1000, 512).detach().cpu().numpy() plt.pcolormesh(pe, cmap='viridis') plt.title('PositionalEncoding') plt.colorbar() plt.show() pe.mean(), pe.std(), pe.min(), pe.max(), pe.shape #export def Coord2dPosEncoding(q_len, d_model, exponential=False, normalize=True, eps=1e-3, verbose=False): x = .5 if exponential else 1 i = 0 for i in range(100): cpe = 2 * (torch.linspace(0, 1, q_len).reshape(-1, 1) ** x) * (torch.linspace(0, 1, d_model).reshape(1, -1) ** x) - 1 pv(f'{i:4.0f} {x:5.3f} {cpe.mean():+6.3f}', verbose) if abs(cpe.mean()) <= eps: break elif cpe.mean() > eps: x += .001 else: x -= .001 i += 1 if normalize: cpe = cpe - cpe.mean() cpe = cpe / (cpe.std() * 10) return cpe cpe = Coord2dPosEncoding(1000, 512, exponential=True, normalize=True).cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord2dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(0)) plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max() #export def Coord1dPosEncoding(q_len, exponential=False, normalize=True): cpe = (2 * 
(torch.linspace(0, 1, q_len).reshape(-1, 1)**(.5 if exponential else 1)) - 1) if normalize: cpe = cpe - cpe.mean() cpe = cpe / (cpe.std() * 10) return cpe cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord1dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max(), cpe.shape cpe = Coord1dPosEncoding(1000, exponential=True, normalize=True).detach().cpu().numpy() plt.pcolormesh(cpe, cmap='viridis') plt.title('Coord1dPosEncoding') plt.colorbar() plt.show() plt.plot(cpe.mean(1)) plt.show() cpe.mean(), cpe.std(), cpe.min(), cpe.max() #hide from tsai.imports import * from tsai.export import * nb_name = get_nb_name() # nb_name = "100c_models.positional_encoders.ipynb" create_scripts(nb_name);
nbs/100c_models.positional_encoders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Import libraries from keras import optimizers, losses, activations, models from keras.callbacks import ModelCheckpoint, EarlyStopping, LearningRateScheduler, ReduceLROnPlateau from keras.layers import Layer, GRU, LSTM, Dense, Input, Dropout, Convolution1D, MaxPool1D, GlobalMaxPool1D, GlobalAveragePooling1D, \ concatenate from keras.layers import LeakyReLU from keras import regularizers, backend, initializers from keras.models import Sequential from keras.utils import to_categorical from keras.initializers import Ones, Zeros from keras.wrappers.scikit_learn import KerasClassifier import keras.backend as K from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.preprocessing import RobustScaler from sklearn.model_selection import GridSearchCV from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import cross_val_score from sklearn import preprocessing from sklearn.metrics import accuracy_score, f1_score, roc_auc_score import time import gc import pandas as pd import numpy as np from numpy import loadtxt import pylab as plt import seaborn as sns import shap # Load JS visualization code to notebook shap.initjs() # + # Load array train = loadtxt('train.csv', delimiter=',') test = loadtxt('test.csv', delimiter=',') # Split array train_x = train[:,:11] test_x = test[:,:11] train_y = train[:,11] test_y = test[:,11] # + # Define Layer Normalization class class LayerNormalization(Layer): def __init__(self, eps=1e-6, **kwargs): self.eps = eps super(LayerNormalization, self).__init__(**kwargs) def build(self, input_shape): self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:], initializer=Ones(), trainable=True) self.beta = self.add_weight(name='beta', shape=input_shape[-1:], 
initializer=Zeros(), trainable=True) super(LayerNormalization, self).build(input_shape) def call(self, x): mean = K.mean(x, axis=-1, keepdims=True) std = K.std(x, axis=-1, keepdims=True) return self.gamma * (x - mean) / (std + self.eps) + self.beta def compute_output_shape(self, input_shape): return input_shape layer_size1 = 12 layer_size2 = 10 layer_size3 = 7 layer_size4 = 5 layer_size5 = 4 layer_size6 = 3 timesteps = 1 # static data data_dim = 11 X_train = np.reshape(train_x, (train_x.shape[0], 1, train_x.shape[1])) X_test = np.reshape(test_x, (test_x.shape[0], 1, test_x.shape[1])) def create_model(learning_rate=0.001): model = Sequential() model.add(GRU(layer_size1, return_sequences=True, input_shape=(timesteps, data_dim))) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(GRU(layer_size2, return_sequences=True)) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(GRU(layer_size3, return_sequences=True)) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(GRU(layer_size4, return_sequences=True)) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(GRU(layer_size5, return_sequences=True)) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(GRU(layer_size6, return_sequences=False)) model.add(LayerNormalization()) model.add(LeakyReLU(alpha=0.01)) model.add(Dense(2, activation='softmax')) opt = optimizers.Adam(learning_rate) model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) return model # + RNN = KerasClassifier(build_fn=create_model) # Grid Search to optimize the model parameters = {'learning_rate':[0.001, 0.01, 0.1, 1]} scoring = ['roc_auc', 'accuracy', 'f1'] clf = GridSearchCV(RNN, parameters, scoring, refit = 'roc_auc') grid_result = clf.fit(X_train, train_y) # - # Obtain the parameters for the best model print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
RNN/.ipynb_checkpoints/RNN _gridsearch-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Execute this cell to create a simple BlocksWorld language and # bind as local variables sorts, functions and predicates. import tarski from tarski.symbols import * # 1. Create language used to describe world states and transitions bw = tarski.language() # 2. Define sorts place = bw.sort('place') block = bw.sort('block', place) # 3. Define functions loc = bw.function( 'loc', block, place ) width = bw.function('width', block, bw.Real) # 4. Define predicates clear = bw.predicate( 'clear', block) # - b1, b2, b3, b4 = [ bw.constant( 'b_{}'.format(k), block) for k in (1,2,3,4) ] table = bw.constant('table', place) # + import tarski.model import tarski.evaluators s = tarski.model.create(bw) s.evaluator = tarski.evaluators.get_entry_point('simple') # - s.set( loc(b1), table) s.set(loc(b2), table) # block b2 is on the table s.set(loc(b3), b2) # block b3 is on block 2 s.set(width(b1), 3.0) # block b1 has a width of 3.0 units s.set(width(b2), 3.0) # block b2 has a width of 3.0 units s.set(width(b3), 4.0) # block b3 has a width of 4.0 units # ### Built-in Function Symbols # A number of functions are already defined for the built-in sorts ```Real```, ```Integer``` and ```Natural```, in order to facilitate the definition of terms that model arithmetic operators, algebraic and transcendental functions # | Name | Syntax | Notes | Name | Syntax | Notes | # |------|:--------|:-------------------|:----------|:-----|:------| # | Addition | `x + y` | | Power | `x ** y` | | # | Subtraction | `x - y` | | | | | # | Multiplication | `x * y` | | | | | # | Division | `x / y` | | | || # | Modulo | `x % y` | | | | | # # #### Notes # #### Examples # We can write crazy stuff like: expr1 = width(b1) + width(b2) print(expr1) expr2 = width(b1) * width(b2) print(expr2) # and 
evaluate it with the $\sigma$ operator, which is implemented by the ```eval``` method of our language s[expr1] s[expr2] # ### Giving Meaning to Predicates # ```Tarski``` languages represent $P^{\cal M}$ either _extensionally_, as **tables** of tuples of constants for which the predicate is defined, or _intensionally_ as a **procedure** that maps a tuple of constants into either $\top$ or $\bot$. # # We can add a new tuple of constants to the extension of predicate $P$ using the method ```add``` my_blocks = [ b1, b2, b3, b4] for b in my_blocks : s.add(clear,(b)) # Evaluating the satisfiability of a predicate $P$ under a given interpretation of its arguments can be done via the ```satisifed``` method print(s[clear(b1)]) print(s[clear(b2)]) # # # Relational operators like $>$, $=$ etc. are represented always **procedurally** when the sorts of the arguments are built-in sorts such as ```Real```. # **NB**: It may be interesting to highlight that predicates are actually _constraints_ that we use to define states. # ### Formula evaluation # ### Example: Nested functions for compact preconditions looking_at = bw.function('looking_at', block) s.set( looking_at, tuple(), b1 ) precondition = (width(looking_at()) <= width(b2)) & (clear(b2)) s[precondition] # **Next**: [Classical Planning Tasks](005_functional_strips.ipynb)
notebooks/101_advanced_tutorial_theories.ipynb
# ---
# jupyter:
#   jupytext:
#     formats: ipynb,py:light
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # This notebook creates the tests in Python code. All these cells must be run to execute the tests

# %load_ext autoreload
# %autoreload 2

# + inputHidden=false outputHidden=false
import sys

# Make the repository root importable so the in-tree `optimus` package is picked up.
sys.path.append("../..")
# -

from optimus import Optimus
from optimus.helpers.test import Test

# Local Spark session; verbose=True echoes what Optimus is doing.
op = Optimus(master='local', verbose=True)

# +
import pandas as pd
from pyspark.sql.types import *
from datetime import date, datetime

# Schema of the master test DataFrame: one column per Spark type we want covered.
# Bare strings default to string columns.
cols = [
    ("names", "str"),
    ("height(ft)", ShortType()),
    ("function", "str"),
    ("rank", ByteType()),
    ("age", "int"),
    ("weight(t)", "float"),
    "japanese name",
    "last position seen",
    "date arrival",
    "last date seen",
    ("attributes", ArrayType(FloatType())),
    ("Date Type", DateType()),
    ("timestamp", TimestampType()),
    ("Cybertronian", BooleanType()),
    ("function(binary)", BinaryType()),
    ("NullType", NullType())
]

# Deliberately messy rows (negatives, unicode, trailing spaces, Nones, an all-None row)
# so the generated tests exercise edge cases.
rows = [
    ("Optim'us", -28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10", "2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"), None),
    ("bumbl#ebéé  ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10", "2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True, bytearray("Espionage", "utf-8"), None),
    ("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10", "2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True, bytearray("Security", "utf-8"), None),
    ("Jazz", 13, "First Lieutenant", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10", "2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True, bytearray("First Lieutenant", "utf-8"), None),
    ("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0], date(2012, 5, 10), datetime(2014, 6, 24), True, bytearray("None", "utf-8"), None),
    ("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10", [91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("Battle Station", "utf-8"), None),
    (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),
]

source_df = op.create.df(cols, rows)
source_df.table()
# -

# +
import pandas as pd
from pyspark.sql.types import *
from datetime import date, datetime

# Same data minus the binary/null columns and the all-None row; used for the
# string_to_index / values_to_cols tests below.
cols = [
    ("names", "str"),
    ("height(ft)", ShortType()),
    ("function", "str"),
    ("rank", ByteType()),
    ("age", "int"),
    ("weight(t)", "float"),
    "japanese name",
    "last position seen",
    "date arrival",
    "last date seen",
    ("attributes", ArrayType(FloatType())),
    ("Date Type", DateType()),
    ("timestamp", TimestampType()),
    ("Cybertronian", BooleanType())
]
rows = [
    ("Optim'us", -28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10", "2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True),
    ("bumbl#ebéé  ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10", "2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True),
    ("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10", "2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True),
    ("Jazz", 13, "<NAME>", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10", "2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True),
    ("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0], date(2012, 5, 10), datetime(2014, 6, 24), True),
    ("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10", [91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True),
]

source_df_string_to_index = op.create.df(cols, rows)
source_df_string_to_index.table()
# -

# ### End Init Section

# # Test

# ## Optimus Test

from pyspark.ml.linalg import Vectors

# Harness that records each call as a generated pytest case under `path`.
t = Test(op, None, "create_df", imports=["import datetime", "from pyspark.sql import functions as F"], path="..", final_path="..")

# +
# Each dict is a kwargs bundle for op.create.df covering a different creation mode.
one_column = {"rows": ["Argenis", "Favio", "Matthew"], "cols": ["name"]}
plain = {"rows": [("BOB", 1), ("JoSe", 2)], "cols": ["name", "age"]}
plain_infer_false = {"rows": [("BOB", 1), ("JoSe", 2)], "cols": ["name", "age"], "infer_schema": False}
with_data_types = {"rows": [("BOB", 1), ("JoSe", 2)], "cols": [("name", StringType(), True), ("age", IntegerType(), False)]}
nullable = {"rows": [("BOB", 1), ("JoSe", 2)], "cols": [("name", StringType()), ("age", IntegerType())]}

df1 = op.create.df(**one_column)
df2 = op.create.df(**plain)
df3 = op.create.df(**plain_infer_false)
df4 = op.create.df(**with_data_types)
df5 = op.create.df(**nullable)
# -

t.create(df1, None, "one_column", "df", **one_column)
t.create(df2, None, "plain", "df", **plain)
t.create(df3, None, "plain_infer_false", "df", **plain_infer_false)
t.create(df4, None, "with_data_types", "df", **with_data_types)
t.create(df5, None, "nullable", "df", **nullable)

t.run()

# ## Columns Test

from pyspark.ml.linalg import Vectors

t = Test(op, source_df, "df_cols", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F"], path="df_cols", final_path="..")

# +
from pyspark.sql import functions as F


def func(col_name, attrs):
    # Doubles a column; used by the apply_expr cases further down.
    return F.col(col_name) * 2


# Column aliases reused by the test cases below.
numeric_col = "height(ft)"
numeric_col_B = "rank"
numeric_col_C = "rank"
string_col = "function"
date_col = "date arrival"
date_col_B = "last date seen"
new_col = "new col"
array_col = "attributes"
# -

t.create(source_df_string_to_index, "cols.string_to_index", None, "df", None, "rank")

source_df_index_to_string = source_df_string_to_index.cols.string_to_index("rank")

# FIX at creation time we lose the metadata. Need to find a way to put it on the dataframe creation
t.delete(source_df_index_to_string, "cols.index_to_string", None, "df", None, "rank***STRING_TO_INDEX")

t.run()

t.create(source_df_string_to_index, "cols.values_to_cols", None, "df", None, "rank")

t.run()

t.create(source_df_string_to_index, "cols.values_to_cols", "all_columns", "df", None, ["names", "height(ft)"])

t.run()

t.create(None, "cols.remove", None, "df", None, string_col, "i")

t.run()

t.create(None, "cols.remove", "list", "df", string_col, ["a", "i", "Es"])

t.create(None, "cols.remove", "list_output", "df", string_col, ["a", "i", "Es"], output_cols=string_col + "_new")

t.run()

t.create(None, "cols.min", None, "json", numeric_col)

t.create(None, "cols.min", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.max", None, "json", numeric_col)

t.create(None, "cols.max", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.range", None, "json", None, numeric_col)

t.create(None, "cols.range", "all_columns", "json", None, "*")

t.run()

source_df.table()

t.create(None, "cols.median", None, "json", None, numeric_col)

t.create(None, "cols.median", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.percentile", None, "json", None, numeric_col, [0.05, 0.25], 1)

t.create(None, "cols.percentile", "all_columns", "json", None, "*", [0.05, 0.25], 1)

# ## MAD

t.create(None, "cols.mad", None, "json", None, numeric_col)

t.create(None, "cols.mad", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.std", None, "json", numeric_col)

t.create(None, "cols.std", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.kurt", None, "json", None, numeric_col)

t.run()

t.create(None, "cols.kurt", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.mean", None, "json", numeric_col)

t.create(None, "cols.mean", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.skewness", None, "json", numeric_col)

t.create(None, "cols.skewness", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.sum", None, "json", numeric_col)

t.create(None, "cols.sum", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.variance", None, "json", numeric_col)

t.create(None, "cols.variance", "all_columns", "json", None, "*")

t.run()

source_df.table()

from pyspark.sql import functions as F

source_df.select(F.abs(F.col("age")))

t.create(None, "cols.abs", None, "df", None, "weight(t)")

t.create(None, "cols.abs", "all_columns", "json", None, "*")

source_df.table()

# +
from pyspark.sql import functions as F

source_df.select(F.abs("weight(t)"))
# -

t.create(None, "cols.mode", None, "json", None, numeric_col)

# %%time
t.create(None, "cols.mode", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.count", None, "json")

# ## Count na

t.create(None, "cols.count_na", None, "json", None, numeric_col)

t.create(None, "cols.count_na", "all_columns", "json", None, "*")

t.run()

source_df.cols.names("rank", ["str", "int", "float"], True)

t.create(None, "cols.count_zeros", None, "json", numeric_col)

t.create(None, "cols.count_zeros", "all_columns", "json", None, "*")

t.run()

t.run()

source_df.cols.names()

# ## Value counts

t.create(None, "cols.value_counts", None, "json", None, numeric_col)

t.run()

t.create(None, "cols.value_counts", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.count_uniques", None, "json", None, numeric_col)

t.run()

t.create(None, "cols.count_uniques", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.unique", None, "json", None, numeric_col)

t.run()

t.create(None, "cols.unique", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.add", None, "df", [numeric_col, numeric_col_B])

# NOTE(review): stray trailing comma makes this a 1-tuple expression; harmless.
t.create(None, "cols.add", "all_columns", "df", "*"),

t.create(None, "cols.sub", None, "df", [numeric_col, numeric_col_B])

t.create(None, "cols.sub", "all_columns", "df", "*")

t.create(None, "cols.mul", None, "df", [numeric_col, numeric_col_B])

t.create(None, "cols.mul", "all_columns", "df", "*")

t.create(None, "cols.div", None, "df", [numeric_col, numeric_col_B])

t.create(None, "cols.div", "all_columns", "df", "*")

t.create(None, "cols.z_score", None, "df", numeric_col)

t.create(None, "cols.z_score", "all_columns", "df", "*")

t.create(None, "cols.iqr", None, "json", None, numeric_col)

t.create(None, "cols.iqr", "all_columns", "json", None, "*")

t.run()

t.create(None, "cols.lower", None, "df", string_col)

t.create(None, "cols.lower", "all_columns", "df", "*")

t.create(None, "cols.upper", None, "df", string_col)

t.create(None, "cols.upper", "all_columns", "df", "*")

t.create(None, "cols.trim", None, "df", numeric_col)

t.create(None, "cols.trim", "all_columns", "df", "*")

t.create(None, "cols.reverse", None, "df", string_col)

t.create(None, "cols.reverse", "all_columns", "df", "*")

t.create(None, "cols.remove_accents", None, "df", string_col)

t.create(None, "cols.remove_accents", "all_columns", "df", string_col)

source_df.table()

t.create(None, "cols.remove_special_chars", None, "df", string_col)

t.create(None, "cols.remove_special_chars", "all_columns", "df", None, "*")

t.run()

# t.create(None, "cols.value_counts", None, "json", None, numeric_col)

source_df.cols.remove_special_chars("*").table()

t.create(None, "cols.remove_white_spaces", None, "df", string_col)

t.create(None, "cols.remove_white_spaces", "all_columns", "df", "*")

t.create(None, "cols.date_transform", None, "df", date_col, "yyyy/MM/dd", "dd-MM-YYYY")

t.run()

t.create(None, "cols.date_transform", "all_columns", "df", [date_col, date_col_B], "yyyy/MM/dd", "dd-MM-YYYY")

# t.create(None, "cols.years_between", None, "df", date_col, "yyyy/MM/dd")

t.delete(None, "cols.years_between", None, "df", date_col, "yyyy/MM/dd")

# t.create(None, "cols.years_between", "multiple_columns", "df", [date_col, date_col_B], "yyyy/MM/dd")

t.delete(None, "cols.years_between", "multiple_columns", "df", [date_col, date_col_B], "yyyy/MM/dd")

t.run()

t.create(None, "cols.impute", None, "df", numeric_col_B)

# %%time
t.create(None, "cols.impute", "all_columns", "df", None, "names", "categorical")

t.run()

# ## Hist

t.create(None, "cols.hist", None, "json", None, ["height(ft)", numeric_col_B], 4)

t.run()

t.create(None, "cols.hist", "all_columns", "json", None, "Date Type", 4)

t.run()

t.create(None, "cols.frequency", None, "dict", None, numeric_col_B, 4)

t.run()

t.create(None, "cols.frequency", "all_columns", "dict", None, "*", 4)

t.run()

t.create(None, "cols.schema_dtype", None, "json", numeric_col_B)

# Problems with casting
# t.delete(None, "cols.schema_dtype", "all_columns", "json", "*")

t.run()

t.create(None, "cols.dtypes", None, "json", None, numeric_col_B)

t.run()

t.create(None, "cols.dtypes", "all_columns", "json", "*")

t.create(None, "cols.select_by_dtypes", "str", "df", None, "str")

t.create(None, "cols.select_by_dtypes", "int", "df", "int")

t.create(None, "cols.select_by_dtypes", "float", "df", "float")

t.create(None, "cols.select_by_dtypes", "array", "df", "array")

t.create(None, "cols.names", None, "json")

t.create(None, "cols.qcut", None, "df", numeric_col_B, 4)

t.create(None, "cols.qcut", "all_columns", "df", "*", 4)

t.create(None, "cols.clip", None, "df", numeric_col_B, 3, 5)

t.create(None, "cols.clip", "all_columns", "df", "*", 3, 5)

t.create(None, "cols.replace", "full", "df", None, string_col, ["First Lieutenant", "Battle"], "Match", search_by="full")

t.create(None, "cols.replace", "words", "df", None, string_col, ["Security", "Leader"], "Match", search_by="words")

t.run()

t.create(None, "cols.replace", "chars", "df", None, string_col, ["F", "E"], "Match", search_by="chars")

t.create(None, "cols.replace", "numeric", "df", None, "age", 5000000, 5, search_by="numeric")

t.run()

# Assert is failing I can see why
t.create(None, "cols.replace", "all_columns", "df", None, "*", ["Jazz", "Leader"], "Match")

t.run()

# It is necessary to save the function
t.delete(None, "cols.apply_expr", None, "df", numeric_col_B, func) # Its necesary to save the function t.delete(None, "cols.apply_expr", "all_columns", "df", [numeric_col_B,numeric_col_C], func) t.create(None, "cols.append", "number", "df", new_col, 1) df_col = op.create.df( [ ("new_col", "str", True), ],[ ("q"),("w"), ("e"), ("r"), ]) t.create(None, "cols.append", "dataframes", "df", None, df_col) #t.create(None, "cols.append", "advance", "df", [("new_col_4", "test"), # ("new_col_5", df[numeric_col_B] * 2), # ("new_col_6", [1, 2, 3]) # ]), t.create(None, "cols.rename", None, "df", numeric_col_B, numeric_col_B + "(old)") t.create(None, "cols.rename", "list", "df", [numeric_col, numeric_col + "(tons)", numeric_col_B, numeric_col_B + "(old)"]) t.create(None, "cols.rename", "function", "df", str.upper) t.create(None, "cols.drop", None, "df", numeric_col_B) t.create(None, "cols.cast", None, "df", string_col, "string") t.create(None, "cols.cast", "all_columns", "df", "*", "string") t.run() # Problems with precision t.delete(None, "cols.cast", "vector", "df", array_col, Vectors) t.create(None, "cols.keep", None, "df", numeric_col_B) t.create(None, "cols.move", "after", "df", numeric_col_B, "after", array_col) t.create(None, "cols.move", "before", "df", numeric_col_B, "before", array_col) t.create(None, "cols.move", "beginning", "df", numeric_col_B, "beginning") t.create(None, "cols.move", "end", "df", numeric_col_B, "end") t.create(None, "cols.select", None, "df", 0, numeric_col) t.create(None, "cols.select", "regex", "df", "n.*", regex=True), t.create(None, "cols.sort", None, "df") t.run() t.create(None, "cols.sort", "desc", "df", None,"desc") t.create(None, "cols.sort", "asc", "df", None, "asc") t.run() t.create(None, "cols.fill_na", None, "df", numeric_col, "1") t.create(None, "cols.fill_na", "array", "df", None, "japanese name", ["1","2"]) t.run() t.create(None, "cols.fill_na", "bool", "df", None, "Cybertronian", False) t.run() # + jupyter={"outputs_hidden": true} 
t.create(None, "cols.fill_na", "all_columns", "df", ["names", "height(ft)", "function", "rank", "age"], "2")
# -

# ## Nest

t.create(None, "cols.nest", None, "df", None, [numeric_col, numeric_col_B], separator=" ", output_col=new_col)

# +
# t.create(None, "cols.nest", "mix", "df", [F.col(numeric_col_C), F.col(numeric_col_B)], "E", separator="--")

# +
# Vector assembly cannot handle nulls, so drop the NullType column and NA rows first.
df_na = source_df.cols.drop("NullType").rows.drop_na("*")

t.create(df_na, "cols.nest", "vector_all_columns", "df", None, [numeric_col_C, numeric_col_B], shape="vector", output_col=new_col)
# -

t.create(df_na, "cols.nest", "vector", "df", None, [numeric_col_C, numeric_col_B], shape="vector", output_col=new_col)

t.create(None, "cols.nest", "array", "df", None, [numeric_col, numeric_col_B, numeric_col_C], shape="array", output_col=new_col)

t.create(None, "cols.count_by_dtypes", None, "dict", None, "*", infer=False)

t.create(None, "cols.count_by_dtypes", "infer", "dict", None, "*", infer=True)

t.run()

# +
# One row per inferable dtype (booleans, lists, IPs, URLs, emails, credit cards,
# zip codes, dates, nulls, empty strings...) to exercise dtype inference.
dtypes_df = op.create.df(
    [
        ("col 1", "str", True),
        ("col 2", "str", True),
        ("col 3", "int", True),
    ], [
        ("male", "male", 1),
        ("optimus", "bumblebee", 1),
        ("3", "4.1", 1),
        ("true", "False", 1),
        ("[1,2,3,4]", "(1,2,3,4)", 1),
        ("{1,2,3,4}", "{'key1' :1 , 'key2':2}", 1),
        ("1.1.1.1", "192.168.3.11", 1),
        ("http://hi-optimuse.com", "https://hi-bumblebee.com", 1),
        ("<EMAIL>", "<EMAIL>", 1),
        ("5123456789123456", "373655783158306", 1),
        ("11529", "30345", 1),
        ("04/10/1980", "04/10/1980", 1),
        ("null", "Null", 1),
        ("", "", 1),
        (None, None, 1)
    ], infer_schema=True)
# -

t.create(dtypes_df, "cols.count_by_dtypes", "infer", "dict", None, "*", infer=True)

t.run()

t.create(source_df, "cols.count_by_dtypes", None, "dict", None, "*", infer=False)

t.run()

source_df.table()

# +
import logging
import sys
from datetime import date, datetime

from pyspark.sql.types import *

from optimus import Optimus

# DataFrame whose first row holds values that do NOT match the declared column
# semantics (a date where a name is expected, etc.), for count_mismatch.
mismatch_df = op.create.df(
    [
        ("names", "str", True),
        ("height(ft)", "int", True),
        ("function", "str", True),
        ("rank", "int", True),
        ("age", "int", True),
        ("weight(t)", "float", True),
        ("japanese name", ArrayType(StringType()), True),
        ("last position seen", "str", True),
        ("date arrival", "str", True),
        ("last date seen", "str", True),
        ("attributes", ArrayType(FloatType()), True),
        ("DateType", DateType()),
        ("Timestamp", TimestampType()),
        ("Cybertronian", "bool", True),
        ("function(binary)", "binary", False),
        ("NullType", "null", True),
    ], [
        ("31/12/2019", 28, "1978-12-20", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10", "2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"), None),
        ("bumbl#ebéé  ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10", "2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True, bytearray("Espionage", "utf-8"), None),
        ("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10", "2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True, bytearray("Security", "utf-8"), None),
        ("Jazz", 13, "First Lieutenant", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10", "2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True, bytearray("First Lieutenant", "utf-8"), None),
        ("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0], date(2012, 5, 10), datetime(2014, 6, 24), True, bytearray("None", "utf-8"), None),
        ("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10", [91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("Battle Station", "utf-8"), None),
        ("1", 2, "3", 4, 5, 6.0, ["7"], 8, "1980/04/10", "2011/04/10", [11.0], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("15", "utf-8"), None)
    ], infer_schema=True)
# -

mismatch = {"names": "dd/mm/yyyy", "height(ft)": r'^([0-2][0-9]|(3)[0-1])(\/)(((0)[0-9])|((1)[0-2]))(\/)\d{4}$', "function": "yyyy-mm-dd"}
m = {"names": "int"}

mismatch_df.table()

t.create(mismatch_df, "cols.count_mismatch", None, "dict", None, {"names": "int"})

t.run()

# ## Unnest String

# NOTE(review): the next two cases share the modifier "string_multi_index", so the
# second generated test overwrites the first; kept as-is to preserve output names.
t.create(None, "cols.unnest", "string_multi_index", "df", None, date_col, "/", splits=3, index=2)

t.create(None, "cols.unnest", "string_multi_index", "df", None, date_col, "/", splits=3, index=[1, 2])

t.create(None, "cols.unnest", "string_infer_split", "df", None, date_col, "/")

t.create(None, "cols.unnest", "string_no_index", "df", None, date_col, "/", splits=3)

t.create(None, "cols.unnest", "string_output_columns", "df", None, date_col, "/", splits=3, output_cols=[("year", "month", "day")])

t.create(None, "cols.unnest", "array_index", "df", None, array_col, index=2)

t.create(None, "cols.unnest", "array_multi_index", "df", None, array_col, index=[1, 2])

t.run()

t.create(None, "cols.unnest", "string_multi_colum_multi_index_multi_output", "df", None, ["date arrival", "last date seen"], "/", index=[(1, 2), (1, 2)], output_cols=[("year1", "month1"), ("year2", "month2")])

t.create(None, "cols.unnest", "string_multi_colum_multi_output", "df", None, ["date arrival", "last date seen"], "/", output_cols=[("year1", "month1"), ("year2", "month2")])

t.create(None, "cols.unnest", "array", "df", array_col)

t.create(None, "cols.unnest", "array_all_columns", "df", array_col)

t.create(None, "cols.is_na", "all_columns", "df", "*")

t.create(None, "cols.is_na", None, "df", numeric_col)

t.run()

from pyspark.sql.types import *

from optimus import Optimus
from optimus.helpers.json import json_enconding

from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector
import numpy as np

nan = np.nan
import datetime

# +
# Sanity-check: a remote JSON load must match a hand-built expected DataFrame.
actual_df = op.load.json('https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json')

expected_df = op.create.df([('billingId', LongType(), True), ('birth', StringType(), True), ('dummyCol', StringType(), True), ('firstName', StringType(), True), ('id', LongType(), True), ('lastName', StringType(), True), ('price', LongType(), True), ('product', StringType(), True)], [(123, '1980/07/07', 'never', 'Luis', 1, 'Alvarez$$%!', 10, 'Cake')])

# assert (expected_df.collect() == actual_df.collect())

from deepdiff import DeepDiff  # For Deep Difference of 2 objects

actual_df.table()
expected_df.table()

# source_df.table()
# print(actual_df.to_json())
# print(expected_df.to_json())

a1 = actual_df.to_json()
e1 = expected_df.to_json()
# -

ddiff = DeepDiff(a1, e1, ignore_order=False)
print(ddiff)

# # Rows Test

# BUG FIX(review): this was `Test(op, df, "df_rows", ...)` but `df` is not
# assigned until after this cell (below), which raises NameError on a clean run.
# Use source_df like every other Test in this notebook. (This instance is
# superseded by the op_io Test right after, and the real df_rows Test is
# re-created in the Row section at the end.)
t = Test(op, source_df, "df_rows", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F", "from optimus.functions import abstract_udf as audf"])

rows = [
    ("Optim'us", 28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10", "2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"), None)
]

df = op.load.url("https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.csv")

# ## Load / save round-trip tests
t = Test(op, source_df, "op_io", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F"], path="op_io", final_path="..")

t.create(op, "load.csv", "local_csv", "df", "../../examples/data/foo.csv")
t.create(op, "load.json", "local_json", "df", "../../examples/data/foo.json")
t.create(op, "load.parquet", "local_parquet", "df", "../../examples/data/foo.parquet")
t.create(op, "load.csv", "remote_csv", "df", "https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.csv")
t.create(op, "load.json", "remote_json", "df", "https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.json")
t.create(op, "load.parquet", "remote_parquet", "df", "https://raw.githubusercontent.com/ironmussa/Optimus/master/examples/data/foo.parquet")

# +
from optimus.profiler.profiler import Profiler

p = Profiler()

# BUG FIX(review): was `p.run(source_df1, ...)` — `source_df1` is never defined
# anywhere in this notebook (NameError); the intended target is source_df.
print(p.run(source_df, "japanese name"))
# -

# df_string = source_df.cols.cast("*","str")
t.create(source_df, "save.csv", None, None, "test.csv")

t.create(None, "save.json", None, None, "test.json")

t.create(None, "save.parquet", None, None, "test.parquet")

t.run()

source_df.table()

# # Outliers

# +
import pandas as pd
from pyspark.sql.types import *
from datetime import date, datetime

# Rebuild the master DataFrame (same schema/data as the init section) so the
# outliers tests are independent of earlier cells.
cols = [
    ("names", "str"),
    ("height(ft)", ShortType()),
    ("function", "str"),
    ("rank", ByteType()),
    ("age", "int"),
    ("weight(t)", "float"),
    "japanese name",
    "last position seen",
    "date arrival",
    "last date seen",
    ("attributes", ArrayType(FloatType())),
    ("Date Type", DateType()),
    ("timestamp", TimestampType()),
    ("Cybertronian", BooleanType()),
    ("function(binary)", BinaryType()),
    ("NullType", NullType())
]

rows = [
    ("Optim'us", -28, "Leader", 10, 5000000, 4.30, ["Inochi", "Convoy"], "19.442735,-99.201111", "1980/04/10", "2016/09/10", [8.5344, 4300.0], date(2016, 9, 10), datetime(2014, 6, 24), True, bytearray("Leader", "utf-8"), None),
    ("bumbl#ebéé  ", 17, "Espionage", 7, 5000000, 2.0, ["Bumble", "Goldback"], "10.642707,-71.612534", "1980/04/10", "2015/08/10", [5.334, 2000.0], date(2015, 8, 10), datetime(2014, 6, 24), True, bytearray("Espionage", "utf-8"), None),
    ("ironhide&", 26, "Security", 7, 5000000, 4.0, ["Roadbuster"], "37.789563,-122.400356", "1980/04/10", "2014/07/10", [7.9248, 4000.0], date(2014, 6, 24), datetime(2014, 6, 24), True, bytearray("Security", "utf-8"), None),
    ("Jazz", 13, "<NAME>", 8, 5000000, 1.80, ["Meister"], "33.670666,-117.841553", "1980/04/10", "2013/06/10", [3.9624, 1800.0], date(2013, 6, 24), datetime(2014, 6, 24), True, bytearray("First Lieutenant", "utf-8"), None),
    ("Megatron", None, "None", 10, 5000000, 5.70, ["Megatron"], None, "1980/04/10", "2012/05/10", [None, 5700.0], date(2012, 5, 10), datetime(2014, 6, 24), True, bytearray("None", "utf-8"), None),
    ("Metroplex_)^$", 300, "Battle Station", 8, 5000000, None, ["Metroflex"], None, "1980/04/10", "2011/04/10", [91.44, None], date(2011, 4, 10), datetime(2014, 6, 24), True, bytearray("Battle Station", "utf-8"), None),
    (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None),
]

source_df = op.create.df(cols, rows)
source_df.table()
# -

t = Test(op, source_df, "df_outliers", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F"], path="df_outliers", final_path="..")

# +
from pyspark.sql import functions as F


def func(col_name, attrs):
    # Doubles a column; kept for parity with the df_cols section.
    return F.col(col_name) * 2


numeric_col = "height(ft)"
numeric_col_B = "rank"
numeric_col_C = "rank"
string_col = "function"
date_col = "date arrival"
date_col_B = "last date seen"
new_col = "new col"
array_col = "attributes"
# -

source_df.table()

# ## Tukey

t.create(None, "outliers.tukey", None, "df", "select", numeric_col)

source_df.outliers.tukey(numeric_col).drop().table()

t.create(None, "outliers.tukey", None, "df", "drop", numeric_col)

t.create(None, "outliers.tukey", None, "json", "whiskers", numeric_col)

t.create(None, "outliers.tukey", None, "json", "count", numeric_col)

t.create(None, "outliers.tukey", None, "json", "non_outliers_count", numeric_col)

t.create(None, "outliers.tukey", None, "json", "info", numeric_col)

t.run()

# ## Zscore

threshold = 0.5

t.create(None, "outliers.z_score", None, "df", "select", numeric_col, threshold)

source_df.outliers.z_score('height(ft)', 0.5).select()

t.create(None, "outliers.z_score", None, "df", "drop", numeric_col, threshold)

t.create(None, "outliers.z_score", None, "json", "count", numeric_col, threshold)

t.create(None, "outliers.z_score", None, "json", "non_outliers_count", numeric_col, threshold)

t.create(None, "outliers.z_score", None, "json", "info", numeric_col, threshold)

t.run()

# ## Modified Zscore

threshold = 0.5
relative_error = 10000

t.create(None, "outliers.modified_z_score", None, "df", "select", numeric_col, threshold, relative_error)

t.create(None, "outliers.modified_z_score", None, "df", "drop", numeric_col, threshold, relative_error)

t.create(None, "outliers.modified_z_score", None, "json", "count", numeric_col, threshold, relative_error)

t.create(None, "outliers.modified_z_score", None, "json", "non_outliers_count", numeric_col, threshold, relative_error)

t.create(None, "outliers.modified_z_score", None, "json", "info", numeric_col, threshold, relative_error)

t.run()

# ## Mad

threshold = 0.5
relative_error = 10000

t.create(None, "outliers.mad", None, "df", "select", numeric_col, threshold, relative_error)

t.create(None, "outliers.mad", None, "df", "drop", numeric_col, threshold, relative_error)

t.create(None, "outliers.mad", None, "json", "count", numeric_col, threshold, relative_error)

t.create(None, "outliers.mad", None, "json", "non_outliers_count", numeric_col, threshold, relative_error)

t.create(None, "outliers.mad", None, "json", "info", numeric_col, threshold, relative_error)

t.run()

# ## Key collision

source_df = op.read.csv("../../examples/data/random.csv", header=True, sep=";").limit(10)

source_df.table()

# +
t = Test(op, source_df, "df_keycollision", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F", "from optimus.ml import keycollision as keyCol"], path="df_keycollision", final_path="..")

from optimus.ml import keycollision as keyCol
# + inputHidden=false outputHidden=false
t.create(keyCol, "fingerprint", None, "df", None, source_df, "STATE")
t.run()
# + inputHidden=false outputHidden=false
t.create(keyCol, "fingerprint_cluster", None, "json", None, source_df, "STATE")
# -

t.run()

# + inputHidden=false outputHidden=false
t.create(keyCol, "n_gram_fingerprint", None, "df", None, source_df, "STATE")
# + inputHidden=false outputHidden=false
t.create(keyCol, "n_gram_fingerprint_cluster", None, "json", None, source_df, "STATE", 2)
# + inputHidden=false outputHidden=false
t.run()
# -

# ## Distance cluster

source_df = op.read.csv("../../examples/data/random.csv", header=True, sep=";").limit(1000)

# + inputHidden=false outputHidden=false
t = Test(op, source_df, "df_distance_cluster", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "import datetime", "from pyspark.sql import functions as F", "from optimus.ml import distancecluster as dc"], path="df_distance_cluster", final_path="..")

from optimus.ml import distancecluster as dc
# -

df.table()

# + inputHidden=false outputHidden=false
t.create(dc, "levenshtein_cluster", None, 'dict', None, source_df, "STATE")
# -

t.run()

# Gradient-boosted-tree smoke test on the breast-cancer sample data.
df_cancer = op.spark.read.csv('../data_cancer.csv', sep=',', header=True, inferSchema=True)

columns = ['diagnosis', 'radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean', 'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean']

df_model, rf_model = op.ml.gbt(df_cancer, columns, "diagnosis")

df_model.table()

# ## Row

# Small mixed-type DataFrame for the rows.* tests.
source_df = op.create.df([
    ("words", "str", True),
    ("num", "int", True),
    ("animals", "str", True),
    ("thing", StringType(), True),
    ("second", "int", True),
    ("filter", StringType(), True)
], [
    ("  I like     fish  ", 1, "dog dog", "housé", 5, "a"),
    ("    zombies", 2, "cat", "tv", 6, "b"),
    ("simpsons   cat lady", 2, "frog", "table", 7, "1"),
    (None, 3, "eagle", "glass", 8, "c"),
])

from optimus.audf import abstract_udf as audf

t = Test(op, source_df, "df_rows", imports=["from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector", "import numpy as np", "nan = np.nan", "from optimus.audf import abstract_udf as audf", "import datetime", "from pyspark.sql import functions as F"], path="df_rows", final_path="..")

row = [("this is a word", 2, "this is an animal", "this is a thing", 64, "this is a filter",)]

t.create(None, "rows.append", None, "df", None, row)

fil = (source_df["num"] == 1)

t.create(None, "rows.select", None, "df", None, fil)

t.create(None, "rows.select_by_dtypes", None, "df", None, "filter", "integer")

fil = (source_df["num"] == 2) | (source_df["second"] == 5)
print(str(fil))
# type(fil)

t.create(None, "rows.drop", None, "df", None, fil)

t.create(None, "rows.drop_by_dtypes", None, "df", None, "filter", "integer")


def func_data_type(value, attr):
    # Predicate for the audf-based drop below.
    return value > 1


a = audf("num", func_data_type, "boolean")

t.create(None, "rows.drop", "audf", "df", None, a)

t.create(None, "rows.sort", None, "df", None, "num", "desc")

t.create(None, "rows.is_in", None, "df", None, "num", 2)

t.create(None, "rows.between", None, "df", None, "second", 6, 8)

t.create(None, "rows.between", "equal", "df", None, "second", 6, 8, equal=True)

t.create(None, "rows.between", "invert_equal", "df", None, "second", 6, 8, invert=True, equal=True)

t.create(None, "rows.between", "bounds", "df", None, "second", bounds=[(6, 7), (7, 8)], invert=True, equal=True)

t.run()
tests/creator/creator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Groupby, split-apply-combine and pandas

# Netflix recently released some user ratings data. I wanted to ask a straightforward question: _do Netflix subscribers prefer older or newer movies?_
#
# Intuitively, you want to split the dataset into groups, one for each year, and then to compute a summary statistic, such as the mean or the median, and then to see whether this statistic increases over the years (after this, you may want to perform a statistical test).
#
# The great thing is that there is a conceptual framework for doing and thinking about this, along with implementations in both Python and R. The framework is known as split-apply-combine because we
#
# * [Step 1](#step_1): split the data into groups by creating a _groupby_ object from the original DataFrame;
# * [Step 2](#step_2): apply a function, in this case, an aggregation function that computes a summary statistic (you can also transform or filter your data in this step);
# * [Step 3](#step_3): combine the results into a new DataFrame.
#
# This is the conceptual framework for the analysis at hand. In this post you'll learn how to do this to answer the Netflix ratings question above using the Python package `pandas`. You could do the same in R using, for example, the `dplyr` package. I'll also necessarily delve into _groupby_ objects, which are not the most intuitive objects. The process of split-apply-combine with _groupby_ objects is a pattern that we all perform intuitively, as we'll see, but it took Hadley Wickham to formalize the procedure in 2011 with his paper _The Split-Apply-Combine Strategy for Data Analysis_.
# # If you find this technique useful, you can learn more about it (among many other things) and practice it in our [Manipulating DataFrames with pandas course](https://www.datacamp.com/courses/manipulating-dataframes-with-pandas), taught by <NAME> of Anaconda. # ## Data Exploration with pandas # ### Import your data # Here you'll use `pandas`, _groupby_ objects and the principles of split-apply-combine to check out how Netflix movie ranges vary as a function of the year they were released. I originally came across the data on data.world [here](https://data.world/chasewillden/netflix-shows) and you can also find it at [here](https://theconceptcenter.com/simple-research-study-netflix-shows-analysis/) at The Concept Centre. You can find all the code in this post [here](https://github.com/datacamp/community-groupby) if you would like to reproduce it. # # You'll first import the necessary packages and the data and check out the first five rows of the data: # Import packages and set visualization style import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set() # %matplotlib inline # Import data and check out head of DataFrame df = pd.read_csv('data/chasewillden-netflix-shows/data/netflix.csv') df.head() # This looks pretty cool to me: you have titles, ratings, release year and user rating score, among several other columns. Before performing our _groupby_ and split-apply-combine procedure, lets look a bit more closely at the data to make sure it's what we think it is and to deal with missing values. Note that there is a missing value NaN in the *user_rating_score* of the second row (row 1). # ### Summarising your data with plots and statistics # The `pandas` DataFrame `.info()` method is invaluable. Applying it below shows that you have 1000 rows and 7 columns of data, but also that the column of interest, *user_rating_score*, has only 605 non-null values. 
This means that there are 395 missing values: # Check out info of DataFrame df.info() # You can drop rows that have any missing values, drop any duplicate rows and build a _pairplot_ of the DataFrame using `seaborn` in order to get a visual sense of the data. You'll color the data by the 'rating' column. Check out the plots and see what information you can get from them. # + # Drop rows with missing values and drop duplicate df.dropna(inplace=True) df.drop_duplicates(inplace=True) # Visualize pairplot of df sns.pairplot(df, hue='rating'); # - # Check out, for example, *user_rating_score* as a function of *release_year*. There is *not* a visually identifiable trend but perhaps some data analysis will draw out any trends. If you'd like to check out several summary statistics of the DataFrame, you can also do this using the `.describe()` method: # Get summary stats of df df.describe() # ## Groupbys and split-apply-combine to answer the question # <a id='step_1'></a> # ### Step 1. Split # Now that you've checked out out data, it's time for the fun part. You'll first use a *groupby* method to split the data into groups, where each group is the set of movies released in a given year. This is the *split* in split-apply-combine: # Group by year df_by_year = df.groupby('release_year') # This creates a *groupby* object: # Check type of GroupBy object type(df_by_year) # <a id='step_2'></a> # ### Step 2. Apply # Such *groupby* objects are very useful. Remember that the `.describe()` method for a DataFrame returns summary statistics for numeric columns? Well, the `.describe()` method for `DataFrameGroupBy` objects returns summary statistics for each numeric column, but computed for each group in the split. In your case, it's for each *release_year*. This is an example of the *apply* in split-apply-combine: you're *applying* the `.describe()` method to each group in the *groupby*. 
Do this and print the first 5 rows of the result: # Summary stats over years df_by_year.describe().head() # If you want to see what the grouping looks like, you can pass the _groupby_ object to the function `list()`: # Cast grouping as a list and check out one year list(df_by_year)[10] # <a id='step_3'></a> # ### Step 3. Combine # Let's say that you wanted the mean or median *user_rating_score* for each year. Then you can apply the `.mean()` or `.median()` method, respectively, to the *groupby* object and 'combine' these into a new DataFrame. # Get median values by year and print first 5 rows df_med_by_year = df_by_year.median() df_med_by_year.head() # There's an important subtlety concerning the index of the DataFrame `df_med_by_year`. Recall that the index of a DataFrame consists of the row labels. Check out the index of the original DataFrame `df`: # Print index of df print(df.index) # This index consists of the original row numbers, labelled by integers. '1' is missing as you dropped some rows above. The index of `df_med_by_year` consists of the values in the original column that you grouped by, the years from *release_year*: # Print index print(df_med_by_year.index) # You're interested in the *user_rating_score* column, which contains the median rating for each year. You can slice out the *user_rating_score* column of `df_med_by_year` and plot it as a function of the year (given by the index of the DataFrame `df_rat_by_year`): # Slice out user rating and plot df_rat_by_year = df_med_by_year['user_rating_score'] plt.scatter(df_rat_by_year.index, df_rat_by_year) plt.xlabel('year of release') plt.ylabel('median rating'); # Looking at the figure, the median rating definitely increases over time. You'd need to leverage some more sophisticated statistics to convince me of the trend in general but this an example of Exploratory Data Analysis being a great starting point for further research. 
# ## Groupbys and split-apply-combine in daily use # *Groupby* objects are not intuitive. They do, however, correspond to the natural act of splitting a dataset with respect to one of its columns (or more than one, but let's save that for another post about grouping by multiple columns and hierarchical indexes). # # The split-apply-combine principle is not only elegant and practical, it's something that Data Scientists use daily, as in the above example. To appreciate more of its uses, check out <NAME>'s original [paper](https://www.jstatsoft.org/article/view/v040i01) *The Split-Apply-Combine Strategy for Data Analysis*. If you have any thoughts, responses and/or ruminations, feel free to reach out to me on twitter: [@hugobowne](https://twitter.com/hugobowne).
split-apply-combine-netflix-data.ipynb
# ---
# title: "Custom Lambda DataFrame"
# author: "Charles"
# date: 2020-08-10
# description: "-"
# type: technical_note
# draft: false
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Build a tiny one-column frame of example sentences.
data = ['Vachitaya aapuuuuuuuuu', 'Sing in the rain', 'Great power comes wih great responsibility']
df = pd.DataFrame(data, columns=['Sent'])
df


def sent_length(message):
    """Return the number of characters in `message`."""
    return len(message)


# Run the helper over every sentence and store the lengths in a new column.
df['len'] = df['Sent'].map(sent_length)
df
docs/python/pandas/custom-lambda-dataframe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # In this exercise, you will use your new knowledge to propose a solution to a real-world scenario. To succeed, you will need to import data into Python, answer questions using the data, and generate **scatter plots** to understand patterns in the data. # # ## Scenario # # You work for a major candy producer, and your goal is to write a report that your company can use to guide the design of its next product. Soon after starting your research, you stumble across this [very interesting dataset](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) containing results from a fun survey to crowdsource favorite candies. # # ## Setup # # Run the next cell to import and configure the Python libraries that you need to complete the exercise. import pandas as pd pd.plotting.register_matplotlib_converters() import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns print("Setup Complete") # The questions below will give you feedback on your work. Run the following cell to set up our feedback system. # Set up code checking import os if not os.path.exists("../input/candy.csv"): os.symlink("../input/data-for-datavis/candy.csv", "../input/candy.csv") from learntools.core import binder binder.bind(globals()) from learntools.data_viz_to_coder.ex4 import * print("Setup Complete") # ## Step 1: Load the Data # # Read the candy data file into `candy_data`. Use the `"id"` column to label the rows. 
# + # Path of the file to read candy_filepath = "../input/candy.csv" # Fill in the line below to read the file into a variable candy_data candy_data = ____ # Run the line below with no changes to check that you've loaded the data correctly step_1.check() # - # #%%RM_IF(PROD)%% candy_data = pd.read_csv(candy_filepath, index_col="id") step_1.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_1.hint() #_COMMENT_IF(PROD)_ step_1.solution() # ## Step 2: Review the data # # Use a Python command to print the first five rows of the data. # Print the first five rows of the data ____ # Your code here # The dataset contains 83 rows, where each corresponds to a different candy bar. There are 13 columns: # - `'competitorname'` contains the name of the candy bar. # - the next **9** columns (from `'chocolate'` to `'pluribus'`) describe the candy. For instance, rows with chocolate candies have `"Yes"` in the `'chocolate'` column (and candies without chocolate have `"No"` in the same column). # - `'sugarpercent'` provides some indication of the amount of sugar, where higher values signify higher sugar content. # - `'pricepercent'` shows the price per unit, relative to the other candies in the dataset. # - `'winpercent'` is calculated from the survey results; higher values indicate that the candy was more popular with survey respondents. # # Use the first five rows of the data to answer the questions below. # + # Fill in the line below: Which candy was more popular with survey respondents: # '3 Musketeers' or 'Almond Joy'? (Please enclose your answer in single quotes.) more_popular = ____ # Fill in the line below: Which candy has higher sugar content: 'Air Heads' # or 'Baby Ruth'? (Please enclose your answer in single quotes.) 
more_sugar = ____ # Check your answers step_2.check() # - # #%%RM_IF(PROD)%% more_popular = '3 Musketeers' more_sugar = 'Air Heads' step_2.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_2.hint() #_COMMENT_IF(PROD)_ step_2.solution() # ## Step 3: The role of sugar # # Do people tend to prefer candies with higher sugar content? # # #### Part A # # Create a scatter plot that shows the relationship between `'sugarpercent'` (on the horizontal x-axis) and `'winpercent'` (on the vertical y-axis). _Don't add a regression line just yet -- you'll do that in the next step!_ # + # Scatter plot showing the relationship between 'sugarpercent' and 'winpercent' ____ # Your code here # Check your answer step_3.a.check() # - # #%%RM_IF(PROD)%% sns.scatterplot(x=candy_data['sugarpercent'], y=candy_data['winpercent']) step_3.a.assert_check_passed() # #%%RM_IF(PROD)%% sns.regplot(x=candy_data['sugarpercent'], y=candy_data['winpercent']) step_3.a.assert_check_failed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_3.a.hint() #_COMMENT_IF(PROD)_ step_3.a.solution_plot() # #### Part B # # Does the scatter plot show a **strong** correlation between the two variables? If so, are candies with more sugar relatively more or less popular with the survey respondents? #_COMMENT_IF(PROD)_ step_3.b.hint() #_COMMENT_IF(PROD)_ step_3.b.solution() # ## Step 4: Take a closer look # # #### Part A # # Create the same scatter plot you created in **Step 3**, but now with a regression line! 
# + # Scatter plot w/ regression line showing the relationship between 'sugarpercent' and 'winpercent' ____ # Your code here # Check your answer step_4.a.check() # - # #%%RM_IF(PROD)%% sns.regplot(x=candy_data['sugarpercent'], y=candy_data['winpercent']) step_4.a.assert_check_passed() # #%%RM_IF(PROD)%% sns.scatterplot(x=candy_data['sugarpercent'], y=candy_data['winpercent']) step_4.a.assert_check_failed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_4.a.hint() #_COMMENT_IF(PROD)_ step_4.a.solution_plot() # #### Part B # # According to the plot above, is there a **slight** correlation between `'winpercent'` and `'sugarpercent'`? What does this tell you about the candy that people tend to prefer? #_COMMENT_IF(PROD)_ step_4.b.hint() #_COMMENT_IF(PROD)_ step_4.b.solution() # ## Step 5: Chocolate! # # In the code cell below, create a scatter plot to show the relationship between `'pricepercent'` (on the horizontal x-axis) and `'winpercent'` (on the vertical y-axis). Use the `'chocolate'` column to color-code the points. _Don't add any regression lines just yet -- you'll do that in the next step!_ # + # Scatter plot showing the relationship between 'pricepercent', 'winpercent', and 'chocolate' ____ # Your code here # Check your answer step_5.check() # - # #%%RM_IF(PROD)%% sns.scatterplot(x=candy_data['pricepercent'], y=candy_data['winpercent'], hue=candy_data['chocolate']) step_5.assert_check_passed() # #%%RM_IF(PROD)%% sns.scatterplot(x=candy_data['pricepercent'], y=candy_data['winpercent']) step_5.assert_check_failed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_5.hint() #_COMMENT_IF(PROD)_ step_5.solution_plot() # Can you see any interesting patterns in the scatter plot? We'll investigate this plot further by adding regression lines in the next step! 
# # ## Step 6: Investigate chocolate # # #### Part A # # Create the same scatter plot you created in **Step 5**, but now with two regression lines, corresponding to (1) chocolate candies and (2) candies without chocolate. # + # Color-coded scatter plot w/ regression lines ____ # Your code here # Check your answer step_6.a.check() # - # #%%RM_IF(PROD)%% sns.scatterplot(x=candy_data['pricepercent'], y=candy_data['winpercent']) step_6.a.assert_check_failed() # #%%RM_IF(PROD)%% sns.lmplot(x="pricepercent", y="winpercent", hue="chocolate", data=candy_data) step_6.a.assert_check_passed() # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_6.a.hint() #_COMMENT_IF(PROD)_ step_6.a.solution_plot() # #### Part B # # Using the regression lines, what conclusions can you draw about the effects of chocolate and price on candy popularity? #_COMMENT_IF(PROD)_ step_6.b.hint() #_COMMENT_IF(PROD)_ step_6.b.solution() # ## Step 7: Everybody loves chocolate. # # #### Part A # # Create a categorical scatter plot to highlight the relationship between `'chocolate'` and `'winpercent'`. Put `'chocolate'` on the (horizontal) x-axis, and `'winpercent'` on the (vertical) y-axis. # + # Scatter plot showing the relationship between 'chocolate' and 'winpercent' ____ # Your code here # Check your answer step_7.a.check() # - # #%%RM_IF(PROD)%% sns.swarmplot(x=candy_data['chocolate'], y=candy_data['winpercent']) step_7.a.assert_check_passed() # + # #%%RM_IF(PROD)%% #sns.swarmplot(x=candy_data['chocolate'], y=candy_data['sugarpercent']) #step_7.a.assert_check_failed() # + # #%%RM_IF(PROD)%% #sns.swarmplot(x=candy_data['fruity'], y=candy_data['winpercent']) #step_7.a.assert_check_failed() # - # Lines below will give you a hint or solution code #_COMMENT_IF(PROD)_ step_7.a.hint() #_COMMENT_IF(PROD)_ step_7.a.solution_plot() # #### Part B # # You decide to dedicate a section of your report to the fact that chocolate candies tend to be more popular than candies without chocolate. 
Which plot is more appropriate to tell this story: the plot from **Step 6**, or the plot from **Step 7**? #_COMMENT_IF(PROD)_ step_7.b.hint() #_COMMENT_IF(PROD)_ step_7.b.solution() # ## Keep going # # Explore **[histograms and density plots](#$NEXT_NOTEBOOK_URL$)**.
notebooks/data_viz_to_coder/raw/ex4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
import sys

sys.path.append('..')

# +
import sqlite3
from dataclasses import dataclass, field
from pathlib import Path
from pprint import pp

from herbarium.pylib import db

# +
# Project layout: everything lives under ../data.
DATA = Path('..') / 'data'
MODELS = DATA / 'models'
IMAGES = DATA / 'images'
DB = DATA / 'angiosperms.sqlite'
# -

# Rewrite every stored image path, stripping the '/all' path segment.
images = db.select_images(DB)

updates = []
for rec in images:
    old_path = rec['path']
    updates.append((old_path.replace('/all', ''), old_path))

sql = """update images set path = ? where path = ?;"""
with sqlite3.connect(DB) as cxn:
    cxn.executemany(sql, updates)
notebooks/08_infer_traits.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Keras # language: python # name: keras # --- # # [Install pycocotools](https://github.com/ildoonet/tf-pose-estimation/issues/458) # - step 1: While executing requirements.txt file remove pycocotools and proceed with rest of the installation. # - step 2: Run command -> pip install --upgrade cython # - step 3: Use this link to clone the recent coco module: https://github.com/philferriere/cocoapi # (git clone https://github.com/philferriere/cocoapi ) # # - step 4: cd cocoapi/PythonAPI/ # - step 5: python setup.py install # + # # !pip install --upgrade cython # # !git clone https://github.com/philferriere/cocoapi # # !cd cocoapi/PythonAPI/ # # !python setup.py install # # !sudo rm -fr ../../../cocoapi # - import matplotlib.pyplot as plt import cv2 import numpy as np import json import glob import os # %matplotlib inline plt.rcParams['figure.figsize'] = (10, 8) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval coco = COCO("pascal_train.json") # load training annotations coco.cats # check the category # + image_ids = [] imgs_keys = list(coco.imgs.keys()) for i in imgs_keys: image_ids.append (coco.imgs[i]['file_name'][:-4]) #image_ids # - # # Extract Annotations from Pascal VOC 2012 trainval dataset # ## Tiny VOC 2012 dataset only contains 1349 images # + ## Make sure the path is correct. 
# # !wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar # # !tar xvf VOCtrainval_11-May-2012.tar # - Annotations = os.listdir('./VOCdevkit/VOC2012/Annotations') counts = 0 for file in (Annotations): if file[:-4] not in image_ids: try: os.remove('./VOCdevkit/VOC2012/Annotations/'+str(file)) except OSError as e: print(e) counts +=1 counts #print(str(file[:-4])) Class = os.listdir('./VOCdevkit/VOC2012/SegmentationClass') counts = 0 for file in (Class): if file[:-4] not in image_ids: try: os.remove('./VOCdevkit/VOC2012/SegmentationClass/'+str(file)) except OSError as e: print(e) counts +=1 counts #print(str(file[:-4])) Object = os.listdir('./VOCdevkit/VOC2012/SegmentationObject') counts = 0 for file in (Object): if file[:-4] not in image_ids: try: os.remove('./VOCdevkit/VOC2012/SegmentationObject/'+str(file)) except OSError as e: print(e) counts +=1 counts #print(str(file[:-4])) image = os.listdir('./VOCdevkit/VOC2012/JPEGImages') counts = 0 for file in (image): if file[:-4] not in image_ids: try: os.remove('./VOCdevkit/VOC2012/JPEGImages/'+str(file)) except OSError as e: print(e) counts +=1 counts #print(str(file[:-4])) # # Rename the VOCdevkit directory as VOCdevkit_Tiny # # Rename the VOCdevkit_Tiny/VOC2012/JPEGImages as train_images # + # #!mv VOCdevkit/ VOCdevkit_Tiny # #!mv VOCdevkit_Tiny/JPEGImages/VOC2012/JPEGImages/ train_images # - # # Create training data label # *You have to create the validation data by your own* train_label = open('./VOCdevkit_Tiny/VOC2012/train.txt', 'w') for i in sorted(image_ids): train_label.write(i+'\n') #test.write(str(image_ids)) train_label.close() imgIds = 5 # Use the key above to retrieve information of the image img_info = coco.loadImgs(ids=imgIds) print(img_info) image = cv2.imread('./VOCdevkit_Tiny/VOC2012/train_images/'+ str(img_info[-1]['file_name'])) image = image[:,:,::-1] # BGR -> RGB plt.imshow(image) # # Get VOC_Tiny dataset Image RGB mean a = 0 for i in image_ids: image = 
plt.imread('./VOCdevkit_Tiny/VOC2012/train_images/'+ str(i) + '.jpg') a = np.mean(image,axis=(0,1)) + a #plt.figure() #plt.imshow(image) a / len(image_ids) # Use the imgIds to find all instance ids of the image annids = coco.getAnnIds(imgIds=imgIds) print(annids) anns = coco.loadAnns(annids) print("Number of instances: ", len(annids)) instance_id = 0 print(anns[instance_id].keys()) # check the information of the first instance of the image print("Polygons of segmenatation: ", anns[instance_id]['segmentation'][0][:18]) print("Image id of this instance: ", anns[instance_id]['image_id']) print("Bounding box of this instance: ", anns[instance_id]['bbox']) print("Category_id: ", anns[instance_id]['category_id']) # # Visualization plt.subplots(nrows=2, ncols=3, figsize=(18, 12)) for i in range(len(annids)): mask = coco.annToMask(anns[i]) cate = anns[i]['category_id'] plt.subplot(2, 3, i+1) plt.title("Instance {}, category={}".format(i+1, coco.cats[cate]['name'])) plt.imshow(mask)
samples/VOC/data_loader.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np df_psy = pd.read_csv('Youtube01-Psy.csv') df_psy.head() df_psy.shape # 5 columns and 350 rows df_psy.count() # No Null values present here ;) df_kp = pd.read_csv('Youtube02-KatyPerry.csv') df_kp.head() df_kp.shape df_kp.count() # No Null values present here also ;) df_lmfao = pd.read_csv('Youtube03-LMFAO.csv') df_lmfao.head() df_lmfao.shape df_lmfao.count() # No Null values present here also ;) df_em = pd.read_csv('Youtube04-Eminem.csv') df_em.head() df_em.shape df_em.count() # Some missing values are there in the date column df_sk = pd.read_csv('Youtube05-Shakira.csv') df_sk.head() df_sk.shape df_sk.count() # No missing values ;) # Trying to merge all the datasets into one so that we'll have one large dataset data_frames = [df_psy, df_kp, df_lmfao, df_em, df_sk] # As we can see all the dataset have the same column names luckily, COMMENT_ID, AUTHOR, DATE, CONTENT, CLASS by this it is easy for us to use the .concat command by pandas to that it will concatenate all the data according to their respective column starting from the "df_psy" up to the "df_sk" concat_df = pd.concat(data_frames) concat_df.head() concat_df.shape concat_df.count() # Look we've now a total of 1956 rows, that is sum up from the separately of 5 datasets. # The DATE column was having some missing values in the 'Youtube04-Eminem' dataset. 
concat_df # Assigning keys to every datasets' comments in the concatenated dataset so that if we need to see the comments of a specific person or say dataset it would be easy for us to do keys = ['psy', 'kattyPerry', 'lmfao', 'eminem', 'shakira'] # by default the keys parameter is set to none final_dataframe = pd.concat(data_frames, keys=keys) final_dataframe.count() # Nothing is changed with the dataset's columns and rows # But now we can see the comments of <NAME> from the final large dataset final_dataframe.loc['kattyPerry'] # Look in above code it print out the comments ralted to the key==kattyPerry. # And for confirmation I run this line of code df_kp.head() # Now save the final_dataframe into a csv file by the pandas method to_csv final_dataframe.to_csv('final_dataframe.csv') final_dataframe.size # ## Cleaning the data final_dataframe.columns final_dataframe.dtypes final_dataframe.isnull().sum() final_dataframe.DATE final_dataframe.AUTHOR # ### Contents # # As if look up the we can observe that from the contents, we'll classiy the comments as spam or not spam. # Like the other attributes doesn't matter that much. As the one is name, we don't care about who is the author. Also in case of # date it also doesn't matter like on which date it has been posted or something else. # So for us the 'CONTENT' and the 'CLASS' columns are most important. 
# data_df = final_dataframe[['CONTENT', 'CLASS']] data_df.head() data_df.count() data_df.columns data_feature = data_df['CONTENT'] data_label = data_df['CLASS'] # ### Extracting features from text # ML Packages For Vectorization of Text For Feature Extraction from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer # Extracting the features with the CountVectorizer() corpus = data_feature vectorizer = CountVectorizer() # fit the data (feature) which is in corpus X = vectorizer.fit_transform(corpus) # Here are the features it extracted from the data vectorizer.get_feature_names() X.toarray() # Now we extracted features from the data. And as we know the models perform well over the numbers and guess what, we've now # numbers in the form of array of arrays. And so these numbers i-e 'X' will be act as features while the corresponding labels are stored # in the variable 'data_label' # ### Building the Model # for splitting the data into training and testing part from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, data_label, test_size=0.33, random_state=42) X_train X_train.shape # Look we've now 4454 of total features to predict the spam or not spam class of a comment X_train # The Standard Naive Bayes Classifier for the Spam filtering from sklearn.naive_bayes import MultinomialNB clf = MultinomialNB() clf.fit(X_train, y_train) accuracy = clf.score(X_test, y_test) print(accuracy*100, '%') # #### Prediction clf.predict(X_test) # - 0 ==> Not spam (Ham) # - 1 ==> Spam # Sample Prediction # As we need to provide the data or say text to the model in vectorize form (as we did) so for the future data or prediction, we should convert that data first into vectorize form. 
predict_comment = ["Click on this link"] vectorizer_predict = vectorizer.transform(predict_comment).toarray() clf.predict(vectorizer_predict) # + # predict_comment_2 = ["Awesome song love you!"] # vectorizer_predict_2 = vectorizer.transform(predict_comment_2).toarray() # clf.predict(vectorizer_predict_2) # - if clf.predict(vectorizer_predict) == 1: print('Spam') elif clf.predict(vectorizer_predict) == 0: print('Not Spam/ Ham') # ### Saving Model import pickle NBC_Model = open("NBC_Model.pkl", "wb") pickle.dump(clf, NBC_Model) NBC_Model.close()
ML Model/.ipynb_checkpoints/Spam_Comment_Detector-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Supervised learning of a simple genetic network in *E. coli* # Content here is licensed under a CC 4.0 License. The code in this notebook is released under the MIT license. # # # By <NAME>. # + import grn_learn as g import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import networkx as nx import matplotlib as mpl from scipy.stats import pearsonr import bebi103 #jbois' library import hvplot import hvplot.pandas import holoviews as hv from holoviews import dim, opts import bokeh_catplot import bokeh import bokeh.io from bokeh.io import output_file, save, output_notebook output_notebook() hv.extension('bokeh') np.random.seed(42) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" g.set_plotting_style() # %matplotlib inline # %config InlineBackend.figure_format = 'svg' # %load_ext autoreload # %autoreload # - # ### Load in data RNA-seq dataset. # Story of the data. Citation : y-ome. df = pd.read_csv('palsson_rna_seq.csv') df.head() data_ = df.copy() annot = data_.iloc[:, :2] annot.head() data = data_.iloc[:, 2:] # ### Data preprocessing. # Let's start our data analysis pipeline by normalizing and looking for null values . from sklearn.preprocessing import StandardScaler as scaler ss = scaler() norm_data = ss.fit_transform(data) # Let's check if the data has any null entries. norm_data= pd.DataFrame(norm_data, columns = data.columns) norm_data.describe() # It looks like there are none. We can quickly verify this using the `pd.notnull` function from pandas. np.all(pd.notnull(norm_data)) # All right, we're good to go ! # ### Load in PurR regulon datasets # Now we can go ahead and load the PurR regulon datasets. 
purr_regulondb = pd.read_csv('../../data/purr_regulon_db.csv') purr_hi = pd.read_csv('../../data/purr_regulon_hitrn.csv') print('The RegulonDB has %d nodes and the hiTRN has %d nodes \ for the PurR regulon genetic network respectively.'%(purr_regulondb.shape[0], purr_hi.shape[0])) # Let's extract the TGs as a `np.array` and get the genes that were discovered by the Palsson Lab. purr_rdb_tgs = np.unique(purr_regulondb.tg.values) len(purr_rdb_tgs) # + purr_hi_tgs = np.unique(purr_hi.gene.values) purr_hi_tgs = [gene.lower() for gene in purr_hi_tgs] # + new_purr_tgs = set(purr_hi_tgs) - set(purr_rdb_tgs) new_purr_tgs # - # We can see that indeed the hiTRN has 5 more interactions. Let's see if we can accurately predict this interactions directly from the RNA-seq data. # ### Visualize correlation # Before jumping to applying an ML model to our data, let's proceed to make a simple EDA. As I've said in the presentation the notion that makes this approach biologically plausible is that **genes that are coexpressed are probably corregulated**. A simple proxy for coexpression is correlation across expression conditions. # # Let's make a couple of plots to see that indeed the test genes that we're looking for are correlated with purr, and if this relationship looks linear. We'll use the Seaborn library in this case because it has a nice feat that allows to embed a statistical function into the plot. def corr_plot(data, gene_1, gene_2): """ Scatter plot to devise correlation. Parameters ----------- * data(pd.DataFrame): Input dataframe that contains for which to pull out data. * gene_x (str): gene_name of the genes to visualize. Returns --------- * fig (plt.figure) : sns.jointplot hardcoded to be a scatterplot of the genes. 
""" gene_1_data = data[data['gene_name'] == gene_1] assert gene_1_data.shape[0] ==1, 'Gene 1 not in dataset' gene_1_vals = gene_1_data.iloc[:, 3:].values.T gene_2_data = data[data['gene_name'] == gene_2] assert gene_2_data.shape[0] ==1, 'Gene 2 not in dataset' gene_2_vals = gene_2_data.iloc[:, 3:].values.T df_plot = pd.DataFrame({gene_1: gene_1_vals.flatten(), gene_2 : gene_2_vals.flatten()}) plt.figure(figsize = (6, 4)) fig = sns.jointplot(data = df_plot, x = gene_1, y = gene_2, stat_func = pearsonr, alpha = 0.5, color = 'dodgerblue'); return fig # We can now iterate over the putative TGs and plot them against PurR. In the following plots, each dot represents the expression level (in [FPKM](https://www.rna-seqblog.com/rpkm-fpkm-and-tpm-clearly-explained/), a proxy for the number of mRNA counts for a given gene) of both genes in a specific expression condition. for new_tg in new_purr_tgs: corr_plot(df, 'purr', new_tg); # We can see that some, but not all the genes are strongly correlated with PurR. This is normal because the TRN has a lot of feedback so it could be that despite that PurR regulates a given gene, there are potentially other TFs controlling those target genes. # ### Filter noise using PCA. # Principal component analysis is a widely used technique in unsupervised learning to perform dimensionality reduction. One can also use PCA as a "noise reduction" technique because projecting into a (smaller) latent space and reconstructing the dataset from this space with smaller dimensionality forces the algorithm to learn important features of the data. Specifically the latent space (the principal components) will maximize the variance across the dataset. # # First, let's explore the dimensionality of our RNA-seq dataset. 
from sklearn.decomposition import PCA

# Fit PCA with all components first, just to inspect the explained-variance curve.
# NOTE(review): `norm_data` is created earlier in the notebook (outside this
# chunk); presumably the normalized expression matrix -- confirm upstream.
pca = PCA()
pca = pca.fit(norm_data)

# +
cum_exp_var = np.cumsum(pca.explained_variance_ratio_)

# look at it
plt.figure(figsize = (6,4))
plt.plot(cum_exp_var*100, color = 'dodgerblue') #because LA
plt.xlabel('Number of dimensions', fontsize= 16)
plt.ylabel('Cumulative variance percentage', fontsize = 16)
plt.title('PCA Explained Variance');
# -

print('The first five principal components explain %.2f of the variance in the dataset.'%cum_exp_var[4])

# We can see that the dataset is of very small dimensionality. We can now project into this subspace that contains 95% of the variance and reconstruct the dataset.

# Keep enough components to retain 95% of the variance, then reconstruct the
# data from that latent space (this is the "noise filtering" step).
pca = PCA(0.95).fit(norm_data)
latent = pca.transform(norm_data)
reconstructed = pca.inverse_transform(latent)
# NOTE(review): column labels come from `data` while the row metadata below
# comes from `df` -- both are defined upstream; verify they are aligned.
recon_df= pd.DataFrame(reconstructed, columns = data.columns)

df.iloc[:, :2].shape, recon_df.shape

# Re-attach the first two (metadata) columns to the reconstructed values.
recon_df_ = pd.concat([df.iloc[:, :2], recon_df], axis = 1)
recon_df_.head()

# ### Visualize correlation again.

# Let's visualize the dataset again.

for new_tg in new_purr_tgs:
    corr_plot(recon_df_, 'purr', new_tg);

# We can see that in the reconstructed space, we've constrained the data to have a bigger covariance.

# ### Visualize in PCA space

# Given that we already have the projection of our dataset into a smaller dimension, we can also visualize all of the genes in the first two principal components.

# NOTE(review): `hv` is presumably HoloViews, imported earlier in the notebook.
hv.Points((latent[: , 0], latent[: , 1])).opts(xlabel = 'principal component 1', ylabel = 'principal component 2', color = '#1E90FF', size = 5, alpha = 0.15, padding = 0.1, width = 400)

# We cannot really see a specific structure in the first two components. Maybe a non-linear dimensionality reduction technique such as UMAP could do a better job to get the clusters in higher dimensions. We'll come back to that in the next tutorial.

# ### Annotate datasets

# Now that we have preprocessed our data we can proceed to annotate it. Specifically we want to label our data for each gene, if its inside the PurR regulon or not.
#
# First-off, let's generate our test set. We'll use a helper function that lets us filter from the dataframe.

def get_gene_data(data, gene_name_column, test_gene_list):
    """
    Extract data from specific genes given a larger dataframe.

    Parameters
    ------------
    * data (pd.DataFrame): large dataframe from where to filter.
    * gene_name_column (str): column to filter from in the dataset.
    * test_gene_list (array-like) : a list of genes you want to get.

    Returns
    ---------
    * gene_profiles (pd.DataFrame) : rows whose gene name is in
      `test_gene_list`, with duplicate rows removed.
    """
    # Vectorized rewrite: the original looped over every value in the column
    # and concatenated one sub-frame per match (O(n^2)); `isin` performs the
    # same selection in a single pass and preserves row order.
    gene_profiles = data[data[gene_name_column].isin(test_gene_list)].copy()
    gene_profiles.drop_duplicates(inplace = True)
    return gene_profiles

# Let's make a one hot encoded vector that corresponds to being an element of the PurR regulon.

one_hot = [1 if row in purr_hi_tgs else 0 for row in recon_df_['gene_name'].values]
recon_df_['output'] = one_hot
recon_df_.head()

test_purr_tgs = list(new_purr_tgs)
test = get_gene_data(recon_df_, 'gene_name', test_purr_tgs)
test.head()

# Bug fix: this cell previously evaluated `type(x)`, but no `x` is defined at
# this point (NameError); the intent was evidently to inspect the object just
# built.
type(test)

# Let's drop these test genes from the reconstructed dataset.

# Bug fix: `test_` was a typo for `test` (NameError).
recon_df_non_regulon = recon_df_.copy().drop(test.index.to_list())

# Nice! Now we can go ahead and add some "noise" to our test dataset, in the sense that we need to test if our algorithm can point out negative examples.

noise = recon_df_non_regulon.sample(n = 30, replace = False, axis = 0, random_state = 42)

# Let's merge both of this dataframes to get an unbiased test set.
df_test_unb = pd.concat([test, noise]) ## unbiased test
df_test_unb.shape

df_test_unbiased = df_test_unb.copy().reset_index(drop= True)
df_test_unbiased.head()
df_test_unbiased.shape

df_train = recon_df_non_regulon.copy()

# ### Train - test split

df_train.head()
df_test_unbiased.head()
df_train.shape
df_test_unbiased.shape

# Columns 0-1 are metadata and the last column is the one-hot regulon label.
X_train = df_train.iloc[:, 2: -1].values
y_train = df_train.iloc[:, -1].values

X_train[:5, :5]
y_train[:5]

# +
X_test = df_test_unbiased.iloc[:, 2:-1].values
y_test = df_test_unbiased.iloc[:, -1].values
# -

X_test[:5, :5]
y_test[:5]

# ### Balance dataset using SMOTE

pd.Series(y_train).value_counts()
pd.Series(y_test).value_counts()

# +
from imblearn.over_sampling import SMOTE

# resampling is done on training dataset only
# Bug fix: `fit_sample` was deprecated and later removed from
# imbalanced-learn; `fit_resample` is the supported API.
X_train_res, y_train_res = SMOTE(random_state = 42).fit_resample(X_train, y_train)
# -

# ### Linear SVM

from sklearn.svm import LinearSVC

linear_svm_clf = LinearSVC()
linear_svm_clf.fit(X_train_res, y_train_res)
predictions = linear_svm_clf.predict(X_test)

from sklearn.metrics import accuracy_score
accuracy_score(y_test, predictions)

from sklearn.metrics import classification_report
# Bug fix: the report was wrapped in `pd.DataFrame(print(...))`, which builds
# a DataFrame from None; printing the report is all that is needed.
print(classification_report(y_test, predictions))

predictions == y_test

# We can compare the predictions against the true labels element-wise above.

# ### AdaBoost
# (The original heading said "Random forest", but the classifier below is AdaBoost.)

from sklearn.ensemble import AdaBoostClassifier

ada = AdaBoostClassifier()
# NOTE(review): AdaBoost is fit on the *unbalanced* training data, unlike the
# SVM above which uses the SMOTE-resampled set -- confirm this is intentional.
ada.fit(X_train, y_train)
ada_pred = ada.predict(X_test)
print(classification_report(y_test, ada_pred))

# Probably overfit.

# ### Keras neural net.
from keras.models import Sequential
from keras.layers import Dense
from keras.metrics import categorical_accuracy

X_test.shape[1]

# +
model = Sequential()
# NOTE(review): a softmax activation on the hidden layer plus an MSE loss is
# an unusual setup for binary classification -- kept as-is to preserve
# behaviour, but sigmoid + binary_crossentropy would be the usual choice.
model.add(Dense(units=64, activation='softmax', input_dim= X_test.shape[1]))
model.add(Dense(units=1)) # one output
model.compile(loss='mse', optimizer='RMSprop', metrics= ['accuracy'])

history = model.fit(X_train_res, y_train_res, epochs=10, batch_size=32)

# Bug fix: the history key is 'acc' on old Keras but 'accuracy' on modern
# tf.keras; look up whichever one is present.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
accuracy = history.history[acc_key]
# -

# ### Cross-validation

# +
#from sklearn.model_selection import accuracy_score
# -

# Bug fix: this cell previously contained the incomplete statement
# `from sklearn.metrics` (a SyntaxError); `cross_val_score` -- used below --
# lives in sklearn.model_selection.
from sklearn.model_selection import cross_val_score

# +
# cross_val_score?
# -

cross_val_score(linear_svm_clf, X_train, y_train, cv = 5)

# ### Make pipeline

from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.preprocessing import StandardScaler

df_train.head()
df_test_unbiased.head()

df_master = pd.concat([df_train, df_test_unbiased])
df_master.tail()

# +
# make_pipeline?
# -

# Bug fix: `scaler` was never defined (NameError); a StandardScaler is the
# standard first step before a linear SVM.
pipe = make_pipeline(StandardScaler(), LinearSVC())
pipe

pipe.fit(X_train, y_train)
preds = pipe.predict(X_test)
preds == y_test

from sklearn.metrics import confusion_matrix
# Compute the confusion matrix once (the original called it twice) and plot
# it normalized by column (per-predicted-class proportions).
cm = confusion_matrix(y_test, preds)
sns.heatmap(cm / cm.sum(axis = 0), cmap = 'viridis_r')
code/tutorial/supervised_learning_genetic_network.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # PyTorch ML library # # An open source machine learning framework that accelerates the path from research prototyping to production deployment. # # ![PyTorch](https://upload.wikimedia.org/wikipedia/commons/9/96/Pytorch_logo.png) # + # conda install pytorch torchvision cudatoolkit=10.1 -c pytorch # - # ## Cheat Sheet # # * https://pytorch.org/tutorials/beginner/ptcheat.html # ### https://pytorch.org/get-started/locally/ from __future__ import print_function import torch x = torch.rand(5, 3) print(x) torch.cuda.is_available() # + # Cloud Partner Alternatives (mosty paid) # https://pytorch.org/get-started/cloud-partners/ # - # # Tutorial for those who know NumPy # # https://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html x = torch.empty(5, 3) print(x) x = torch.rand(5, 3) print(x) x = torch.zeros(5, 3, dtype=torch.long) print(x) x = torch.tensor([5.5, 3]) print(x) x = torch.tensor([[5.5, 3],[3.1,60.3]]) print(x) import numpy as np x = torch.tensor(np.random.rand(3,6,2)) print(x) x.size() # Addition: providing an output tensor as argument result = torch.empty(5, 3) y = torch.rand(5, 3) x = torch.rand(5, 3) torch.add(x, y, out=result) print(result) # adds x to y in place y.add_(x) print(y) # + # Any operation that mutates a tensor in-place is post-fixed with an _. # For example: x.copy_(y), x.t_(), will change x. 
# - # Resizing: If you want to resize/reshape tensor, you can use torch.view x = torch.randn(4, 4) y = x.view(16) z = x.view(-1, 8) # the size -1 is inferred from other dimensions print(x.size(), y.size(), z.size()) # If you have a one element tensor, # use .item() to get the value as a Python number x = torch.randn(1) print(x) print(x.item()) # ## Converting a Torch Tensor to a NumPy Array a = torch.ones(5) print(a) b = a.numpy() print(b,type(b)) # See how the numpy array changed in value. a.add_(1) print(a) print(b) # ## Converting NumPy Array to Torch Tensor # # See how changing the np array changed the Torch Tensor automatically # import numpy as np a = np.ones(5) b = torch.from_numpy(a) np.add(a, 1, out=a) print(a) print(b) a = np.ones(5) b = torch.tensor(a) np.add(a, 1, out=a) print(a) print(b) # + ## All the Tensors on the CPU except a CharTensor support converting to NumPy and back. # - # ## CUDA Tensors # Tensors can be moved onto any device using the .to method. # let us run this cell only if CUDA is available # We will use ``torch.device`` objects to move tensors in and out of GPU if torch.cuda.is_available(): device = torch.device("cuda") # a CUDA device object y = torch.ones_like(x, device=device) # directly create a tensor on GPU x = x.to(device) # or just use strings ``.to("cuda")`` z = x + y print(z) print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together! # ## Next: AutoGrad # * https://pytorch.org/tutorials/beginner/blitz/autograd_tutorial.html
PyTorch_ML_Library/PyTorch Introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split data = load_boston() X_train, X_test, y_train, y_test = train_test_split(data['data'], data['target']) from sklearn.preprocessing import StandardScaler, RobustScaler, QuantileTransformer from sklearn.feature_selection import SelectKBest, f_regression from sklearn.decomposition import PCA from sklearn.linear_model import Ridge scaler = StandardScaler() pca = PCA() ridge = Ridge() X_train = scaler.fit_transform(X_train) X_train = pca.fit_transform(X_train) ridge.fit(X_train, y_train) # Too repetitive! ... from sklearn.pipeline import Pipeline pipe = Pipeline([ ('scaler', StandardScaler()), ('reduce_dim', PCA()), ('regressor', Ridge()) ]) pipe = pipe.fit(X_train, y_train) print('Testing score: ', pipe.score(X_test, y_test)) print(pipe.steps[1][1].explained_variance_) # concerning PCA, let's evaluate accuracy variance with # components from 1,10 import numpy as np n_features_to_test = np.arange(1, 11) alpha_to_test = 2.0**np.arange(-6, +6) params = {'reduce_dim__n_components': n_features_to_test,\ 'regressor__alpha': alpha_to_test} from sklearn.model_selection import GridSearchCV gridsearch = GridSearchCV(pipe, params, verbose=1).fit(X_train, y_train) print('Final score is: ', gridsearch.score(X_test, y_test)) gridsearch.best_params_ # pipeline tuning - advanced scalers_to_test = [StandardScaler(), RobustScaler(), QuantileTransformer()] params = {'scaler': scalers_to_test, 'reduce_dim__n_components': n_features_to_test,\ 'regressor__alpha': alpha_to_test} params = [ {'scaler': scalers_to_test, 'reduce_dim': [PCA()], 'reduce_dim__n_components': n_features_to_test,\ 'regressor__alpha': alpha_to_test}, {'scaler': scalers_to_test, 'reduce_dim': 
[SelectKBest(f_regression)], 'reduce_dim__k': n_features_to_test,\ 'regressor__alpha': alpha_to_test} ] # launch gridsearch again gridsearch = GridSearchCV(pipe, params, verbose=1).fit(X_train, y_train) print('Final score is: ', gridsearch.score(X_test, y_test)); gridsearch.best_params_
w7/w7d1/Pipelines_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from pyspark.sql import SparkSession,DataFrame import os import numpy as np from pyspark.sql import functions as f from pyspark.sql.types import * import io import time from pyspark.sql import Row local=True # spark.rpc.message.maxSize if for write large csv file. The default value is 128, here we set it to 1024 if local: spark = SparkSession \ .builder.master("local[4]") \ .appName("SparkArrowCompression") \ .config('spark.jars.packages', 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1') \ .getOrCreate() else: spark = SparkSession \ .builder.master("k8s://https://kubernetes.default.svc:443") \ .appName("SparkArrowCompression") \ .config("spark.kubernetes.container.image", "inseefrlab/jupyter-datascience:master") \ .config("spark.kubernetes.authenticate.driver.serviceAccountName", os.environ['KUBERNETES_SERVICE_ACCOUNT']) \ .config("spark.executor.instances", "4") \ .config("spark.executor.memory","8g") \ .config("spark.kubernetes.namespace", os.environ['KUBERNETES_NAMESPACE']) \ .config('spark.jars.packages', 'org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1') \ .getOrCreate() spark.sparkContext.stop() print(spark.sparkContext.version) # ! kubectl get pods # ! 
kubectl get pods | grep sparkarrow | awk '{print $1}' | xargs kubectl delete pods # + data = [("xiaomi", "2007"), ("xiaomi 3G","2008"), ("xiaomi 3GS","2009"), ("xiaomi 4","2010"), ("xiaomi 4S","2011"), ("xiaomi 5","2012"), ("xiaomi 8","2014"), ("xiaomi 3GS","2009"), ("xiaomi 10","2017") ] df = spark.createDataFrame(data).toDF("key","value") # - df.show() df.printSchema() df.write \ .format("kafka") \ .option("kafka.bootstrap.servers","kafka-server.user-pengfei.svc.cluster.local:9092") \ .option("topic","test_topic") \ .save() # Subscribe to 1 topic, with headers df = spark \ .readStream \ .format("kafka") \ .option("kafka.bootstrap.servers", "kafka-server.user-pengfei.svc.cluster.local:9092") \ .option("subscribe", "test_topic") \ .option("includeHeaders", "true") \ .load() df = spark \ .read \ .format("kafka") \ .option("kafka.bootstrap.servers", "kafka-server.user-pengfei.svc.cluster.local:9092") \ .option("subscribe", "test_topic") \ .load() df.printSchema() df2 = df.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)","topic","partition","offset","timestamp","timestampType") df2.show(truncate=False)
notebook/SparkReadWriteKafka.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import cv2

cwd = os.getcwd() # get my path
# -

# Landmark file: one row per image stem, 68 (x, y) pairs per row.
# NOTE(review): row 20621 is skipped -- presumably a known-bad line; confirm.
facial_keypoints = pd.read_csv(cwd+'\\Dataset\\Facial 20k\\Landmarks\\all_landmarks.txt',
                               header=None, delim_whitespace=True, skiprows=[20621])

# +
path = cwd+'\\Dataset\\Facial 20k\\Cropped face\\18-35\\'
#path = cwd+'\\Dataset\\Facial 20k\\crop_part2\\'

keypoints = []
filenames = []
training_data = []
count = 0

# get keypoints for each image
for f_name in os.listdir(path):
    img_array = cv2.imread(os.path.join(path,f_name) ,cv2.IMREAD_GRAYSCALE) # convert to array
    # f_name[:-9] strips the filename suffix to recover the landmark-file key
    # -- TODO confirm the 9-character suffix convention against the dataset.
    points = facial_keypoints.loc[facial_keypoints[0] == f_name[:-9]]
    if not points.empty: # if keypoints are not missing
        # extract keypoints as a (68, 2) array of (x, y) coordinates
        keypoints = points.drop([0], axis = 1).to_numpy()
        keypoints = keypoints.reshape(68, 2)
        training_data.append([img_array, keypoints])
    else:
        count += 1

print("missing value: ", count)
# -

# Bug fix: this cell referenced the undefined name `facial_keypoints1`.
facial_keypoints.shape

# Sanity-check a sample: show the image with its keypoints overlaid.
x = training_data[2][0]
y = training_data[2][1]
plt.imshow(x, cmap='gray')
plt.scatter( y[ : , 0 ] , y[ : , 1 ] , c='yellow' )

X = []
y = []
for image,keypoints in training_data:
    X.append(image)
    y.append(keypoints)

# NOTE(review): assumes every cropped face is 200x200 grayscale -- confirm.
X = np.array(X).reshape(-1, 200, 200, 1)
X.shape
y = np.array(y)
y.shape

# +
import pickle

# Use context managers so the pickle files are always closed, even on error.
with open("X.pickle", "wb") as pickle_out:
    pickle.dump(X, pickle_out)

with open("y.pickle", "wb") as pickle_out:
    pickle.dump(y, pickle_out)
Facial Landmark Detection/.ipynb_checkpoints/Data Preparation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PyTorch # language: python # name: pytorch # --- # + # Author - <NAME> # Date - 04/05/18 # Version - 2.3 import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable import pandas as pd from sklearn.datasets import load_iris import numpy as np from sklearn import preprocessing import matplotlib.pyplot as plt #importing graph plotting functionality # import os # print(os.listdir("../input")) # Load dataset df = pd.read_csv("Datasets/car_evaluation.csv", names = ["buying","maint", "doors", "persons", "lug_boot","safety","class"]) # - # ## Preprocessing the data # # Here, the dataset contains of 6 attributes and 1 class column having 4 class values{unacc, acc, good, vgood}. As we are building a neural network we need to provide the neural node values it can read and not bias over a specific value of an attribute. Therefore we convert all the nominal/categorical data into numeric by using **pandas.get_dummies** function. This function will create additional columns of each values corresponding to each attribute, therefore increasing the number of total columns. ## get_dummies() implementation category_col =["buying","maint", "doors", "persons", "lug_boot","safety","class"] df = pd.get_dummies(df, columns=category_col) df.to_csv('Datasets/car_evaluation_preprocessed.csv',index=False) ## visualizing processed dataset print(df.shape) df.head(10) # #### Dividing the dataset into Attribute and labels, then spliting into train and test using crossvalidation X = df.iloc[:, 0:21].values y = df.iloc[:, 21:].values ## Normalizing data - Normalization refers to rescaling real valued numeric attributes into the range 0 and 1. 
X = preprocessing.scale(X)

from sklearn.model_selection import train_test_split
feature_train, feature_test, labels_train, labels_test = train_test_split(X, y, random_state = 42)
print ("Train:%d + Test:%d = Total:%d" % (len(feature_train),len(feature_test),len(feature_train)+len(feature_test)))

# ## Building the NN classifier using PyTorch

# +
feature_train_v = Variable(torch.FloatTensor(feature_train), requires_grad = False)
labels_train_v = Variable(torch.FloatTensor(labels_train), requires_grad = False)
feature_test_v = Variable(torch.FloatTensor(feature_test), requires_grad = False)
labels_test_v = Variable(torch.FloatTensor(labels_test), requires_grad = False)


class LinearClassifier(nn.Module):
    """Single linear layer (21 inputs -> 4 class scores) followed by softmax."""

    def __init__(self):
        super(LinearClassifier, self).__init__()
        self.h_layer = nn.Linear(21, 4) # 21 input features, 4 output classes
        # Bug fix: nn.Softmax() without `dim` is deprecated and warns on
        # modern PyTorch; dim=1 normalizes over the class axis of an (N, 4)
        # batch, which is what the implicit behaviour was for 2-D input.
        self.s_layer = nn.Softmax(dim=1)

    def forward(self, x):
        y = self.h_layer(x)
        p = self.s_layer(y)
        return p


# declaring the classifier to an object
model = LinearClassifier()
# BCELoss compares the softmax probabilities against the one-hot labels
loss_fn = nn.BCELoss()
# Renamed from `optim` to avoid shadowing the `torch.optim` module alias
# imported at the top of the notebook.
optimizer = torch.optim.SGD(model.parameters(), lr = 0.01)
# -

# #### Now we fit the training data into the model, here we do 5000 iterations and collect the loss of each iteration

all_losses = []
for num in range(5000):
    pred = model(feature_train_v) # predict
    loss = loss_fn(pred, labels_train_v) # calculate loss
    # .item() extracts the detached Python scalar; appending `loss.data` kept
    # whole tensors alive in the list.
    all_losses.append(loss.item())
    optimizer.zero_grad() # zero gradients so they do not accumulate
    loss.backward() # compute gradients of the loss
    optimizer.step() # update the weights

# ## Visualizing the loss per each iteration

# Bug fix: `np.float` was removed in NumPy 1.24; the builtin `float` is the
# documented replacement.
all_losses = np.array(all_losses, dtype = float)
all_losses
plt.plot(all_losses)
plt.show()
print(pred[3])
print(labels_train_v[3])
print(all_losses[-1])

# ## Accuracy result from testing data on the model
#
# Now we fit the test dataset on our model and find the score of each correctly labeled data by the Neural Network model and find the accuracy.
from sklearn.metrics import accuracy_score

# Run every held-out sample through the trained network.
predicted_values = [model(sample) for sample in feature_test_v]

# A prediction counts as correct when the arg-max of the softmax output
# matches the arg-max of the one-hot ground-truth label.
score = 0
for truth, prediction in zip(labels_test, predicted_values):
    if np.argmax(truth) == np.argmax(prediction.data.numpy()):
        score = score + 1

accuracy = float(score / len(predicted_values)) * 100
print ('Testing Accuracy Score is ' + str(accuracy))
Script.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # DBSCAN Density Clustering
#
# Important concepts:
# - x is a core point if there are at least minpts in its e-neighborhood, where e is the euclidian distance, and both minpts and e must be specified ahead of time.
# - border point is defined as a point that does not meet the minpts threshold, but belongs to the neighborhood of some core point.
# - if a point is neither a border or a core point (not in the neighborhood of a core point), then it is a noise point.
# - two points x,y are density connected if there is a point z, and both x and y are density reachable from z.
# - x is directly reachable from point y if x is e-distance away from y, and y is a core point

import numpy as np
import matplotlib.pyplot as plt
from random import random # [0.0,1.0)
import math
from sklearn.utils import shuffle

# +
# generating random data to visualize density clustering
noise_x = np.array([random() for i in range(60)])*10
noise_y = np.array([random() for i in range(60)])*10

block_1_x = np.concatenate([np.array([random() for i in range(30)])+1,np.array([random() for i in range(20)])+2,np.array([random() for i in range(20)])+2])
block_1_y = np.concatenate([np.array([random() for i in range(30)])*3+6,np.array([random() for i in range(20)])+8,np.array([random() for i in range(20)])+9])

block_2_x = np.concatenate([np.array([random() for i in range(50)])*2+1])
block_2_y = np.concatenate([np.array([random() for i in range(50)])*2+1])

block_3_x = np.concatenate([np.array([random() for i in range(50)])+5])
block_3_y = np.concatenate([np.array([random() for i in range(50)])*5+1])

block_4_x = np.concatenate([np.array([random() for i in range(70)])*5+5,np.array([random() for i in range(50)])+8])
block_4_y = np.concatenate([np.array([random() for i in range(70)])+8,np.array([random() for i in range(50)])*5+3])

x = np.concatenate([noise_x,block_1_x,block_2_x,block_3_x,block_4_x]) # concatenate the blocks
y = np.concatenate([noise_y,block_1_y,block_2_y,block_3_y,block_4_y])
# -

x,y = shuffle(x,y)

plt.scatter(x,y)
plt.title("Unassigned data")
plt.xlabel("x")
plt.ylabel("y")
plt.show()

data_points = [(x[i],y[i]) for i in range(x.shape[0])] # points represented as tuples
x.shape,y.shape,len(data_points)

def euclidian_distance(point1,point2):
    """Return the euclidian distance between two 2-D points.

    math.hypot computes the same value as sqrt(dx**2 + dy**2) but is
    numerically safer (no intermediate overflow/underflow).
    """
    return math.hypot(point1[0]-point2[0], point1[1]-point2[1])

def dbscan(data_points,e=0.7,min_points=5):
    """Assign a cluster label to every input point (DBSCAN).

    Parameters
    ----------
    data_points : list of (x, y) tuples. NOTE: points are used as dict keys,
        so exact duplicate points collapse into a single entry.
    e : maximum euclidian distance for two points to count as neighbors.
    min_points : minimum number of neighbors for a point to be a core point.

    Returns
    -------
    list of int labels, one per distinct input point in input order;
    0 marks noise, 1..k are cluster ids.
    """
    core = set()
    all_points = {} # holds the group assignments, and list of similar points
    # O(n^2) pass: collect each point's e-neighborhood and flag core points.
    for i in range(len(data_points)):
        test_point = data_points[i]
        all_points[test_point] = {"k":0,"neighbors":[]} # k:0 represents noise points
        for j in range(len(data_points)):
            point = data_points[j]
            if i != j and euclidian_distance(test_point,point) <= e: # compute the distance
                all_points[test_point]["neighbors"].append(point)
        if len(all_points[test_point]["neighbors"]) >= min_points: # found a core point
            core.add(test_point)
    k = 0 # for assigning the groups
    stack = []
    # Grow one cluster per remaining core point, flood-filling through
    # density-connected core points.
    while len(core) > 0:
        k += 1
        stack.append(core.pop())
        while len(stack) > 0:
            a_core = stack.pop()
            # Bug fix: label the core point itself. Previously a core was
            # only labeled when it appeared in another core's neighbor list,
            # so the lone core of a single-core cluster stayed marked as
            # noise (k=0).
            all_points[a_core]["k"] = k
            for point in all_points[a_core]["neighbors"]:
                if point in core: # the neighbor is also a core point
                    core.remove(point)
                    stack.append(point)
                all_points[point]["k"] = k
    return [all_points[key]["k"] for key in all_points.keys()]

labels = dbscan(data_points)

plt.scatter(x,y,c=labels) # ultra dark points are noise
plt.title("Assigned data")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
clustering/Density_clustering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from message import Message from key import Key import numpy as np alpha = 'abcdefghijklmnopqrstuvwxyz' message = Message(alpha) key = np.array(range(26)) key[0] = 25 key[25] = 0 # %%timeit message.map1(key) # %%timeit message.map2(key) from message import Message alpha = 'abcdefghijklmnopqrstuvwxyz' my_message = Message(alpha) # %%timeit my_message.frequencies(alpha) from message import Message from key import Key import numpy as np with open('sample.txt','r') as source: text = source.read() my_message = Message(text) my_message = my_message.filter() encipher_key = Key() encipher_key.random_key() enciphered_message = my_message.map(encipher_key) enciphered_message.text decipher_key = encipher_key.invert() deciphered_message = enciphered_message.map(decipher_key) #deciphered_message.text decipher_key.map # + import numpy as np import random default_alpha = 'abcdefghijklmnopqrstuvwxyz ' class Key(object): def __init__(self, map=[]): self.map = map def substitute(self, new_map): map_out = np.zeros(self.map.shape[0]) for x in range(self.map.shape[0]): map_out[x] = new_map[self.map[x]] return Key(map_out) def invert(self): inverted_map = np.zeros(self.map.shape[0]) for x in range(self.map.shape[0]): inverted_map[self.map[x]] = x return Key(inverted_map) def obtain_key(self, alpha, beta): key = np.zeros(len(beta)) for x in range(len(beta)): key[x] = alpha.find(beta[x]) return Key(key) def random_key(self, alpha = default_alpha): self.map = np.array(range(len(alpha))) random.shuffle(self.map) key = Key(np.array([1,2,0])) inverted_key = key.invert() inverted_key.map # + import numpy as np class Message(object): def __init__(self, text, alpha = 'abcdefghijklmnopqrstuvwxyz '): self.text = text self.alpha = alpha def map(self, key): new_message = '' for x in 
self.text: x_index = self.alpha.find(x) if x_index == -1: new_message += x else: mapped_to = self.alpha[key.map[x_index]] new_message += mapped_to return Message(new_message) def frequencies(self, alpha = 0): if alpha == 0: alpha = self.alpha counts = np.zeros([len(alpha)]) for i, x in enumerate(alpha): counts[i] = self.text.count(x) rates = counts/len(self.text) return rates def filter(self, alpha = 0): if alpha == 0: alpha = self.alpha filtered_message = '' for x in self.text.lower(): if x in alpha: filtered_message += x return Message(filtered_message, self.alpha) from key import Key import numpy as np with open('sample.txt','r') as source: text = source.read() my_message = Message(text) my_message = my_message.filter() #encipher_key = Key() #encipher_key.random_key() map = np.array(range(27)) encipher_key = Key(map) enciphered_message = my_message.map(encipher_key) enciphered_message.text decipher_key = encipher_key.invert() deciphered_message = enciphered_message.map(decipher_key) deciphered_message.text # + import numpy as np class Message(object): def __init__(self, text, alpha = 'abcdefghijklmnopqrstuvwxyz '): self.text = text self.alpha = alpha def map(self, key): new_message = '' for x in self.text: x_index = self.alpha.find(x) if x_index == -1: new_message += x else: mapped_to = self.alpha[key.map[x_index]] new_message += mapped_to return Message(new_message) def frequencies(self, alpha = 0): if alpha == 0: alpha = self.alpha counts = np.zeros([len(alpha)]) for i, x in enumerate(alpha): counts[i] = self.text.count(x) rates = counts/len(self.text) return rates def filter(self, alpha = 0): if alpha == 0: alpha = self.alpha filtered_message = '' for x in self.text.lower(): if x in alpha: filtered_message += x return Message(filtered_message, self.alpha) from key import Key import numpy as np with open('sample.txt','r') as source: text = source.read() my_message = Message(text) my_message = my_message.filter() encipher_key = Key() encipher_key.random_key() 
enciphered_message = my_message.map(encipher_key) enciphered_message.text decipher_key = encipher_key.invert() deciphered_message = enciphered_message.map(decipher_key) deciphered_message.text # + import numpy as np import random default_alpha = 'abcdefghijklmnopqrstuvwxyz ' class Key(object): def __init__(self, map=[]): self.map = map def substitute(self, new_map): map_out = np.zeros(self.map.shape[0]) for x in range(self.map.shape[0]): map_out[x] = new_map[self.map[x]] return Key(map_out) def invert(self): inverted_map = np.zeros(self.map.shape[0], dtype = np.int8) for x in range(self.map.size): inverted_map[self.map[x]] = x return Key(inverted_map) def obtain_key(self, alpha, beta): key = np.zeros(len(beta)) for x in range(len(beta)): key[x] = alpha.find(beta[x]) return Key(key) def random_key(self, alpha = default_alpha): self.map = np.array(range(len(alpha))) random.shuffle(self.map) def frequency_key(self, natural_frequencies, observed_frequencies): natural_indices_sorted = np.argsort(natural_frequencies) observed_indices_sorted = np.argsort(observed_frequencies) frequency_key = np.zeros(natural_frequencies.size, dtype = np.int8) for i, x in enumerate(natural_indices_sorted): frequency_key[x] = observed_indices_sorted[i] return Key(frequency_key) def frequency_key2(self, natural_frequencies, observed_frequencies): natural_indices_sorted = np.argsort(natural_frequencies) observed_indices_sorted = np.argsort(observed_frequencies) frequency_key = np.zeros(natural_frequencies.size, dtype = np.int8) for x in range(frequency_key.size): frequency_key[natural_indices_sorted[i]] = observed_indices_sorted[i] return Key(frequency_key) key = Key() new_key=key.frequency_key(frequencies,frequencies) # - key = Key() # %%timeit new_key=key.frequency_key(frequencies,frequencies) # %%timeit new_key=key.frequency_key2(frequencies,frequencies) np.zeros(3) a a = np.array([1,3,2]) for x, i in enumerate(a): print x, i
character_based/test_pad.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Linux/Ubuntu # # ## 1 What is Linux # # ### 1.1 History of Linux # # **Unix** was initially designed and implemented at AT&T Bell Labs 1969 by <NAME>, <NAME>, <NAME>, and # <NAME> # # * First released in 1971 and was written in assembly language # # * Re-written in C by <NAME> in 1973 for better portability (with exceptions to the kernel and I/O) # # **Linux Kernel**: [Linus Torvalds](https://baike.baidu.com/item/林纳斯·本纳第克特·托瓦兹/1034429?fromtitle=Linus%20Torvalds&fromid=9336769&fr=aladdin), a student at University of Helsinki began working on his own operating system, which became the "Linux Kernel", 1991 # # * Linus released his `kernel` for `free download` and helped further developmen # # * Linux as the kernel only, applications on top of the kernel were still missing # # **Linux System**: Linux kernel + `software` # # * `GNU/Linux(Linux)`: Linux kernel + software from the GNU project released under the `GNU Public License (GPL)`:Free to use, # # * The GNU Project by <NAME> started in 1983 Creating a “complete Unix-compatible software system” with entirely free software # # <b style="color:blue">Linux is everywhere</b> # # It appears in many different architectures, from mainframes to `server` to `desktop` to `mobile` and on a staggeringly `wide variety of hardware`. # # * **Linux** powers 100% of the world’s **supercomputers**, # # * most of the **servers** powering the Internet, the majority of financial trades worldwide # # * over two billion **Android** devices. 
# # ### 1.2 What is a Kernel # # The core component of an OS # # * Manage the system’s resources, memory, file systems # # * Provide the lowest level abstraction layer to upper layer components # # * Inter-process communications and system calls are used to make services available # # ### 1.3 What is a Shell # # An application software running `on top of the kernel` and provides a powerful `interface` to the `system` # # Process user’s commands, gather input from user and execute programs # # `Types of shell` with varied features # # * sh(Bourne shell) # # * csh(C shell) # # * ksh(Korn shell) # # * `bash(Bourne Again shell)` # # The simple **bash** command # ``` # $ls -l /usr/lib # ``` # ![Linux-shell](./img/Linux-shell.png) # # ![Linux-OS](./img/Linux-OS.jpg) # # ## 2 Linux Distributions # # * **Essential components**: Linux kernel + GNU system utilities + installation scripts + management utilities etc. # # * Many software vendors release their own packages, known as `distributions` # # * Debian, [Ubuntu](https://www.ubuntu.com/), Linux Mint,[Ubuntukylin](http://www.ubuntukylin.com/) # # * Red Hat,[Fedora](https://getfedora.org/en/), CentOS, Scientific Linux # # * Slackware, [OpenSUSE](https://www.opensuse.org/) # # * Gentoo # # * **Mobile** OS: [Android](https://en.wikipedia.org/wiki/Android_(operating_system))(Google),[Tizen](https://www.tizen.org/)(The Linux Foundation) # # **[DistroWatch](https://distrowatch.com/)** # # DistroWatch is a website dedicated to talking about, reviewing and keeping up to date with open source operating systems. # # This site particularly focuses on `Linux distributions` and flavours of BSD, though other open source operating systems are sometimes discussed. # # ## 3 Desktop Environment # # Linux distributions offer a variety of **`desktop environment`**: # # **GNOME,KDE,MATE,Xfce** # # * MATE and Xfce are the more lightweight desktop environments. 
# # ![Linux-desktop](./img/Linux-desktop.jpg) # # ## 4 Linux is important for engineer and scientist # # Download stats across **all packages** on PyPI: # # https://pypistats.org/packages/__all__ # # The download proportion of **Linux >80%** # # ![pypi-os](./img/pypi-os.jpg) # # Let us to get the PyPI downloads by operating system of packages with `pypistats` # # ``` # >python -m pip install pypistats # ``` # ``` # >pypistats system package-name # ``` # >Note:`null` value is generated by `monitoring tools` and should not be counted as user's downloads. # # **The PyPI downloads by operating system of SciPy packages**: # !pypistats system numpy # !pypistats system scipy # !pypistats system matplotlib # **The PyPI downloads by operating system of IAPWS packages**: # !pypistats system iapws # !pypistats system seuif97 # The **maximum** proportion of the operating system is **Linux.** # # The **proportion** of using Linux operating system( <b style="color:blue">>70%</b>) was **higher than** Windows and others. # # **Linux is important for engineer and scientist!** # # ## 5 Ubuntu # # [Ubuntu](https://www.ubuntu.com/) is a free and open-source operating system and [Linux](https://en.wikipedia.org/wiki/Linux) distribution based on [Debian](https://www.debian.org/). Ubuntu is offered in three official editions: <b style="color:blue">Ubuntu Desktop</b> for personal computers, <b style="color:blue">Ubuntu Server</b> for servers and the cloud, and <b style="color:blue">Ubuntu Core</b> for Internet of things devices and robots. # # Ubuntu is produced by **Canonical** and the developer community, under a meritocratic governance model. # # Ubuntu is named after the Southern African philosophy of ubuntu (literally, 'human-ness'), which Canonical suggests can be loosely translated as "humanity to others" or "I am what I am because of who we all are" # # * **New releases** of Ubuntu occur every **6** months. 
# # # * The **long-term support (LTS)** releases occur every **2** # # * The most recent LTS is `18.04 LTS (Bionic Beaver)`,supported for ten years. # # [优麒麟(Ubuntu Kylin)](http://www.ubuntukylin.com/) # # 由中国 CCN(由CSIP、Canonical、NUDT三方联合组建)开源创新联合实验室与天津麒麟信息技术有限公司主导开发的全球开源项目,其宗旨是通过研发用户友好的桌面环境以及特定需求的应用软件,为全球 Linux 桌面用户带来非凡的全新体验! # 优麒麟操作系统是 Ubuntu 官方衍生版,得到来自 Debian、Ubuntu、Mate、LUPA 等国际社区及众多国内外社区爱好者的广泛参与和热情支持 # # [银河麒麟](http://www.kylinos.cn/) # # 银河麒麟操作系统及相关衍生产品已成功应用于国防、政务、电力、金融、能源、教育等行业,基于银河麒麟操作系统和飞腾CPU的自主可控产品及方案已经成为我国自主安全可控信息系统的核心技术应用。 # # ![UbuntuKylin](./img/ubuntukylin.jpg) # # # # [Raspberry Pi](https://www.raspberrypi.org/) # # Our mission is to put the power of computing and digital making into the hands of people all over the world. We do this so that more people are able to harness the power of computing and digital technologies for work, to solve problems that matter to them, and to express themselves creatively. # # **Raspbian** is our official operating system for all models of the Raspberry Pi. Download it here, or use NOOBS, our easy installer for Raspbian and more. # # ![raspberryp](./img/raspberrypi.jpg) # # # **Ubuntu on Windows** # # you may install `Ubuntu on Windows` through `Windows Store` to use Ubuntu Terminal and run Ubuntu command line utilities include `bash,ssh,git,apt and any more`. # # ![UbuntuOnWindows](./img/UbuntuOnWindows.jpg) # ## 6 Linux File System # # ### 6.1 Files # # A simple description of the UNIX system, also applicable to Linux, is this: # # * **"On a UNIX system, everything is a file; if something is not a file, it is a process."** # # **Sorts of files** # # Most files are just files, called **regular files**(普通文件); they contain normal data, for example text files, executable files or programs, input for or output from a program and so on. # # While it is reasonably safe to suppose that **everything you encounter on a Linux system is a file**, there are some exceptions. 
# # * **Directories** files that are lists of other files. # # * **Special** files: the mechanism used for input and output. Most special files are in `/dev` # # * **Links** : a system to make a file or directory visible in multiple parts of the system's file tree. # # * **(Domain) sockets**: a special file type, similar to TCP/IP sockets, providing inter-process networking protected by the file system's access control. # # * **Named pipes:** act more or less like sockets and form a way for processes to communicate with each other, without using network socket semantics. # # # ### 6.2 File Directory Structure # # * All`files` are arranged in `directories`. # # * These `directores` are organized into the `file system` # # ![Linux File System](./img/linux-file-system.jpg) # # # **Important Directories** # # # * /bin: contains files that are essential for system operation, available for use by all # users. # # * /lib,/lib64: contains libraries that are essential for system operation, available for use by # all users. # # * /var: used to store files which change frequently (system level not user level) # # * /etc: contains various system configurations # # # * **/dev**: contains various **devices** such as hard disk, CD-ROM drive etc(**In Linux, everything is a file**) # # # * /sbin: same as bin but only accessible by root # # * /tmp: temporary file storage # # # * **/boot**: contains `bootable kernel and bootloader` # # # * **/usr:** contains user documentations, binaries, libraries etc # # # * **/home:** contains home directories of **all users**. # # * This is the directory where you are at when you login to a Linux/UNIX system. # # ![dev](./img/linux-dev.jpg) # # ### 6.3 File Path # # File Path(Definition): position/address in the `directory tree` # # **Absolute path** # # `Uniquely` defined and does **NOT depend on the current path** # # **Relative path** # # **Depend on the current location** in the directory tree # # * . is the current working directory # # * .. 
is one directory up # # # ### 6.4 Linux is Case Sensitive # # All names are **case sensitive** # # * Commands, variables, files etc. # # Example: `MyFile.txt, myfile.txt, MYFILE.TXT` are three different files in Linux # # # ### 6.5 Linux File Permission # # Designed as the multi **user** environment, the **access restriction** of files to other users on the system is embedded. # # Three types of **file permission** # # * Read (r) # # * Write (w) # # * Execute (x) # # Three types of **user** # # * User (u) (owner) # # * Group (g) (group members) # # * World (o) (everyone else on the system) # # Each file in Linux has the following attributes: # # * `Owner permissions`: determine what actions the owner of the file can perform on a file # # * `Group permissions`: determine what actions a user, who is a member of the group that a file belongs to, can perform on a file # # * `Other(world) permissions`: indicate what action all other users can perform on a file # # ![UbuntuOnWindows](./img/UbuntuOnWindows.jpg) # # The `-l`option to **ls** displays the file type,using the **first column** indicates the type of a **file/dir/link** # # * `d`: for directory # # * `l`: for `symbolic link`(符号链接(软链接):将路径名链接到一个文件) # # * `-` for normal file # # > **A symbolic link** is a `file` that links to another file or directory using its path. you can think of a symbolic link as `a shortcut` to a file or directory (folder). symbolic links may be used in the command line, or in a script or another program. 
# # ![linux-file-permissions.jpg](./img/linux-file-permission.jpg) # # ### 6.7 Changing File Permission # # **chmod** is a *NIX command to change permissions on a file # # Usage: # ```bash # chmod <option> <permissions> <file or directory name> # ``` # * –R: change permission recursively in a directory(all files in a directory) # # **chmod in Symbolic Mode:** # # |Chmod| operator Description| # |:-------:|:--------:| # |+ |Adds the designated permission(s) to a file or directory| # |- |Removes the designated permission(s) from a file or directory| # |= |Sets the designated permission(s) and removes other permission(s| # # ```bash # chmod u-x filename # ``` # + # %%file ./demo/src/hello.c /* gcc -o hello hello.c */ #include <stdio.h> int main() { printf("C says Hello, world!\n"); return 0; } # - # !gcc -o ./demo/bin/hello ./demo/src/hello.c # ![](./img/linux-hello.png) # ## 7 Programming C/C++ Under Linux # # Real time deformable face tracking in C++ with OpenCV 3. # # https://github.com/kylemcdonald/FaceTracker # # ## 8 Installing Ubuntu for Programming with Python,C/C++ # # https://github.com/PySEE/home/blob/S2019/guide/Ubuntu-Python-CPP(Chinese).md # # Reference # # * [How to install and Get Started with Ubuntu Desktop 16.04LTS](http://www3.ntu.edu.sg/home/ehchua/programming/howto/Ubuntu_HowTo.html) # # * edX: Introduction to Linux https://www.edx.org/course/introduction-to-linux # # * Ubuntu https://www.ubuntu.com/ # # * Ubuntukylin https://www.ubuntukylin.com/ # #
notebook/Unit8-6-Ubuntu.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # build ofa resnet50 from ofa.model_zoo import ofa_net ofa_network = ofa_net('ofa_resnet50', pretrained=True) # + pycharm={"name": "#%%\n"} # accuracy predictor import torch from ofa.nas.accuracy_predictor import AccuracyPredictor, ResNetArchEncoder from ofa.utils import download_url image_size_list = [128, 144, 160, 176, 192, 224, 240, 256] arch_encoder = ResNetArchEncoder( image_size_list=image_size_list, depth_list=ofa_network.depth_list, expand_list=ofa_network.expand_ratio_list, width_mult_list=ofa_network.width_mult_list, base_depth_list=ofa_network.BASE_DEPTH_LIST ) acc_predictor_checkpoint_path = download_url( 'https://hanlab.mit.edu/files/OnceForAll/tutorial/ofa_resnet50_acc_predictor.pth', model_dir='~/.ofa/', ) device = 'cuda:0' if torch.cuda.is_available() else 'cpu' acc_predictor = AccuracyPredictor(arch_encoder, 400, 3, checkpoint_path=acc_predictor_checkpoint_path, device=device) print('The accuracy predictor is ready!') print(acc_predictor) # + pycharm={"name": "#%%\n"} # build efficiency predictor from ofa.nas.efficiency_predictor import ResNet50FLOPsModel efficiency_predictor = ResNet50FLOPsModel(ofa_network) # + pycharm={"name": "#%%\n"} # search import random for i in range(10): subnet_config = ofa_network.sample_active_subnet() image_size = random.choice(image_size_list) subnet_config.update({'image_size': image_size}) predicted_acc = acc_predictor.predict_acc([subnet_config]) predicted_efficiency = efficiency_predictor.get_efficiency(subnet_config) print(i, '\t', predicted_acc, '\t', '%.1fM MACs' % predicted_efficiency)
tutorial/ofa_resnet50_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Analysis of SugarScape trade-price experiments: compares the "network",
# "standard" and "explicit" model variants over 100 runs (survivor counts,
# wall time, and the standard deviation of log trade prices, "SDLM").

# +
import pickle
import matplotlib.pyplot as plt
from scipy.stats.mstats import gmean
import seaborn as sns
from statistics import stdev
from math import log
import numpy as np
from scipy import stats
# %matplotlib inline
# -

# Per-run {step: [trade prices]} dictionaries pickled by the simulations.
price_100c = pickle.load(open("total_price_non.p","rb"))
price_100 = pickle.load(open("C:\\Users\\ymamo\Google Drive\\1. PhD\\Dissertation\\SugarScape\Initial\\NetScape_Elegant\\total_price1.p", "rb"))

# +
from collections import defaultdict

def make_distro(price_100):
    """Pool the per-step standard deviation of log trade prices (SDLM)
    across every run in *price_100*.

    *price_100* maps run-id -> {step: [trade prices]}.  Steps with fewer
    than two trades are skipped, since a standard deviation is undefined
    for n < 2.  Returns a flat list of SDLM values over all runs/steps.
    """
    all_stds = []
    for run, output in price_100.items():
        for step, prices in output.items():
            log_pr = [log(p) for p in prices]
            if len(log_pr) >= 2:
                all_stds.append(stdev(log_pr))
    return all_stds
# -

price_cluster = make_distro(price_100c)
price_norm = make_distro(price_100)

# +
# SDLM distribution: hierarchy vs. no-hierarchy condition.
fig7, ax7 = plt.subplots(figsize = (7,7))
ax7.hist(price_cluster, 500, label = "Hierarchy")
ax7.hist(price_norm, 500, label = "No Hierarchy")
plt.title("Network Approach:\nPrice Distribution of SDLM of 100 Runs", fontsize = 20, fontweight = "bold")
plt.xlabel("SDLM of Step", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency of SDLM", fontsize = 15, fontweight = "bold")
#plt.xlim(.75,2)
#plt.ylim(0,5)
plt.legend()
# +
from statistics import mean

# Survivor counts (_s) and wall-times (_t) for the three model variants.
stan_multi_s = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Standard\\stan_multi_sur.p", "rb"))
stan_multi_t = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Standard\\stan_multi_time.p", "rb"))
brute_multi_s = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Brute\\brute_multi_sur.p", "rb"))
brute_multi_t = pickle.load(open("C:\\Users\\ymamo\\Google Drive\\1. PhD\\Dissertation\\SugarScape\\NetScape_Brute\\brute_multi_time.p", "rb"))
net_multi_s = pickle.load(open("net_multi_sur_non.p", "rb"))
net_multi_t = pickle.load(open("net_multi_time_non.p", "rb"))

net_mean = mean(net_multi_s)
brute_mean = mean(brute_multi_s)
stan_mean = mean(stan_multi_s)
net_time = round(mean(net_multi_t),2)
brute_time = round(mean(brute_multi_t),2)
stan_time = round(mean(stan_multi_t),2)
# -

# Survivor-count t-tests against the standard approach.
# NOTE(review): scipy's ttest_ind p-value is already two-sided, so the
# "* 2" doubling below looks suspect -- confirm the intended test before
# quoting these numbers.
t, p = stats.ttest_ind(stan_multi_s,brute_multi_s)
brute_p = round(p * 2, 3)
t2, p2 = stats.ttest_ind(stan_multi_s,net_multi_s)
net_p = round(p2 * 2, 3)  # BUG FIX: previously reused p from the brute-force test
print (net_p, brute_p)

fig5, ax5 = plt.subplots(figsize=(7,7))
plt.hist(net_multi_s, label = "Network Approach")
plt.hist(stan_multi_s, label = "Standard Approach")
plt.hist(brute_multi_s, label = "Explicit Approach")
# BUG FIX: the "Explicit mean" annotation previously repeated stan_mean.
plt.text(56.5, 28.5, "Network mean: "+str(net_mean) +"\nStandard mean: " + str(stan_mean)+ "\nExplicit mean: "+str(brute_mean))
plt.legend()
plt.title("Survivor Histogram of 100 Runs, 1000 Steps \nLink Threshold 10; with Hierarchy", fontweight = "bold", fontsize = 15)

# Wall-time t-tests against the standard approach.
t, p = stats.ttest_ind(stan_multi_t,brute_multi_t)
brute_t_p = round(p * 2, 10)  # BUG FIX: was the tuple (p * 2, 10); round() matches the cell above
t2, p2 = stats.ttest_ind(stan_multi_t,net_multi_t)
net_t_p = round(p2 * 2, 10)  # BUG FIX: was the tuple (p * 2, 10) with the wrong p-value
print (net_t_p, brute_t_p)

fig6, ax6 = plt.subplots(figsize=(7,7))
plt.hist(net_multi_t, label = "Network Approach")
plt.hist(stan_multi_t, label = "Standard Approach")
plt.hist(brute_multi_t, label = "Explicit Approach")
#plt.text(78, 25, "Network p-value: "+str(net_t_p) +"\nExplicit p-value: "+str(brute_t_p))
plt.legend()
plt.title("Time Histogram of 100 Runs, 1000 steps \nLink Threshold 10; with Hierarchy", fontweight = "bold", fontsize = 15)
plt.text(70, 24, "\nNetwork Mean: "+str(net_time) +"\nStandard Mean: "+str(stan_time) + "\nExplicit Approach: "+str(brute_time))

# Drill into a single run of the hierarchy condition.
ind_e = price_100c["Run95"]

# +
## Calculate price
# +
# Geometric-mean trade price per step.
x = []
y = []
for st, pr in ind_e.items():
    #if step <=400:
    x.append(st)
    y.append(gmean(pr))
y[0]
# -
fig, ax = plt.subplots(figsize = (7,7))
ax.scatter(x,y)
plt.title("Network Approach with Hierarchy:\nMean Trade Price", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Price", fontsize = 15, fontweight = "bold")

# Trade volume (number of trades) per step.
x_vol = []
y_vol = []
total = 0
for s, p in ind_e.items():
    #if step <=400:
    x_vol.append(s)
    y_vol.append(len(p))
    total += len(p)
total

fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.hist(y_vol, 100)
plt.title("Network Approach with Hierarchy:\nTrade Volume Histogram", fontsize = 20, fontweight = "bold")
plt.xlabel("Trade Volume of Step", fontsize = 15, fontweight = "bold")
plt.ylabel("Frequency Trade Volume", fontsize = 15, fontweight = "bold")
#plt.ylim(0,400)

fig2, ax2 = plt.subplots(figsize = (7,7))
ax2.plot(x_vol, y_vol)
plt.title("Network Approach with Hierarchy:\nTrade Volume", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Volume", fontsize = 15, fontweight = "bold")
#ax2.text(600,300, "Total Trade Volume: \n "+str(total), fontsize = 15, fontweight = 'bold')
#plt.ylim(0,400)

# +
from statistics import stdev
from math import log

# Per-step SDLM for the single run, plus every individual log price for
# the scatter overlay.
x_dev = []
y_dev = []
x_all = []
y_all = []
log_prices = {}
for step, prices in ind_e.items():
    log_prices[step] = [log(p) for p in prices]
for step, log_p in log_prices.items():
    #if step <= 400:
    if len(log_p) < 2:
        pass  # stdev undefined for fewer than two trades
    else:
        for each in log_p:
            x_all.append(step)
            y_all.append(each)
        x_dev.append(step)
        y_dev.append(stdev(log_p))
# -
from numpy.polynomial.polynomial import polyfit

fig3, ax3 = plt.subplots(figsize=(7,7))
ax3.scatter(x_all,y_all)
plt.plot(x_dev,y_dev,'-', color ='red')
plt.title("Network Approach with Hierarchy:\nStandard Deviation of Logarithmic Mean", fontsize = 20, fontweight = "bold")
plt.xlabel("Time", fontsize = 15, fontweight = "bold")
plt.ylabel("Logarithmic Price", fontsize = 15, fontweight = "bold")

# Agent-type dataframe for the emergence analysis; inspect the final step
# of one run.
net_emergent = pickle.load(open("type_df_non.p", "rb"))
net_emergent["Run67"][999]
Policy-Network/.ipynb_checkpoints/Data Analysis-Emergent-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # PA_4: Feedforward Neural Network
#
# ## Aim
# Train and test a Feedforward Neural Network for MNIST digit classification.
#
# ## Procedure
# * Download `mnist_file.rar` which contains mnist data as a *pickle* file and read `mnist.py` for loading partial mnist data.
# * Run read `mnist.py` file which will give 1000 train and 500 test images per each class.
# * x train,y train gives the image $784\times1$ and corresponding label for training data. Similarly, for test data.
# * Write
#     1. Neural network model using library functions.
#     2. Your own neural network model and train with Back propagation
# * Test both models with the test data.
# * Find the confusion matrix and report the accuracy.

# +
import numpy as np
from utils import visualise
from read_mnist import load_data
import random

y_train,x_train,y_test,x_test=load_data()
print("Train data label dim: {}".format(y_train.shape))
print("Train data features dim: {}".format(x_train.shape))
print("Test data label dim: {}".format(y_test.shape))
print("Test data features dim:{}".format(x_test.shape))
visualise(x_train)

# +
import numpy as np
import random
import itertools
import time
from sklearn.metrics import f1_score, precision_score, recall_score
from read_mnist import load_data
import pickle


def sigmoid(x):
    """Element-wise logistic sigmoid."""
    return 1.0/(1+ np.exp(-x))

def sigmoid_grad(x):
    """Sigmoid derivative expressed in terms of the activation x = sigmoid(s)."""
    return x * (1 - x)

def tanh_grad(x):
    """tanh derivative expressed in terms of the activation x = tanh(s)."""
    return 1-np.power(x,2)

def softmax(x):
    """Row-wise softmax over a 2-D array (one sample per row).

    BUG FIX: subtract the per-row maximum rather than the single global
    maximum.  Softmax is invariant to a per-row shift, so the outputs are
    identical, but a row far below the global max can no longer underflow
    to an all-zero exponential sum (division by zero).
    """
    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
    return e_x /np.sum(e_x, axis=1, keepdims=True)

def relu(x):
    """Element-wise rectified linear unit."""
    return x * (x > 0)

def relu_grad(x):
    """ReLU derivative: 1 where the input was positive, else 0."""
    return (x>0)*1

def cross_entropy(y_,y):
    """Mean negative log-likelihood of integer labels *y* under predicted
    probabilities *y_* (one row of class probabilities per sample)."""
    n = y.shape[0]
    nll = -np.log(y_[range(n),y])
    return np.mean(nll)

def delta_cross_entropy(y_,y):
    """Gradient of the mean cross-entropy w.r.t. the softmax input.
    NOTE: mutates *y_* in place (the caller's a2 array)."""
    n = y.shape[0]
    y_[range(n),y] -= 1
    return y_/n


class NN:
    """Single-hidden-layer feedforward network trained with full-batch
    gradient descent on the module-level x_train / y_train arrays."""

    def __init__(self, hidden_layers, hidden_neurons, hidden_activation, lr=0.01):
        # hidden_layers is accepted for interface compatibility but unused:
        # the network is hard-wired to one hidden layer (W1/b1 -> W2/b2).
        self.hidden_layers = hidden_layers
        self.hidden_neurons = hidden_neurons
        self.hidden_activation = hidden_activation
        self.lr=lr
        np.random.seed(786)  # reproducible initialisation across experiments
        self.W1 = 0.1* np.random.randn(x_train.shape[1],self.hidden_neurons)
        self.b1 = np.zeros((1,self.hidden_neurons))
        self.W2 = 0.1* np.random.randn(self.hidden_neurons,10)
        self.b2 = np.zeros((1,10))

    def _activate(self, s):
        # Apply the configured hidden-layer activation to pre-activations s.
        if self.hidden_activation == 'sigmoid':
            return sigmoid(s)
        elif self.hidden_activation=='tanh':
            return np.tanh(s)
        elif self.hidden_activation=='relu':
            return relu(s)
        raise Exception('Error: Activation not implemented')

    def forward(self,x_train):
        """One forward pass over *x_train*; returns (loss, s1, a1, s2, a2).
        The loss is computed against the module-level y_train labels."""
        s1=np.dot(x_train, self.W1) + self.b1
        a1 = self._activate(s1)
        s2 = np.dot(a1, self.W2) + self.b2
        a2 = softmax(s2)
        loss=cross_entropy(a2,y_train)
        return(loss,s1,a1,s2,a2)

    def backward(self, s1, a1, s2, a2):
        """Backpropagate the cross-entropy loss and take one full-batch
        gradient-descent step on all weights and biases."""
        delta3=delta_cross_entropy(a2,y_train)
        dW2 = np.dot(a1.T, delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        # The gradient helpers take the *activation* a1, not the pre-activation.
        if self.hidden_activation=='sigmoid':
            delta2 = delta3.dot(self.W2.T) * sigmoid_grad(a1)
        elif self.hidden_activation == 'tanh':
            delta2 = delta3.dot(self.W2.T) * tanh_grad(a1)
        elif self.hidden_activation == 'relu':
            delta2 = delta3.dot(self.W2.T) * relu_grad(a1)
        else:
            raise Exception('Error: Activation not implemented')
        dW1 = np.dot(x_train.T, delta2)
        db1 = np.sum(delta2, axis=0)
        self.W1 += -self.lr * dW1
        self.b1 += -self.lr * db1
        self.W2 += -self.lr * dW2
        self.b2 += -self.lr * db2

    def predict(self, x):
        """Return the predicted class label for each row of *x*.

        BUG FIX: the original omitted the biases b1/b2 and always applied
        sigmoid regardless of hidden_activation, so inference did not match
        the network that forward()/backward() actually trained.
        """
        a1 = self._activate(np.dot(x, self.W1) + self.b1)
        a2 = softmax(np.dot(a1, self.W2) + self.b2)
        return np.argmax(a2, axis=1)

    def save_model(self, name):
        """Pickle the weight and bias arrays to the file *name*."""
        params = { 'W1': self.W1, 'b1': self.b1, 'W2': self.W2, 'b2': self.b2}
        with open(name, 'wb') as handle:
            pickle.dump(params, handle, protocol=pickle.HIGHEST_PROTOCOL)


epochs=1000
# hyperparameter variation
lr=0.1
neurons = [32,64,128,256]
activations = ['sigmoid', 'relu', 'tanh']
experiments = list(itertools.product(neurons, activations))

# Train one model per (neuron count, activation) combination and report
# test-set metrics for each.
for (hidden_neurons,hidden_activation) in experiments:
    print('\n############ Activation function: {} No. of neurons: {} ############'.format(hidden_activation, hidden_neurons))
    model=NN(hidden_layers=5,hidden_neurons=hidden_neurons,hidden_activation=hidden_activation, lr=lr)
    print('\nTraining started!')
    start = time.time()
    for epoch in range(epochs):
        loss,s1,a1,s2,a2 = model.forward(x_train)
        if epoch%100==0:
            print("Loss: {} Training progress: {}/{}".format(loss,epoch,epochs))
        model.backward(s1, a1, s2, a2)
    name = 'model_'+str(hidden_activation)+'_'+str(hidden_neurons)+'.pickle'
    model.save_model(name=name)
    stop = time.time()
    print('Training finished in {} s'.format(stop - start))
    test_preds = model.predict(x_test)
    print('Test Results-Accuracy: {} F1-Score: {}, Precision: {} Recall: {}'.format(
        np.mean(test_preds == y_test),
        f1_score(y_test, test_preds, average='micro'),
        precision_score(y_test, test_preds, average='micro'),
        recall_score(y_test, test_preds, average='micro')
    ))
# -

# Load model parameters
for (hidden_neurons,hidden_activation) in experiments:
    name = 'trained_model/model_'+str(hidden_activation)+'_'+str(hidden_neurons)+'.pickle'
    print(name)
    with open(name, 'rb') as handle:
        b = pickle.load(handle)
NN_scratch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np # # EP : Binary tree from binarytree import tree t = tree(3, True) print(t) # ## Tree traversal # + def tree_traversal(root): if root: print(f'Preorder: {root.value}') tree_traversal(root.left) tree_traversal(root.right) tree_traversal(t) # + # tree? # - f = lambda x: 2/(1+np.exp(0.066*x)) f(80) # + # # !mv puzzles.ipynb array_string.ipynb # -
binary_tree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Coding Exercise #0701

# ### 1. TensorFlow basics:
# NOTE: this exercise uses the TensorFlow 1.x graph/session API
# (tf.Session, placeholders, global_variables_initializer), which was
# removed in TensorFlow 2.x -- it will not run unmodified on TF2.

# +
# Install once.
# # !pip install tensorflow
# OR
# # !pip install --user tensorflow-gpu
# -

import tensorflow as tf
import numpy as np

# #### 1.1. Hello World!

# Graph nodes are only evaluated when run inside a session.
hello = tf.constant("Hello World!")
with tf.Session() as sess:
    print(sess.run(hello))

# #### 1.2. Creating tensors:

hello = tf.constant("Hello World!")                          # String.
const_scalar = tf.constant(7)                                # A scalar tensor.
const_matrix = tf.constant([[1,2], [3,4]])                   # A matrix tensor.
mat_fill = tf.fill((4,4),9)                                  # A 4x4 tensor filled with 9s.
mat_zeros = tf.zeros((3,3))                                  # A 3x3 tensor filled with 0s.
mat_ones = tf.ones((5,5))                                    # A 5x5 tensor filled with 1s.
mat_randn = tf.random_normal((3,3), mean=0, stddev=1.0)      # 3x3 random normal tensor.
mat_randu = tf.random_uniform((4,4), minval=0, maxval=1.0)   # 4x4 random uniform tensor.

my_ops=[hello, const_scalar, const_matrix, mat_fill, mat_zeros, mat_ones, mat_randn, mat_randu]
with tf.Session() as sess:
    for op in my_ops:
        res = sess.run(op)
        print(type(res))   # every result is returned as a NumPy value
        print('\n')
        print(res)
        print('\n')

# #### 1.3. Math operations:

n1 = tf.constant(1)
n2 = tf.constant(2)
n3 = n1* n2               # operator overloading builds a multiply node
with tf.Session() as sess:
    print(sess.run(n3))

n1 = tf.constant(4)
n2 = tf.constant(5)
n3 = tf.add(n1,n2)
with tf.Session() as sess:
    print(sess.run(n3))

n1 = tf.constant(2)
n2 = tf.constant(3)
n3 = tf.multiply(n1,n2)
with tf.Session() as sess:
    print(sess.run(n3))

n1 = tf.constant(2)
n2 = tf.constant(3)
n3 = tf.pow(n1,n2)
with tf.Session() as sess:
    print(sess.run(n3))

# #### 1.4. Matrix functions and operations:

m1 = tf.constant([[1,2], [3,4]])
m2 = tf.constant([[1], [2]])
m3 = tf.matmul(m1, m2)  # Matrix multiplication. Different from the element-wise multiplication with multiply().
with tf.Session() as sess:
    print(sess.run(m3))

m1 = tf.constant([[1,2], [3,4]])
m2 = tf.constant([[5, 6], [7, 8]])
m3 = tf.matmul(m1, m2)
with tf.Session() as sess:
    print(sess.run(m3))

m3 = tf.multiply(m1, m2)   # element-wise product
with tf.Session() as sess:
    print(sess.run(m3))

m3 = m1*m2                 # same as tf.multiply (element-wise)
with tf.Session() as sess:
    print(sess.run(m3))

m3 = m1 + m2
with tf.Session() as sess:
    print(sess.run(m3))

m3 = m1-m2
with tf.Session() as sess:
    print(sess.run(m3))

m3 = m1 / m2               # element-wise true division (float result)
with tf.Session() as sess:
    print(np.round(sess.run(m3),3))

# Matrix functions.
m = tf.constant([[1.0,2.0],[3.0,4.0]])
m_diagonal = tf.matrix_diag([1,2,3])
m_transpose = tf.matrix_transpose(m)
m_inverse = tf.matrix_inverse(m)
m_multiplied = tf.matmul(m, m_inverse)   # m times its inverse
m_determinant = tf.matrix_determinant(m)
my_matrices = [m, m_diagonal, m_transpose, m_inverse, m_multiplied, m_determinant]
with tf.Session() as sess:
    for mat in my_matrices:
        res = sess.run(mat)
        print(np.round(res,3))
        print('\n')

# #### 1.5. TensorFlow Variable:

# Initializing Variables #1: initialize each variable explicitly.
x = tf.Variable(initial_value=5, name='x')   # Define.
y = tf.Variable(initial_value=6, name='y')   # Define.
f = (x - y)*y + 3                            # Define.
sess = tf.Session()
sess.run(x.initializer)                      # Initialize.
sess.run(y.initializer)                      # Initialize.
res = sess.run(f)
print(res)
sess.close()                                 # a manually opened session must be closed

# Initializing Variables #2: run initializers through the default session.
x = tf.Variable(initial_value=3, name='x')   # Define.
y = tf.Variable(initial_value=2, name='y')   # Define.
f = x*y + y + 3                              # Define.
with tf.Session() as sess:
    x.initializer.run()   # Initialize. The same as "sess.run(x.initializer)".
    y.initializer.run()   # Initialize. The same as "sess.run(y.initializer)".
    res = f.eval()        # The same as "res = sess.run(f)".
print(res)

# Initializing Variables #3: one op that initializes every variable.
x = tf.Variable(initial_value=7, name='x')   # Define.
y = tf.Variable(initial_value=3, name='y')   # Define.
f = x*y*y - y - 1                            # Define.
init = tf.global_variables_initializer()     # Prepare.
with tf.Session() as sess:
    sess.run(init)                           # Initialize.
    res = f.eval()                           # The same as "res = sess.run(f)".
print(res)

my_tensor = tf.random_uniform((4,4),0,1)
my_var = tf.Variable(initial_value = my_tensor)    # Initial value from a tensor.
#my_matrix = np.random.uniform(0,1,(4,4))
#my_var = tf.Variable(initial_value = my_matrix)   # Initial value from a Numpy array.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(my_var))

# #### 1.6. TensorFlow Placeholder

# Placeholders are graph inputs whose values are supplied at run time
# through feed_dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
y = tf.multiply(a, b)
with tf.Session() as sess:
    res = sess.run(y, feed_dict={a:2, b:3})   # Feed the actual values.
    print(res)

# Simulated data as NumPy array.
np.random.seed(123)
X1 = np.random.uniform(0.0, 1.0, (3,3))
X2 = np.random.normal(0.0, 1.0, (10,3))
b0 = np.array([1,2,3]).reshape((-1,1))
# # The data may be a DataFrame.
# X1 = pd.DataFrame(X1)
# X2 = pd.DataFrame(X2)
#
X = tf.placeholder(tf.float32, shape=(None,3))   # "None" means that the number of rows is still undefined.
b = tf.placeholder(tf.float32, shape=(3,1))
y = tf.matmul(X, b)
with tf.Session() as sess:
    print(sess.run(y, feed_dict={X:X1, b:b0}))   # Feed in the data.
    print("\n")
    print(sess.run(y, feed_dict={X:X2, b:b0}))   # Feed in the data.
SIC_AI_Coding_Exercises/SIC_AI_Chapter_08_Coding_Exercises/ex_0701.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Rdkit_ker
#     language: python
#     name: rdkit
# ---

# +
import progentrl as gentrl
import torch
import pandas as pd
from tqdm import tqdm_notebook
import warnings
warnings.simplefilter('ignore')
# -

from pytorch_lightning import Trainer

# # Reward Function
# This section is for defining the reward function.

# +
from moses.metrics import mol_passes_filters, QED, SA, logP
from moses.metrics.utils import get_n_rings, get_mol

def get_num_rings_6(mol):
    """Count rings with more than six atoms in an RDKit molecule."""
    r = mol.GetRingInfo()
    return len([x for x in r.AtomRings() if len(x) > 6])

def penalized_logP(mol_or_smiles, masked=False, default=-5):
    """Penalised logP reward: logP minus the synthetic-accessibility score
    minus the number of rings larger than six atoms.

    Returns *default* for unparsable input and, when *masked* is True,
    also for molecules failing the MOSES medicinal-chemistry filters.
    """
    mol = get_mol(mol_or_smiles)
    if mol is None:
        return default
    reward = logP(mol) - SA(mol) - get_num_rings_6(mol)
    if masked and not mol_passes_filters(mol):
        return default
    return reward
# -

# Download the MOSES dataset, keep the training split, and annotate each
# SMILES string with its penalised logP.
# ! wget https://media.githubusercontent.com/media/molecularsets/moses/master/data/dataset_v1.csv
df = pd.read_csv('dataset_v1.csv')
df = df[df['SPLIT'] == 'train']
df['plogP'] = df['SMILES'].apply(penalized_logP)
df.to_csv('train_plogp_plogpm.csv', index=None)

# +
md = gentrl.MolecularDataset(sources=[
    {'path':'train_plogp_plogpm.csv',
     'smiles': 'SMILES',
     'prob': 1,
     'plogP' : 'plogP',
    }], props=['plogP'])

from torch.utils.data import DataLoader
train_loader = DataLoader(md, batch_size=50, shuffle=True, num_workers=1, drop_last=True)
# -

# # Defining the Encoder, Decoder and Descriptors

enc = gentrl.RNNEncoder(latent_size=50)
dec = gentrl.DilConvDecoder(latent_input_size=50)
# ('c', 20) descriptors: presumably continuous variables with 20 mixture
# components each -- confirm against the GENTRL documentation.
latent_descr = 50 * [('c', 20)]
feature_descr = [('c', 20)]

# # Training the VAE (pretrain Step)

model = gentrl.gentrlVAE(enc, dec, train_loader, latent_descr, feature_descr)

# ## Pytorch-Lightning trainer (For more details refer [this](https://pytorch-lightning.readthedocs.io/en/latest/trainer.html) )
# - max_epochs: maximum Epochs
# - gpus: No of gpus (-1 means all of them and 0 means cpu)

trainer = Trainer(max_epochs=20, gpus=-1)
trainer.fit(model)  # Training the model

# ## Saving the model

gentrl.save(model, './model')  # Returns true on successful completion

# # Reinforcement Learning (train_rl Step)

# Fine-tune with RL using penalized_logP as the reward, starting from the
# pretrained VAE weights saved above.
model = gentrl.gentrlRL(penalized_logP, enc, dec, latent_descr, feature_descr, load_model='./model/')
trainer = Trainer(max_epochs=10, gpus=-1)
trainer.fit(model)
gentrl.save(model, './model')

# # Sampling Molecules (Sampling step)

model(10)
Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges). # # Challenge Notebook # ## Problem: Determine if a tree is a valid binary search tree. # # * [Constraints](#Constraints) # * [Test Cases](#Test-Cases) # * [Algorithm](#Algorithm) # * [Code](#Code) # * [Unit Test](#Unit-Test) # * [Solution Notebook](#Solution-Notebook) # ## Constraints # # * Can the tree have duplicates? # * Yes # * If this is called on a None input, should we raise an exception? # * Yes # * Can we assume we already have a Node class? # * Yes # * Can we assume this fits in memory? # * Yes # ## Test Cases # # <pre> # Valid: # 5 # / \ # 5 8 # / / # 4 6 # \ # 7 # # Invalid: # 5 # / \ # 5 8 # / \ / # 4 9 7 # </pre> # ## Algorithm # # Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst_validate/bst_validate_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start. 
# ## Code # %run ../bst/bst.py # %load ../bst/bst.py class BstValidate(Bst): def validate(self): # TODO: Implement me pass # ## Unit Test # **The following unit test is expected to fail until you solve the challenge.** # + # # %load test_bst_validate.py from nose.tools import assert_equal from nose.tools import raises class TestBstValidate(object): @raises(Exception) def test_bst_validate_empty(self): validate_bst(None) def test_bst_validate(self): bst = BstValidate(Node(5)) bst.insert(8) bst.insert(5) bst.insert(6) bst.insert(4) bst.insert(7) assert_equal(bst.validate(), True) bst = BstValidate(Node(5)) left = Node(5) right = Node(8) invalid = Node(20) bst.root.left = left bst.root.right = right bst.root.left.right = invalid assert_equal(bst.validate(), False) print('Success: test_bst_validate') def main(): test = TestBstValidate() test.test_bst_validate_empty() test.test_bst_validate() if __name__ == '__main__': main() # - # ## Solution Notebook # # Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/bst_validate/bst_validate_solution.ipynb) for a discussion on algorithms and code solutions.
graphs_trees/bst_validate/bst_validate_challenge.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # Chapter 2. Twitter Data ETL : Advanced Project in Digital Media Engineering
#
# - previous chapter 1, about NOAA Data ETL, can be reached from the link __[here](http://nbviewer.jupyter.org/github/ShawnHouCHN/Advanced-Project-in-Digital-Media-Engineering-E17/blob/master/notebooks/USA%20climatological%20station%20data%20parser.ipynb)__
#
# - next chapter 3, about data analysis, can be reached from the link __[here](http://nbviewer.jupyter.org/github/ShawnHouCHN/Advanced-Project-in-Digital-Media-Engineering-E17/blob/master/notebooks/USA%20twitter-weather%20data%20analysis%20.ipynb)__

# ***
# In this chapter, the notebook shows the extraction, transformation and loading of the Twitter data. Some visualizations have been made to give intuitive insights into the result. <br>

# +
# # %load extract_weather_keywords.py
# #!/usr/bin/env python2
"""
Created on Wed Oct 11 15:10:40 2017

@author: omd
"""

# Docs :)
# 0 tweetid
# 1 userid
# 2 timestamp
# 3 reply-tweetid
# 4 reply-userid
# 5 source
# 6 truncated?
# 7 geo-tag
# 8 location
# 9 tweet-text
# 10 twittername (text)
# 11 twittername (handle)

# NOTE(review): this is a Python 2 script (print statement, `unicode` type);
# it will not run unmodified under Python 3.

# Determine whether code is running on the cluster or locally (for testing)
import socket
achtung = 'achtung' in socket.gethostname()

# If testing locally, find the Spark installation
if not achtung:
    import findspark
    findspark.init()

import datetime
from pyspark import SparkContext
import os
import numpy as np
import shutil
import time

# If cluster, import stuff to interact with Hadoop file system
if achtung:
    import pydoop.hdfs as hdfs
    from pushover import Client

from copy import deepcopy
from dateutil.parser import parse
import json
import re


def pushme(msg, title = ""):
    # Send a push notification via Pushover.  `Client` is only imported on the
    # cluster branch above, so calling this locally raises NameError.
    Client().send_message(msg, title=title)


######################################################################
APP_NAME = "Weather keyword extractor"
# Fraction of data to load
sample_fraction = 1.0
# Whether to extract data from HDFS
SAVE_LOCAL = False
# File glob thingy to read in on cluster
FILES = "tweets.2016-*"
outname = "weather_keyword_counts.tsv"
######################################################################

# Output path is currently identical on cluster and locally; the commented
# path shows the previous cluster destination.
if achtung:
    outpath = outname  #"/net/data/bjarkemoensted/"+outname
else:
    outpath = outname

# Input: an HDFS glob on the cluster, a small local sample file otherwise.
if achtung:
    path = "hdfs://megatron.ccs.neu.edu/user/amislove/twitter/gardenhose-data/summarized/"+FILES
else:
    path = "sample.txt"


def normalize(loc, state_codes_b):
    '''Normalize a free-text location string: strip non-ASCII and
    non-alphanumeric characters, drop a trailing "US"/"USA"/... suffix, and
    abbreviate US state names via the broadcast `state_codes_b` table.
    Returns None when nothing is left after cleaning.

    Little Bjarke wrote this.'''
    # remove unicode
    loc = loc.encode('unicode-escape')
    loc = loc.decode('unicode_escape').encode('ascii','ignore')
    # remove non-alphanumeric
    loc = loc.lower()
    loc = re.sub('[^0-9a-zA-Z]+', ' ', loc)
    # remove US from right side
    loc = ' '.join(loc.split())
    us_names = [' us', ' usa', ' united states', ' united states of america']
    for pattern in us_names:
        loc = re.sub('%s$' % pattern, '', loc)
    # abbreviate state names -- each broadcast row is presumably
    # (full name, two-letter code); verify against cont_states.tsv
    for s in state_codes_b.value:
        loc = loc.replace(s[0], ' '+s[1])
    # clean up whitespace
    loc = ' '.join(loc.split())
    loc = loc.strip()
    if loc == '':
        return None
    return loc


def process(rawline, keyword_map_b):
    '''Convert one tab-separated tweet line into a tuple
    (ymd key, set of matched keywords, raw location string).
    Returns None for malformed lines or tweets with no keyword match.'''
    try:
        if isinstance(rawline, str):
            line = rawline.decode('utf-8')
        elif isinstance(rawline, unicode):
            line = rawline
        else:
            raise TypeError("Line neither string nor unicode")
        entries = line.split("\t")
        # Expect exactly the 12 fields documented at the top of the file.
        if len(entries) != 12:
            return None
        # text
        text = entries[9]
        words = text.lower().split()
        # for i in range(len(words)-1, -1, -1):
        #     if (re.match(url_pattern, words[i])
        #             or words[i] == 'rt'
        #             or words[i].startswith("@")):
        #         del words[i]
        # Keep only alphanumerics plus a few tweet-relevant symbols per word.
        cleaned = ["".join(ch for ch in w if ch.isalnum() or ch in set("-%/\\#@")) for w in words]
        matched = set([])
        for word in cleaned:
            try:
                matched.add(keyword_map_b.value[word])
            except KeyError:
                continue
        # Drop tweets that matched no weather keyword.
        if len(matched) == 0:
            return None
        # time: build a zero-padded "YYYY_MM_DD" key from the timestamp field.
        ts = parse(entries[2])
        ymd = "_".join([str(ts.year).zfill(4)]+list(map(lambda x: str(x).zfill(2), (ts.month, ts.day))))
        return ymd, matched, entries[8]
    except:
        # NOTE(review): bare except is deliberate best-effort (drop bad lines),
        # but it also swallows unexpected errors.
        print "Failed. Tweet entries:", len(rawline.split("\t"))
        return None


def combine_dicts(a, b):
    # Merge two {keyword: {county: probability-mass}} dicts by summing the
    # per-county masses; neither input is mutated.
    result = deepcopy(a)
    for keyword, pdist in b.iteritems():
        if not (keyword in result):
            result[keyword] = deepcopy(pdist)
        else:
            for county, prob in pdist.iteritems():
                try:
                    result[keyword][county] += prob
                except KeyError:
                    result[keyword][county] = prob
    return result


if __name__ == '__main__':
    start_time = time.time()

    # word -> canonical weather keyword
    with open("keyword_map.json") as f:
        keyword_map = json.load(f)

    # Create sparkcontext (duh)
    sc = SparkContext(appName = APP_NAME)
    keyword_map_b = sc.broadcast(keyword_map)

    # (state name, state code) rows as lower-cased byte strings
    state_codes = np.loadtxt('cont_states.tsv', delimiter='\t', skiprows=1, usecols=[0,1], dtype='|S25')
    state_codes = np.char.lower(state_codes)
    state_codes_b = sc.broadcast(state_codes)

    # normalized location string -> per-county probability distribution
    with open("normalized_loc_string2p_dist.json") as f:
        loc_string2p_dist = json.load(f)
    loc_string2p_dist_b = sc.broadcast(loc_string2p_dist)

    # Pipeline: parse lines, normalize locations, keep only tweets whose
    # location has a known county distribution, then sum distributions per day.
    ymd_matches_rawloc = sc.textFile(path).map(lambda line: process(line, keyword_map_b)).filter(lambda x: x is not None)
    ymd_matches_normloc = ymd_matches_rawloc.map(lambda t: (t[0], t[1], normalize(t[2], state_codes_b))).filter(lambda t: t[2] is not None)
    data_with_loc_data = ymd_matches_normloc.filter(lambda t: t[2] in loc_string2p_dist_b.value)
    ymd_dicts = data_with_loc_data.map(lambda t: (t[0], {w : loc_string2p_dist_b.value[t[2]] for w in t[1]}))
    ymd_combined = ymd_dicts.reduceByKey(lambda a,b: combine_dicts(a,b))

    ordered = sorted(ymd_combined.collect(), key = lambda t: t[0])

    # Stop spark instance
    sc.stop()

    # One output line per day: "YYYY_MM_DD<TAB><json dict>"
    with open(outpath, "w") as f:
        for date, dict_ in ordered:
            f.write(date+"\t"+json.dumps(dict_)+"\n")

    dt = int(time.time() - start_time)
    timestring = str(datetime.timedelta(seconds = dt))
    # If running on cluster, send push notification to my phone.
    if achtung:
        pushme("Finished running "+os.path.split(__file__)[-1] + "\nRuntime: "+timestring)
notebooks/USA twitter data parser.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data

# +
import pandas as pd
import numpy as np

# Synthetic logistic data: two categorical features (1..n-1 and 1..m-1), one
# numeric feature, and a binary label from a noisy linear model.
df = pd.DataFrame(columns=['cat_1', 'cat_2'])

n = 3
m = 4
sample_size = 100

df['cat_1'] = np.random.randint(1, n, sample_size)
df['cat_2'] = np.random.randint(1, m, sample_size)
df['random_x'] = np.random.random(sample_size)

beta = 10 * np.random.rand()
# Per-category shifts; categories are 1-based, hence the `- 1` indexing below.
shift_cat1 = -1.5 * np.random.rand(df['cat_1'].nunique())
shift_cat2 = -2.5 * np.random.rand(df['cat_2'].nunique())

lin_model = beta * df['random_x'] + shift_cat1[df['cat_1'].values - 1] + shift_cat2[df['cat_2'].values - 1] + \
    np.random.rand(sample_size)

# Inverse logit, then threshold at 0.5 to obtain binary labels.
probs = (1 + np.exp(lin_model)) ** -1.0
labels = np.ones(df.shape[0], ).astype(int)
labels[probs < 0.5] = 0
df['random_y'] = labels
# -

# # Data Visualization

# +
import matplotlib.pyplot as plt

plt.scatter(x=df['random_x'], y=df['random_y'])
plt.show()
# -

# # Modelling

# +
from bayes_linear import BGLClassifier

model = BGLClassifier(numeric_cols=['random_x'], target_col='random_y', cat_cols=['cat_1', 'cat_2'])
# -

model.fit(df)

print(model.model)

# # Validation Performance

# +
# Fresh sample from the same generative process (without the training noise term).
val_size = 1000
df_val = pd.DataFrame(columns=['random_x'])
df_val['random_x'] = np.random.rand(val_size)
df_val['cat_1'] = np.random.randint(1, n, val_size)
df_val['cat_2'] = np.random.randint(1, m, val_size)

lin_model = beta * df_val['random_x'] + \
    shift_cat1[df_val['cat_1'].values - 1] + shift_cat2[df_val['cat_2'].values - 1]

probs = (1 + np.exp(lin_model)) ** -1.0
val_labels = np.ones(df_val.shape[0], ).astype(int)
val_labels[probs < 0.5] = 0
# -

# +
train_accuracy = model.score(df, labels)
val_accuracy = model.score(df_val, val_labels)

print('Train Mean Accuracy: ' + str(train_accuracy))
print('Validation Mean Accuracy: ' + str(val_accuracy))
# -

model.model.plot()

# # Non-Bayesian Performance
#
# ## Label Encoded Data

# +
from sklearn.linear_model import LogisticRegressionCV

lin_reg = LogisticRegressionCV(cv=5)
lin_reg.fit(df[['random_x', 'cat_1', 'cat_2']], df['random_y'])

# BUG FIX: the original read `lin_reg.score, ( ... )`, which built a tuple of
# (bound method, argument tuple) instead of calling score(); the printed
# "accuracy" was that tuple, not a number.
accuracy_train = lin_reg.score(df[['random_x', 'cat_1', 'cat_2']], df['random_y'])
accuracy_val = lin_reg.score(df_val[['random_x', 'cat_1', 'cat_2']], val_labels)

print('Train Mean Accuracy: ' + str(accuracy_train))
print('Validation Mean Accuracy: ' + str(accuracy_val))
# -

# ## One Hot Encoded Data

# +
df_val['random_y'] = val_labels
df['is_train'] = 1
df_val['is_train'] = 0
merged_df = pd.concat([df, df_val], axis=0, ignore_index=True)


def oneHotEncode(dff, cat_cols):
    """Return a copy of `dff` with each column in `cat_cols` replaced by
    one-hot dummy columns named '<col>_<value>'."""
    res = dff.copy()
    for cat_col in cat_cols:
        cat_dummies = pd.get_dummies(res[cat_col].astype(object))
        cat_dummies = cat_dummies.rename(columns={col: cat_col + '_' + str(col) for col in cat_dummies.columns})
        res = pd.concat([res.drop(cat_col, axis=1), cat_dummies], axis=1)
    return res


# Encode train and validation together so both get identical dummy columns.
merged_df = oneHotEncode(merged_df, ['cat_1', 'cat_2'])
# -

# +
indices = merged_df['is_train'] == 1
df = merged_df.loc[indices, :].reset_index(drop=True)
df_val = merged_df.loc[~indices, :].reset_index(drop=True)

lin_reg = LogisticRegressionCV(cv=5)
train_cols = [col for col in df.columns if not col in ['is_train', 'random_y']]
lin_reg.fit(df[train_cols], df['random_y'])

accuracy_train = lin_reg.score(df[train_cols], df['random_y'])
accuracy_val = lin_reg.score(df_val[train_cols], val_labels)

print('Train Mean Accuracy: ' + str(accuracy_train))
print('Validation Mean Accuracy: ' + str(accuracy_val))
# -

# # Comments
#
# This shows that, with the presence of white noise, the Bayesian model is more equipped to learn noisy data and make better predictions, while also taking care of the categorical differences in the data.
data/linear_classifier_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Pandas Data Types

# Accompanying the PB Python article [here](http://pbpython.com/pandas_dtypes.html)

import pandas as pd
import numpy as np

df_2 = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True")

df_2

# Use df.info and df.dtypes to look at the types that pandas automatically infers based on the data

df_2.info()

df_2.dtypes

# Since the 2016 and 2017 columns were read in as objects, trying to add the values will result in string concatenation not numerical addition

df_2['2016'] + df_2['2017']

# The simplest way to convert to a type is using astype.
#
# We can apply it to the customer number first.

df_2['Customer Number'].astype('int')

df_2

# The code above does not alter the original dataframe

df_2.dtypes

# Assign the new integer customer number back to the original frame and check the type

df_2["Customer Number"] = df_2['Customer Number'].astype('int')

df_2.dtypes

df_2

# The data all looks good for the Customer Number.
#
# If we try to convert the Jan Units column, we will get an error.
# (This cell intentionally raises -- it demonstrates astype failing on
# non-numeric strings.)

df_2['Jan Units'].astype('int')

# In a similar manner we get an error if we try to convert the sales column
# (intentionally raises as well)

df_2['2016'].astype('float')

# We can try to use astype with a bool type but that does not give expected results

df_2['Active'].astype('bool')

# astype can take a dictionary of column names and data types

df_2.astype({'Customer Number': 'int', 'Customer Name': 'str'}).dtypes

# In order to convert the currency and percentages, we need to use custom functions

def convert_currency(val):
    """
    Convert a currency string such as "$125,000.00" to a float:
     - Remove $
     - Remove commas
     - Convert to float type
    """
    new_val = val.replace(',','').replace('$', '')
    return float(new_val)

def convert_percent(val):
    """
    Convert a percentage string such as "30.00%" to a fraction (0.30).
    """
    new_val = val.replace('%', '')
    return float(new_val) / 100

# Use apply to convert the 2016 and 2017 columns to floating point numbers

df_2['2016'].apply(convert_currency)

df_2['2017'].apply(convert_currency)

df_2

df_2['2016'].apply(convert_currency) + df_2['2017'].apply(convert_currency)

# We could use a lambda function as well but it may be more difficult for new users to understand

df_2['2016'].apply(lambda x: x.replace('$', '').replace(',', '')).astype('float')

# Assign the converted values back to the columns

df_2['2016'] = df_2['2016'].apply(convert_currency)
df_2['2017'] = df_2['2017'].apply(convert_currency)

df_2

# Use a lambda function to convert the percentage strings to numbers
#
# NOTE(review): the two assignments below are alternatives, not a sequence --
# running the second after the first would call .replace on a float and raise
# AttributeError.

df_2['Percent Growth'] = df_2['Percent Growth'].apply(lambda x: x.replace('%', '')).astype('float') / 100

df_2['Percent Growth'] = df_2['Percent Growth'].apply(convert_percent)

df_2.dtypes

# Let's look at the data so far

df_2

# pd.to_numeric is another option for handling column conversions when invalid values are included

pd.to_numeric(df_2['Jan Units'], errors='coerce')

# Fill in the NaN with 0

pd.to_numeric(df_2['Jan Units'], errors='coerce').fillna(0)

# Make sure to populate the original column of data

df_2["Jan Units"] = pd.to_numeric(df_2['Jan Units'], errors='coerce').fillna(0)

# pd.to_datetime is very useful for working with date conversions

df_2[ ['Month', 'Day', 'Year'] ]

pd.to_datetime(df_2[['Month', 'Day', 'Year']])

df_2["Start_Date"] = pd.to_datetime(df_2[['Month', 'Day', 'Year']])

# Check out the dataframe

df_2

# Use np.where to convert the active column to a boolean

df_2["Active"] = np.where(df_2["Active"] == "Y", True, False)

df_2

df_2.dtypes

# Many of the examples shown above can be used when reading in data using dtypes or converters arguments

df_2 = pd.read_csv("https://github.com/chris1610/pbpython/blob/master/data/sales_data_types.csv?raw=True",
                   dtype={'Customer Number':'int'},
                   converters={'2016':convert_currency,
                               '2017': convert_currency,
                               'Percent Growth': convert_percent,
                               'Jan Units': lambda x: pd.to_numeric(x, errors='coerce'),
                               'Active': lambda x: np.where(x == "Y", True, False)
                               })

df_2.dtypes

df_2

# This can not be applied at the time the data is read in

df_2["Start_Date"] = pd.to_datetime(df_2[['Month', 'Day', 'Year']])

df_2
Pandas_Data_Types.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
os.environ.keys()

# # Imports

# +
import pygrib
import os
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import colors
from mpl_toolkits.basemap import Basemap, addcyclic

import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.figure_factory as ff
import ipywidgets

init_notebook_mode(connected=True)
# -

import mpl_toolkits
mpl_toolkits.__version__

# %matplotlib inline

# # Define functions

# ## Define a function that takes a list of pygrib.gribmessage objects and returns a dictionary, where the key 'data' contains a 3D tensor with data from the pygrib.gribmessage objects

def grb_to_grid(grb_obj):
    """Takes a single grb object containing multiple levels.
    Assumes same time, pressure levels. Compiles to a cube.

    Returns a dict with keys 'data' (levels x rows x cols array, highest
    pressure first), 'units', and 'levels' (sorted descending).
    """
    n_levels = len(grb_obj)
    levels = np.array([grb_element['level'] for grb_element in grb_obj])
    indexes = np.argsort(levels)[::-1]  # highest pressure first
    # BUG FIX: the column count previously came from grb_obj[1], which raised
    # IndexError for single-level objects; all messages share one grid, so
    # take both dimensions from message 0.
    cube = np.zeros([n_levels, grb_obj[0].values.shape[0], grb_obj[0].values.shape[1]])
    for i in range(n_levels):
        cube[i, :, :] = grb_obj[indexes[i]].values
    cube_dict = {'data': cube, 'units': grb_obj[0]['units'], 'levels': levels[indexes]}
    return cube_dict


# ### Define functions to return outlines of countries and coastlines

# +
# Make trace-generating function (return a Scatter object)
def make_scatter(x, y):
    return go.Scatter(
        x=x,
        y=y,
        mode='lines',
        line=go.scatter.Line(color="black"),
        name=' '  # no name on hover
    )


# Functions converting coastline/country polygons to lon/lat traces
def polygons_to_traces(poly_paths, N_poly, basemap_map):
    '''
    pos arg 1. (poly_paths): paths to polygons
    pos arg 2. (N_poly): number of polygons to convert
    pos arg 3. (basemap_map): Basemap whose projection is inverted to lon/lat
    '''
    traces = []  # init. plotting list
    for i_poly in range(N_poly):
        poly_path = poly_paths[i_poly]

        # get the Basemap coordinates of each segment
        coords_cc = np.array(
            [(vertex[0], vertex[1])
             for (vertex, code) in poly_path.iter_segments(simplify=False)]
        )

        # convert coordinates to lon/lat by 'inverting' the Basemap projection
        lon_cc, lat_cc = basemap_map(coords_cc[:, 0], coords_cc[:, 1], inverse=True)

        # add plot.ly plotting options
        traces.append(make_scatter(lon_cc, lat_cc))

    return traces


# Function generating coastline lon/lat traces
def get_coastline_traces(basemap_map):
    poly_paths = basemap_map.drawcoastlines().get_paths()  # coastline polygon paths
    N_poly = 91  # use only the 91 biggest coastline polygons (i.e. no rivers)
    return polygons_to_traces(poly_paths, N_poly, basemap_map)


# Function generating country lon/lat traces
def get_country_traces(basemap_map):
    poly_paths = basemap_map.drawcountries().get_paths()  # country polygon paths
    N_poly = len(poly_paths)  # use all countries
    return polygons_to_traces(poly_paths, N_poly, basemap_map)
# -

# ### Define functions to read data and return plotly FigureWidget object

# +
def get_data_as_cube_and_lats_lons(file_path, file_name, data_type):
    """Open a GRIB file and return (cube dict, lats, lons, forecast date,
    issuance date) for the messages whose name matches `data_type`."""
    grbs = pygrib.open(os.path.join(file_path, file_name))
    grb = grbs.readline()
    lats, lons = grb.latlons()
    grb_data = grbs.select(name=data_type)
    grb_cube_data = grb_to_grid(grb_data)
    forecast_date = grb.validDate
    issuance_date = grb.analDate
    grbs.close()
    return grb_cube_data, lats, lons, forecast_date, issuance_date


def rotate_data_and_lats(grb_matrix_data, lons, degrees=180):
    """Shift longitudes from [0, 2*degrees) to [-degrees, degrees) and reorder
    the data columns to match (west half first)."""
    lons_rotated = lons.copy()
    lons_rotated[lons >= degrees] = lons[lons >= degrees] - (2 * degrees)
    # proper rotation obtained from: https://plot.ly/ipython-notebooks/basemap-maps/
    i_east = lons_rotated[0, :] >= 0  # indices of east lon
    i_west = lons_rotated[0, :] < 0   # indices of west lon
    # stack the two halves
    lons_rotated = np.hstack((lons_rotated[:, i_west], lons_rotated[:, i_east]))
    # Correspondingly, shift the data array
    data_rotated = np.hstack((grb_matrix_data[:, i_west], grb_matrix_data[:, i_east]))
    return data_rotated, lons_rotated


def get_rotated_basemap(degrees=180):
    """Return a Miller-projection Basemap spanning [1-degrees, degrees] lon."""
    m_rotated = Basemap(llcrnrlon=1 - degrees, llcrnrlat=-89,
                        urcrnrlon=degrees, urcrnrlat=89,
                        projection='mill', area_thresh=10000, resolution='l')
    return m_rotated


def get_widget_figure(file_path, file_name, data_type, degrees_to_rotate=180, level_index=10):
    """Build a plotly FigureWidget with a contour map of one pressure level of
    `data_type` plus coastline/country outlines.

    `level_index` selects the level from the data cube (default 10 preserves
    the previous hard-coded behaviour).  `degrees_to_rotate` is now actually
    forwarded to the rotation helpers (it was previously accepted but ignored
    there, silently assuming 180).
    """
    grb_cube_data, lats, lons, forecast_date, issuance_date = get_data_as_cube_and_lats_lons(
        file_path, file_name, data_type)
    matrix_data = grb_cube_data['data'][level_index]
    data_rotated, lons_rotated = rotate_data_and_lats(matrix_data, lons, degrees_to_rotate)
    basemap_rotated = get_rotated_basemap(degrees_to_rotate)

    anno_text = str(forecast_date) + '<br>(issued on ' + str(issuance_date) + ')'
    # "Data courtesy of
    # <a href='http://www.esrl.noaa.gov/psd/data/composites/day/'>\
    # NOAA Earth System Research Laboratory</a>"

    axis_style = dict(
        zeroline=False,
        showline=False,
        showgrid=False,
        ticks='',
        showticklabels=False,
    )

    layout1 = go.Layout(
        title=go.layout.Title(
            text=data_type,
            xref='paper',
            x=0
        ),
        showlegend=False,
        hovermode="closest",  # highlight closest point on hover
        margin={'t': 100, 'b': 60, 'l': 60, 'r': 0, 'pad': 8},
        xaxis=go.layout.XAxis(
            axis_style,
            range=[-degrees_to_rotate, degrees_to_rotate]  # restrict x-axis to range of lon
        ),
        yaxis=go.layout.YAxis(
            axis_style,
            range=[-85, 85]
        ),
        annotations=[
            dict(
                text=anno_text,
                xref='paper',
                yref='paper',
                x=0,
                y=1,
                yanchor='bottom',
                showarrow=False,
                align='left'
            )
        ],
        autosize=False,
        width=500,
        height=400
    )

    traces_cc = get_coastline_traces(basemap_rotated) + get_country_traces(basemap_rotated)
    fw = go.FigureWidget(data=traces_cc + [go.Contour(z=data_rotated, x=lons_rotated[0, :], y=lats[:, 0])],
                         layout=layout1)
    return fw
# -

# # Define data path

data_folder = os.path.join(os.path.expanduser('~'), 'Documents', 'Projects', 'WeatherWind', 'data')

# # Plots side-by-side

# downloaded from https://nomads.ncdc.noaa.gov/data/gfs-avn-hi/201805/20180508/
data_file = 'gfs_3_20180508_0600_027.grb2'

fw1 = get_widget_figure(data_folder, data_file, "V component of wind")
fw2 = get_widget_figure(data_folder, data_file, "U component of wind")

# https://community.plot.ly/t/plotly-subplots-using-fig-objects-instead-of-traces/11969/4
# https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20Styling.html
fig_subplots = ipywidgets.VBox([fw1, fw2],
                               layout=ipywidgets.Layout(display='flex',
                                                        flex_flow='column',
                                                        align_items='center',
                                                        width='100%'))

# +
# u is the ZONAL VELOCITY, i.e. the component of the horizontal wind TOWARDS EAST.
# v is the MERIDIONAL VELOCITY, i.e. the component of the horizontal wind TOWARDS NORTH
# http://tornado.sfsu.edu/geosciences/classes/m430/Wind/WindDirection.html
fig_subplots
# -
code/notebooks/wind_components_side_by_side.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Thouless-Anderson-Palmer mean field approximation
#
# [![Binder](https://mybinder.org/badge.svg)](https://mybinder.org/v2/gh/danhtaihoang/network-inference/master?filepath=sphinx%2Fcodesource%2Ftap.ipynb)
#
# We show in the following the performance of Thouless-Anderson-Palmer mean field approximation (TAP) in inferring couplings between variables from their configurations. Similar to nMF, TAP works well only in the regime of large sample sizes and small coupling variability. However, this method leads to poor inference results in the regime of small sample sizes and/or large coupling variability.
#
# Similarly, we import the packages to the jupyter notebook:

# +
import numpy as np
import sys
import matplotlib.pyplot as plt

import simulate
import inference

# %matplotlib inline

np.random.seed(1)
# -


# Helper used throughout this notebook: draw the actual and predicted
# coupling matrices as heat maps side by side, together with a scatter plot
# of predicted vs. actual couplings.  `color_lim` sets the symmetric colour
# scale, `diag_lim` sets the extent of the red identity line.
def compare_couplings(w_actual, w_predicted, color_lim, diag_lim):
    plt.figure(figsize=(11, 3.2))

    plt.subplot2grid((1, 3), (0, 0))
    plt.title('actual coupling matrix')
    plt.imshow(w_actual, cmap='rainbow', origin='lower')
    plt.xlabel('j')
    plt.ylabel('i')
    plt.clim(-color_lim, color_lim)
    plt.colorbar(fraction=0.045, pad=0.05, ticks=[-color_lim, 0, color_lim])

    plt.subplot2grid((1, 3), (0, 1))
    plt.title('predicted coupling matrix')
    plt.imshow(w_predicted, cmap='rainbow', origin='lower')
    plt.xlabel('j')
    plt.ylabel('i')
    plt.clim(-color_lim, color_lim)
    plt.colorbar(fraction=0.045, pad=0.05, ticks=[-color_lim, 0, color_lim])

    plt.subplot2grid((1, 3), (0, 2))
    plt.title('predicted couplings vs. actual couplings')
    plt.plot([-diag_lim, diag_lim], [-diag_lim, diag_lim], 'r--')
    plt.scatter(w_actual, w_predicted)
    plt.xlabel('actual couplings')
    plt.ylabel('inferred couplings')

    plt.tight_layout(h_pad=1, w_pad=1.5)
    plt.show()


# As other methods, we first use the same parameter setting: $N = 100$, $g = 2.0$, and $L=2000$.

# parameter setting:
n = 100
g = 2.0

w0 = np.random.normal(0.0, g / np.sqrt(n), size=(n, n))

l = 2000
s = simulate.generate_data(w0, l)

# We apply TAP to recover the coupling matrix `w` from variable configurations `s`.

w = inference.tap(s)

# We plot the heat map of inferred coupling matrix `w` and compare with the actual couplings `w0`:

compare_couplings(w0, w, 0.5, 1)

# The mean square error between actual couplings and predicted couplings is calculated:

MSE = np.mean((w - w0) ** 2)
print('MSE:', MSE)

# Now, assuming that we have a much larger number of samples, $L=100000$ for instance.

l = 100000
s = simulate.generate_data(w0, l)
w = inference.tap(s)

# The inference result for this case is shown below:

compare_couplings(w0, w, 0.5, 1)

MSE = np.mean((w - w0) ** 2)
print('MSE:', MSE)

# The inference result given by this method is also bad, even for large number of samples.
#
# Now, let us consider a very small coupling variability, $g = 0.5$ for instance.

g = 0.5
w0 = np.random.normal(0.0, g / np.sqrt(n), size=(n, n))

# The data length $L=2000$ is used as the first try.

l = 2000
s = simulate.generate_data(w0, l)
w = inference.tap(s)

compare_couplings(w0, w, 0.2, 0.2)

MSE = np.mean((w - w0) ** 2)
print('MSE:', MSE)

# For very large number of samples, $L=100000$:

l = 100000
s = simulate.generate_data(w0, l)
w = inference.tap(s)

compare_couplings(w0, w, 0.2, 0.2)

MSE = np.mean((w - w0) ** 2)
print('MSE:', MSE)

# Similar to nMF, TAP works well only in the limit of large sample sizes and small coupling variability.
sphinx/codesource/.ipynb_checkpoints/tap-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Meteomatics Weather / Ocean Data and Forecasts # This block provides access to a REST-style API to retrieve historic, current, and forecast data globally. # # Meteomatics provides high quality weather data in a spatial resolution up to 90m. All data are available worldwide and they are based on the best numerical models, enhanced by downscaling methods and station calibration methodology. All model data and observational data are available as time series in NetCDF format. Here we use the [NetCDF -> GeoTIFF Conversion](https://marketplace.up42.com/block/e826be64-827b-4df9-b32d-56c528b8050d) to convert the output to GeoTIFF. # # In the example, the workflow, the area of interest and the workflow parameters are defined. After running the job, the results are downloaded and visualized. For more information, refer to the block's [UP42 Marketplace page](https://marketplace.up42.com/block/235addd2-3efe-424b-8c35-d9b41dfe0eb5) and [Documentation](https://docs.up42.com/up42-blocks/data/weather-ocean-data-forecasts-meteomatics.html). import up42 # + # add credential and authenticate up42.authenticate(project_id="12345", project_api_key="12345") project = up42.initialize_project() # Construct workflow workflow = project.create_workflow(name="meteomatics-workflow", use_existing=False) # - input_tasks = ["meteomatics", "data-conversion-netcdf"] workflow.add_workflow_tasks(input_tasks) # Define the aoi berlin_example_aoi = {"type":"Feature","properties":{}, "geometry":{"type": "Polygon","coordinates": [[[13.384292,52.476387], [13.387612,52.476407], [13.387844,52.470423], [13.384436,52.470499], [13.384292,52.476387]]]}} # other options to define aoi include up42.draw_aoi(), up42.read_vector_file(), FeatureCollection, GeoDataFrame etc. 
# Get input parameters workflow.get_parameters_info() # Define input parameters of the workflow to run it input_parameters = workflow.construct_parameters(geometry=berlin_example_aoi, geometry_operation="intersects", start_date="2020-01-01", end_date="2020-01-02") # In the above code cell, we added th basic input parameters. However, the block accepts additional parameters as per workflow.get_parameters_info(). Please refer to the [UP42 Documentation](https://docs.up42.com/up42-blocks/data/weather-ocean-data-forecasts-meteomatics.html) and the [Meteomatics Documentation](https://www.meteomatics.com/en/api/available-parameters/standard-weather-parameter/) for additional examples of variables. You can add the parameters to our parameter configuration in the following steps: input_parameters["meteomatics:1"].update({"time_interval": 6, "variables": ['t_2m:C', 'precip_5min:mm', 'wind_speed_200m:kmh']}) # Check if everything is as expected print(input_parameters) # Price estimation workflow.estimate_job(input_parameters) # Run the actual job job = workflow.run_job(input_parameters, track_status=True) # Download and plot results job.download_results() # Each variable output (i.e wind_speed_200m, t_2m etc.) contains several bands describing the collected data at each time step. Here we plot the first band of each variable. (Lower values are visualized in lighter colors whilst higher values visualized in darker colors) job.plot_results(bands=[1], cmap="Blues")
examples/data-block-examples/meteomatics-example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pranavkantgaur/curves_and_surfaces/blob/master/parametric_representations/param_rep_intro_lec_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="OlNDegH1tiWE" colab_type="text"
# ## Objectives
#
#
# 1. Discuss limitations and merits of polygonal representation
# 2. Intro to explicit and implicit representations
# 3. Discuss limitations of explicit and implicit representations
# 4. Intro to parametric representation:
#    * Cubic polynomial vs higher degree polynomials
#    * Mathematical representation
# 5. Introduction to the concept of _Continuity_
#

# + [markdown] id="eBV6-T3ctHUR" colab_type="text"
# ## Limitations and merits of polygonal representations
# * Accuracy vs Number of vertices:
#   * Element size vs Accuracy:
#     * https://qr.ae/T0mM5P
#   * Increase in number of elements and its effect on computation requirements:
#     * https://doc.cgal.org/latest/Mesh_3/index.html#title39
#
# * Difficulty in local control for interactive manipulation:
#   * Moving the vertices without altering the topology:
#     * https://link.springer.com/chapter/10.1007/978-4-431-68093-2_29
#   * Mesh refinement, remeshing, local smoothing:
#     * https://doc.cgal.org/latest/Polygon_mesh_processing/group__PMP__meshing__grp.html#gaa091c8368920920eed87784107d68ecf
# * Ease of rendering of meshes on GPUs based on polygonal representation (especially triangle-based):
#   * Preferred for realtime graphics rendering:
#     * https://www.reddit.com/r/askscience/comments/1mqno3/why_can_gpus_only_render_polygonal_3d_forms/ccbrxju?utm_source=share&utm_medium=web2x
#

# + [markdown] id="_GhVKB9Eu7Hk" colab_type="text"
# # General curve representations:
# Polygonal representations are most suited for linear domains. For more realistic objects with smooth surfaces, polygonal representations are only suitable given enough computational resources. Representations of curves are as follows:
#
# ## Explicit representation:
# $y = f(x)$
#
# ### Limitations:
# * How to represent objects with multiple values of $y$ for single value of $x$?
#   * Circle, Ellipse, etc, by multiple curve-patches, one per y-value?
# * Tedious interactive manipulation:
#   * How to implement the feature of interaction/editing the curve represented in explicit form?
# * Not rotationally invariant:
#   * How to implement rotation of the curve in explicit form?
# * Curves with vertical tangents, infinite slope:
#   * How to represent?

# + id="Zb8HrUPtrQPV" colab_type="code" colab={}
import matplotlib.pyplot as plt  # for drawing 2D curves
import plotly.graph_objects as go
import math
import numpy as np
from numpy import arange

# + id="JpotRsJaI1eu" colab_type="code" outputId="a57e7e17-9a9d-4f6e-e62d-e2d611447dcb" colab={"base_uri": "https://localhost:8080/", "height": 295}
# Example of explicit representation
# a straight line
class ExplicitLineRepresentation(object):
    """Explicit form of a straight line: y = m*x + c."""

    def __init__(self, m, c):
        # y = mx + c
        self.m = m
        self.c = c

    def generate_points_on_representation(self, x):
        # One y per x -- the defining property (and limitation) of the
        # explicit form.
        y = self.m * x + self.c
        return y


if __name__ == '__main__' :
    line = ExplicitLineRepresentation(0.3, 2)
    # generate list of (x, y)
    n_points = 100
    x = []
    y = []
    for i in range(n_points):
        y.append(line.generate_points_on_representation(i))
        x.append(i)
    plt.plot(x, y)
    plt.title('Explicit representation')
    plt.ylabel('y')
    plt.xlabel('x')
    plt.show()

# + id="fHvWVkOIMDGR" colab_type="code" outputId="605a9375-bb6f-441e-8e31-49c3da38226a" colab={"base_uri": "https://localhost:8080/", "height": 295}
class ExplicitCircleRepresentation(object):
    """Explicit form of a circle of radius r centred at (x, y).

    Being explicit, it can only return ONE y per x -- the upper semicircle.
    """

    def __init__(self, r, x, y):
        self.r = r
        self.x = x  # centre x
        self.y = y  # centre y

    def getx(self):
        return self.x

    def getr(self):
        return self.r

    def generate_points_on_representation(self, x):
        assert(pow(self.r, 2) - pow(x - self.x, 2) >= 0)  # avoid -ve in sqrt
        y = math.sqrt(pow(self.r, 2) - pow(x - self.x, 2)) + self.y  # note only one value of y can be represented!!
        return y


if __name__ == '__main__' :
    circle = ExplicitCircleRepresentation(5.0, 1.0, 1.0)
    # generate list of (x, y)
    n_points = 1000
    x = [t * (circle.getx() - circle.getr()) + (1 - t) * (circle.getx() + circle.getr()) for t in arange(0, 1, 1 / n_points)]  # note the parametric representation!!
    y = []
    for i in x:
        y.append(circle.generate_points_on_representation(i))
    plt.plot(x, y)
    plt.title('Explicit representation, cannot draw the lower half :(')
    plt.ylabel('y')
    plt.xlabel('x')
    plt.show()

# + [markdown] id="YEwL3HFlJA2L" colab_type="text"
# ### Test your understanding:
#
# 1. List curves which can easily be represented using _explicit_ representation.
# 2. Pseudocode of an algorithm to rotate curve implemented under _explicit_ representation.
#

# + id="sOqIsNZFJsnt" colab_type="code" colab={}
# Pseudocode for curve rotation

# + [markdown] id="-BEg7ZtxJwXd" colab_type="text"
# ## Implicit representation
# $f(x, y, z) = 0$
# ### Overview:
# * Similar computation process for coordinates as that in explicit curves.
# * Can be used to represent iso-surfaces. For instance, $f(x, y, z) = k$ implies level-$k$ for the curve. Represents set of all points $(x, y, z)$, which result in the value $k$ for the curve.
# * Examples:
#   * https://github.com/mmolero/pypoisson
#     * Internally maintains an implicit function representation of input data (and associated normals) and returns samples of that function in form of vertices and faces (polygons).
#   * http://hhoppe.com/poissonrecon.pdf
#     * Figure 1, check the indicator function.
#
# ### Limitations:
# * May have more solutions than required. For instance, how to represent a half-circle?
# * $x^2 + y^2 - 1 = 0, x \geq 0$ # * For two joined curved segments, it may be difficult to determine whether their tangent directions agree at their joint point:(**IMPORTANT FOR SMOOTHNESS OF CURVES**) # * Let, $f(x, y, z) = 0$ and $g(x, y, z) = 0$: # * How to define their joint points? Are they the intersection points/lines? # * How to define tangent at those joint points? # + id="6g0A07JXI5Di" colab_type="code" colab={} # Example of implicit representation # y - mx - c = 0 # ax + by + cz + d = 0 class ImplicitLineRepresentation(object): def __init__(self): None def extract_contour(self, k): ''' Returns set of points with level-k ''' None def compute_normal(self, x1, y1): ''' computes normal at point (x1, y1) ''' # + [markdown] id="Rxl3hxg5ureR" colab_type="text" # ## Merits for explicit and implicit representations # * Easy to perform _point inside/outside_ tests:(**Clipping, etc.**) # * Algorithm? # * Explicit: # * If $y_1 - f(x_1) \neq 0$ then the point $(x_1, y_1)$ does not lie on the surface: # * https://doubleroot.in/lessons/straight-line/position-of-a-point-relative-to-a-line/ # * Based on the concept of normals: # * https://math.stackexchange.com/a/274728/153195 # * Implicit: # * For instance, the Indicator function in Poisson surface reconstruction algorithm. # * Normal computation is easier:(**Visibility test**) # * Algorithm? # * Explicit: # * https://math.stackexchange.com/a/274728/153195: # * vector formulation # * Implicit: # * https://en.wikipedia.org/wiki/Implicit_curve#Tangent_and_normal_vector # # ### Test your understanding: # 1. Algorithm to perform inside/outside test on explicit representation of $y = x^2$ and $x^2 + y^2 = 1$ # 2. Algorithm to compute normal on $y = x^2$ and $x^2 + y^2 = 1$? # 3. Same as above on implicit representation. 
# + [markdown] id="7oiTrtSpvcl4" colab_type="text" # # Parametric representation # \begin{equation*} # x = x(t) \\ # y = y(t) \\ # z = z(t) # \end{equation*} # where, $0 \leq t \leq 1$ # # ### Merits of parametric representations # # # * Parametric slope is never infinite: # * $dy / dt$ is never infinite, even if $dy/dx$ is. Check vertical straight-line example. # * Can easily represent multiple values of $y$ for single value of $x$. # * Multiple $y$ may map to single $x$, since both vary independently along $t$. # * Check case of circle: # * Both points with same $x$ and different $y$ will have different values of $t$, therefore can co-exist in same parametric representation. # * $x(t) = rcos(t) + h, y(t) = rsin(t) + k$, where $r$ is radius and $(h,k)$ is the center. # + id="YnjFGIJxI9el" colab_type="code" outputId="71a00f5f-d18e-45e3-ab48-51406dc4a64e" colab={"base_uri": "https://localhost:8080/", "height": 295} # Example of parametric representation class ParametricLineRepresentation(object): def __init__(self, p1, p2): self.p1 = p1 self.p2 = p2 def generate_points_on_representation(self, t): # Notice the parameter 't' assert(0 <= t and t <= 1) p = self.p1 *(1 - t) + self.p2 * (t) # parametric representation of line. return p if __name__ == '__main__' : line = ParametricLineRepresentation(2, 5) # generate list of (x, y) n_points = 1000 x = [t for t in arange(0, 1, 1 / n_points)] # note the parametric representation!! 
if __name__ == '__main__' :
    # (tail of the ParametricLineRepresentation demo started in the previous
    # cell: sample the segment at each t and plot the interpolated point)
    y = []
    for i in x:
        y.append(line.generate_points_on_representation(i))
    plt.plot(x, y)
    plt.title('Parametric representation')
    plt.ylabel('y')
    plt.xlabel('t')
    plt.show()

# + [markdown] id="3dYDJieIysdL" colab_type="text"
# ## Cubic polynomials vs higher degree polynomials
# * Lower than cubic:
#   * Too little flexibility in controlling shape:
# \begin{equation*}
# Q(t) = [x(t), y(t), z(t)] \\
# x(t) = a_{x}t^2 + b_{x}t + c_x\\
# y(t) = a_{y}t^2 + b_{y}t + c_y\\
# z(t) = a_{z}t^2 + b_{z}t + c_z
# \end{equation*}
#   * Easier to compute:
#     * Lower number of unknowns than cubic polynomials(9 vs 12)
# * Higher than cubic:
#   * Too computationally expensive(more than 12 unknowns)
#   * Unwanted wiggles (due to non-linearity)
# * Cubic:
#   * Least degree to specify endpoints and tangent vectors, 4 coefficients.
#
# ### Cubic polynomial representation
# \begin{equation*}
# Q(t) = [x(t), y(t), z(t)] \\
# x(t) = a_{x}t^3 + b_{x}t^2 + c_{x}t + d_x\\
# y(t) = a_{y}t^3 + b_{y}t^2 + c_{y}t + d_y\\
# z(t) = a_{z}t^3 + b_{z}t^2 + c_{z}t + d_z
# \end{equation*}
#
# \begin{equation*}
# Q(t) = T.C \\
# T =
# \begin{bmatrix}
# t^{3} & t^{2} & t^{1} & 1
# \end{bmatrix} \\
# C =
# \begin{bmatrix}
# a_{x} & a_{y} & a_{z} \\
# b_{x} & b_{y} & b_{z} \\
# c_{x} & c_{y} & c_{z} \\
# d_{x} & d_{y} & d_{z}
# \end{bmatrix}
# \end{equation*}
#
# ### A demo:
# * https://www.desmos.com/calculator/ulvko52x7j
# * Implicit curves:
#   * https://www.geogebra.org/m/YBkweXKm
# * Parameteric representation of 3D curves:
#   * https://www.geogebra.org/m/D6yBZNNB
#   * https://www.geogebra.org/m/EPypmb4f

# + id="-1qqsGWlx3Ah" colab_type="code" outputId="363318b4-171b-4dab-c3ec-cc7d19adec6c" colab={"base_uri": "https://localhost:8080/", "height": 542}
class CubicPolynomialParametericRepresentation(object):
    """Cubic parametric curve Q(t) = T . C for a 4x3 coefficient matrix.

    Rows of ``coff_matrix`` are [a; b; c; d] and the columns correspond to
    (x, y, z), i.e. x(t) = a_x t^3 + b_x t^2 + c_x t + d_x, and likewise
    for y(t) and z(t).
    """

    def __init__(self, coff_matrix):
        # coff_matrix: 4x3 array-like, indexed as self.c[row, col]
        self.c = coff_matrix

    def generate_points_on_representation(self, t):
        """Evaluate the curve at parameter t; returns the (x, y, z) tuple."""
        # non-vectorized implementation
        # BUG FIX: the linear term previously called pow() with no arguments
        # (a TypeError at runtime); the degree-1 term is c * t == pow(t, 1).
        x = (self.c[0, 0] * pow(t, 3) + self.c[1, 0] * pow(t, 2)
             + self.c[2, 0] * pow(t, 1) + self.c[3, 0])
        y = (self.c[0, 1] * pow(t, 3) + self.c[1, 1] * pow(t, 2)
             + self.c[2, 1] * pow(t, 1) + self.c[3, 1])
        z = (self.c[0, 2] * pow(t, 3) + self.c[1, 2] * pow(t, 2)
             + self.c[2, 2] * pow(t, 1) + self.c[3, 2])
        return x, y, z

    def generate_points_on_representation_vectorized(self, t):
        """Evaluate the curve at parameter t via the T . C matrix product."""
        t_vector = np.array([pow(t, 3), pow(t, 2), pow(t, 1), 1])
        q = np.dot(t_vector, self.c)
        return q  # 1 X 3 vector


if __name__ == '__main__' :
    coff_matrix = np.array([[3, 2, 5], [5, 7, 9], [10, 11, 2], [2, 5, 1]])
    curve = CubicPolynomialParametericRepresentation(coff_matrix)
    # generate list of (x, y)
    n_points = 1000
    x = [t for t in arange(0, 1, 1 / n_points)]
    y = []
    for i in x:
        y.append(curve.generate_points_on_representation_vectorized(i))
    xt = [y[i][0] for i in range(n_points)]
    yt = [y[i][1] for i in range(n_points)]
    zt = [y[i][2] for i in range(n_points)]
    '''
    plt.plot(x, xt) # xt
    plt.title('Parametric representation, x vs t')
    plt.ylabel('x')
    plt.xlabel('t')
    plt.show()
    plt.plot(x, yt) # yt
    plt.title('Parametric representation, y vs t')
    plt.ylabel('y')
    plt.xlabel('t')
    plt.show()
    plt.plot(x, zt) # zt
    plt.title('Parametric representation, z vs t')
    plt.ylabel('z')
    plt.xlabel('t')
    plt.show()
    '''
    fig = go.Figure(data=[go.Scatter3d(x=xt, y=yt, z=zt,
                                       mode='markers',
                                       marker=dict(
                                           size=1,
                                           # set color to an array/list of desired values
                                           colorscale='Viridis',  # choose a colorscale
                                           opacity=0.8
                                       ))])
    fig.show()

# + [markdown] id="lCrGlAGD1njR" colab_type="text"
# ### Test your understanding
#
#
# 1.
WAP to generate curves for parametric representation of: # * Straight line # * Circle # * Ellipse # # + [markdown] id="j0NRchs22mCx" colab_type="text" # ### Tangent vector, $Q^{'}(t)$ # Represents the parametric (as opposed to _geometric_) slope: # \begin{equation*} # Q'(t) = [x'(t), y'(t), z'(t)] \\ # x'(t) = 3a_{x}t^2 + 2b_{x}t + c_{x}\\ # y'(t) = 3a_{y}t^2 + 2b_{y}t + c_{y}\\ # z'(t) = 3a_{z}t^2 + 2b_{z}t + c_{z} # \end{equation*} # # * Demo: # * https://www.geogebra.org/m/uTh6gAh2 # + id="CcKDDWI29NYC" colab_type="code" colab={} # Implement derivative curves for the input parametric curve. # + [markdown] id="00nG1Hpe28zl" colab_type="text" # ## Continuity, a measure of smoothness for curves # Intention is to create _realistic_ smooth shapes. The concept of _Continuity_ represents the notion smoothness in parametric curves. Instead of a single smooth curve, realism is often achived using _piecewise_ smooth curves or curve segments. # # * Geometric: (Viewing curve purely as a shape) # * Usually defined in terms of parameterization, but the choice of parameterization does not affect the outcome. # * If a shape is represented using parameteric representations $P_1$ and $P_2$ then the if it is $G_1$ continuous in $P_1$ then it will also be the same in $P_2$. # * $G^0$: Endpoints match # * $G^1$: Slope of curves match in direction, not neccasarily the magnitude: # * $TK_1 = \alpha TK_2$ # * $G^2$: Curvature of curves match # * Parametric: (Viewing curve as a function) # * Cannot be defined by the shape of the curve alone, it requires parameterization of the curve. # * We cannot define parametric continuity of an explict or an implicit curve. But geometric continuity of an explict or implicit representation of a curve is a valid concept. # * $C^0, C^1, C^2$ # * d${Q_{1}^n} / dt^n = d{Q_{2}^n} / dt^n$ for $C^n$. 
# * There are $C_1$ curves that are not $G_1$: # * **EXPLAIN, YOUR assignment!!** # # # # + [markdown] id="ucQyJcmS-xIb" colab_type="text" # ## An example of discontinuity # + id="EO0XNPKL-p2C" colab_type="code" outputId="d095d0d8-6bb4-427a-d336-2b3e30248542" colab={"base_uri": "https://localhost:8080/", "height": 295} coff_matrix1 = np.array([[3, 2, 5],[5, 7, 9],[10, 11, 2], [2, 5, 1]]) curve1 = CubicPolynomialParametericRepresentation(coff_matrix1) coff_matrix2 = np.array([[3.9, 2.9, 5.9],[5.9, 7.9, 9.9],[10.9, 11.9, 2.9], [2.9, 5.9, 1.9]]) curve2 = CubicPolynomialParametericRepresentation(coff_matrix2) # generate list of (x, y) n_points = 1000 x1 = [t for t in arange(0, 1, 1 / n_points)] y1 = [] x2 = [t for t in arange(1, 2, 1 / n_points)] y2 = [] for i in x1: y1.append(curve1.generate_points_on_representation_vectorized(i)) for i in x2: y2.append(curve2.generate_points_on_representation_vectorized(i)) xt1 = [y1[i][0] for i in range(n_points)] xt2 = [y2[i][0] for i in range(n_points)] plt.plot(x1, xt1) # xt plt.plot(x2, xt2) # xt plt.title('Parametric continuity, x vs t') plt.ylabel('x') plt.xlabel('t') plt.show() # + [markdown] id="MAKK2tCp3Vx1" colab_type="text" # ### Test your understanding: # # # * Plot (first and second) gradients of a cubic polynomial curves. # * WAP to implement a continuity estimator, given constraint matrices, $C$ as input. # * Examples where $C^1$ does not imply $G^1$. 
# **Bonus point if you can clearly explain it to me.**
#
#

# + id="vzaSNH3OJB8j" colab_type="code" colab={}
# Example code to test continuity of input (parametric) curves
def determine_curve_continuity(coff_matrix1, coff_matrix2, t_joint=1.0):
    """Check parametric continuity (C0, C1, C2) of two cubic segments.

    Each coefficient matrix is 4x3 ([a; b; c; d] rows, (x, y, z) columns),
    so Q(t) = [t^3, t^2, t, 1] . C.  Both segments are evaluated at the same
    parameter value ``t_joint``; in the discontinuity example above the first
    segment runs on t in [0, 1] and the second on [1, 2], so they meet at
    t = 1 (the default).

    Returns a length-3 int array ``c`` with c[n] = 1 when the n-th parametric
    derivatives of the two segments agree at the joint.  Checking stops at
    the first mismatch, since C^n continuity presupposes C^(n-1).
    """
    t = float(t_joint)
    # Basis rows of T for Q, Q' and Q'' of a cubic in power form.
    derivative_bases = [
        np.array([t ** 3, t ** 2, t, 1.0]),
        np.array([3.0 * t ** 2, 2.0 * t, 1.0, 0.0]),
        np.array([6.0 * t, 2.0, 0.0, 0.0]),
    ]
    c = np.zeros(3, dtype=int)
    for n, basis in enumerate(derivative_bases):
        if np.allclose(np.dot(basis, coff_matrix1), np.dot(basis, coff_matrix2)):
            c[n] = 1
        else:
            break
    return c


if __name__ == '__main__':
    c = determine_curve_continuity(coff_matrix1, coff_matrix2)
    if c[0] == 0:
        print("Curves segments are not continous at boundary!!")
    else:
        # Report every satisfied order, highest first.  Off-by-one fixed:
        # valid indices are c.shape[0]-1 down to 0 (the old range started at
        # c.shape[0], which indexed past the end and skipped index 0).
        for i in range(c.shape[0] - 1, -1, -1):
            if c[i] == 1:
                print("The curve segments have: C", i, "continuity!!")
            else:
                continue
# parametric_representations/param_rep_intro_lec_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/UtkarshAIITB/IPL-Data-Analysis/blob/main/EDA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="vJblh-EI14xX" # %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import plotly.express as px from plotly.subplots import make_subplots # + colab={"base_uri": "https://localhost:8080/", "height": 513} id="7yuHWvHlX2C0" outputId="0cbec989-d858-413a-b0bf-6d30644b5493" from google.colab import drive drive.mount('/content/drive') mt = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/CSV/matches.csv') #reading the dataset deli = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/CSV/deliveries.csv') mt.head() # + colab={"base_uri": "https://localhost:8080/", "height": 444} id="v5GTmwqIVroZ" outputId="6a98ae2c-9834-416b-9851-146b517735ff" deli.head() # + [markdown] id="iTKPEekRE_J8" # # Pre-processing of Data # + colab={"base_uri": "https://localhost:8080/"} id="la4FWtQL3vt1" outputId="3dffd46b-a31c-4bac-942f-0686ed15caf5" mt.columns #printing columns # + colab={"base_uri": "https://localhost:8080/"} id="UAdoBJ3TnCEM" outputId="38305131-030e-4af7-80a3-986dd4490ad9" print(mt.shape) mt.info() #date is an object, not datetime function # + colab={"base_uri": "https://localhost:8080/"} id="oixDccPf3v7C" outputId="dea5e7ca-a592-423c-899a-f9ab858e0472" mt.isnull().sum() #umpire 3 has a lot of null values, dropping that column # + id="SNAuNKEv3wBj" df = mt.drop(columns = ['umpire3']) null_city = df[df['city'].isna()] # + [markdown] id="40Ar0HzEyyER" # df = dropped column dataset # mt = original dataset # + 
colab={"base_uri": "https://localhost:8080/", "height": 697} id="sKRei2l13wE2" outputId="6c8c08b4-733b-415f-8093-de47ff3fab50" null_city # + colab={"base_uri": "https://localhost:8080/"} id="G2jBYK5B3wIB" outputId="e78b5702-45f7-4606-8ace-8bd0d45f9dbc" sub = 'Dubai International Cricket Stadium' print(df['venue'].str.find(sub).value_counts()) #This confirms only 7 rows have Dubai International Cricket Stadium which have city = NaN # + colab={"base_uri": "https://localhost:8080/"} id="nOcjMQH03wLb" outputId="e264d52a-b319-423d-c369-65bfaa9ac9bc" df['city'].fillna('Dubai', inplace = True) df.isnull().sum() #city null values removed # + [markdown] id="FKNzLS593rjJ" # Dropping rows which don't have any winner # + colab={"base_uri": "https://localhost:8080/", "height": 360} id="Nbt97um-3wOf" outputId="46164acd-ee44-4898-b088-073bbbbe1015" winner_null = df[df['winner'].isna()] winner_null #winner and playerofmatch are missing at same # + colab={"base_uri": "https://localhost:8080/"} id="ru_q48Xb3wRs" outputId="27d8e832-e9e2-49c9-a4aa-b462dc0172fa" df = df[df['winner'].notna()] df.isnull().sum() # + [markdown] id="l-CYJ3U839E5" # Checking umpire null values # + colab={"base_uri": "https://localhost:8080/", "height": 228} id="XXWE6OQX3wUz" outputId="4cf7ce75-b942-4e39-a2d9-eed0c7e5ec2c" umpire_null = df[df['umpire1'].isna()] umpire_null #umpire 1 and umpire 2 have null values at same rows # + colab={"base_uri": "https://localhost:8080/", "height": 300} id="s1w9Y2VD3wX4" outputId="f13f7b9a-8b59-4be9-bc53-87607cde574d" df.describe() # + colab={"base_uri": "https://localhost:8080/"} id="H6_qtfGO4GEh" outputId="4f9b636a-64e8-49c2-b292-4d26bff71376" df['team1'].unique() # + colab={"base_uri": "https://localhost:8080/"} id="MXczcNQR4GNC" outputId="e0b94ad0-c6bd-439a-c65e-7eae0bac15eb" df['team2'].unique() # + [markdown] id="nfbxnVovXDHT" # **Replacing:** <br> # 'Rising Pune Supergiant' = 'Rising Pune Supergiants' = 'Pune Warriors' <br> # 'Delhi Daredevils' = 'Delhi Capitals' 
<br> # # + colab={"base_uri": "https://localhost:8080/"} id="JrB1C--A4GQb" outputId="1a92e905-3ac0-4421-fdce-b26838961167" df['team1'] = df['team1'].replace(['Rising Pune Supergiant' , 'Rising Pune Supergiants', 'Delhi Daredevils'], ['Pune Warriors' , 'Pune Warriors', 'Delhi Capitals']) df['team2'] = df['team2'].replace(['Rising Pune Supergiant' , 'Rising Pune Supergiants', 'Delhi Daredevils'], ['Pune Warriors' , 'Pune Warriors', 'Delhi Capitals']) df['winner'] = df['winner'].replace(['Rising Pune Supergiant' , 'Rising Pune Supergiants', 'Delhi Daredevils'], ['Pune Warriors' , 'Pune Warriors', 'Delhi Capitals']) df['toss_winner'] = df['toss_winner'].replace(['Rising Pune Supergiant' , 'Rising Pune Supergiants', 'Delhi Daredevils'], ['Pune Warriors' , 'Pune Warriors', 'Delhi Capitals']) print(df['team1'].unique()) print(df['team2'].unique()) print(df['winner'].unique()) # + colab={"base_uri": "https://localhost:8080/", "height": 478} id="sREXuO-uXBEP" outputId="aa89c18f-99e2-436f-b299-4d2366d29d5f" df.head() # + colab={"base_uri": "https://localhost:8080/"} id="DR2zpEchfcFo" outputId="47336e04-3d8c-4872-8f63-ea9d3e91e0ef" # df['date'].unique() df['date'] = pd.to_datetime(df['date']) df.info() #dtype of date becomes a datetime object # + [markdown] id="BDhMDbs2ix9d" # # Encoding # # 'toss_decision' can be hot encoded <br> # 'result' can be label encoded due to a heirarchial presence <br> # 'dl_applied' is already encoded # # + colab={"base_uri": "https://localhost:8080/"} id="x4QJG0yTXBHz" outputId="04ac5929-9406-4983-c2aa-06b88b9644ed" df['toss_decision'].value_counts() # toss-decision can be hot encoded # + colab={"base_uri": "https://localhost:8080/"} id="YiS0hqPbXBK4" outputId="4c3679ad-7d2a-4c34-f8d2-ce96a1317fdf" df['result'].value_counts() #one hot encoding # + id="Mk5SJoJqbpr-" ed = df.copy() # + [markdown] id="K2U2q0KnCEE3" # No result value was removed from dataframe and hence for result too we have only two values: normal and tie, thus one hot encoding can 
be applied over it # + id="eP8215RAXBN2" #importing required libraries for encoding from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import LabelEncoder # + colab={"base_uri": "https://localhost:8080/", "height": 206} id="JL7HlOW-XBQx" outputId="f12f85de-9b91-4b79-b612-a14bfbcfd0b9" dummy_td = pd.get_dummies(df.toss_decision) dummy_td.head() # + colab={"base_uri": "https://localhost:8080/", "height": 478} id="1F05XZWyXBTl" outputId="07ac99d9-5169-4535-865a-aa6e1df33b92" df = pd.concat([df, dummy_td], axis = 'columns') # df.head() df=df.drop(columns = ['toss_decision', 'bat']) # df.head() #hot encoded the toss_decision : field and bat dummy_res = pd.get_dummies(df.result) # normal and tie = two columns df = pd.concat([df, dummy_res], axis = 'columns') # df.head() df=df.drop(columns = ['result', 'tie']) df.head() # + colab={"base_uri": "https://localhost:8080/"} id="PbpVmlArCXnd" outputId="5ce4336d-b55c-4799-8475-9ec3e35844a2" df.shape #perfect # + [markdown] id="Mepw1TN0FWsY" # # EDA # + [markdown] id="j6sDv3MhiTM9" # ## Total Matches played # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="GKqx9UiDiSQ9" outputId="7e52316a-eae1-428f-9c0f-4fe352c48f60" # ticktext=['2008', '2009', '2010','2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019'] fig = px.histogram(mt, x="season", title = 'Total Matches', labels = { 'season' : 'Season', 'count' : 'Count' } ) fig.update_layout(bargap=0.2, font=dict( size=20), title={ 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top' } ) fig.show() # + [markdown] id="gN_O3C1IbxBS" # ## Man of the Match # + colab={"base_uri": "https://localhost:8080/", "height": 644} id="xwfy5S0kCXrr" outputId="e92ab2df-274c-4f69-b7ba-79060854097d" mom = df['player_of_match'].value_counts()[:10].rename_axis('Name').reset_index(name='count') plt.figure(figsize=(20,10)) sns.set_style("darkgrid") sns.set(font_scale = 1.5) sns.barplot(x = mom['Name'], y = mom['count'], dodge=False) 
plt.xlabel('Players') plt.ylabel('No. of times') plt.title('Man of the Match') plt.savefig('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/Plots/mom.png') plt.show() # + [markdown] id="V3BYGaelMtG4" # <NAME> followed by <NAME> have the largest number of MoM # + [markdown] id="yTdBB745b33V" # ## Max wins # + colab={"base_uri": "https://localhost:8080/", "height": 478} id="ujHHJDPTCXvB" outputId="f93252a2-1726-4f73-a4e4-ec726b8ee19a" df.head() # + colab={"base_uri": "https://localhost:8080/", "height": 426} id="MnoI2V__CXxy" outputId="d668e1c0-dcea-4ffd-c9ab-e832d5130a27" mw = df['winner'].value_counts().rename_axis('Team').reset_index(name='wins') mw # + colab={"base_uri": "https://localhost:8080/"} id="lzzbjCNkXBe7" outputId="ebcf2c12-c7a1-448d-8383-419d03dd7662" df['winner'].value_counts()[:1] #printing the best # + colab={"base_uri": "https://localhost:8080/", "height": 394} id="uqvPU_HvXBj5" outputId="af5a9cb3-3106-4af3-c0d7-bdf73c79315e" win = pd.DataFrame() year = [] for i in range(2008, 2019): ss = df[df.season == i] win = pd.concat([win , ss['winner'].value_counts()[:1].rename_axis('Team').reset_index(name='wins')], axis = 'rows') year.append([i]) yr = pd.DataFrame(year , columns = ['Year']) yr.reset_index(drop=True, inplace=True) win.reset_index(drop=True, inplace=True) win = pd.concat([win, yr], axis = 'columns', ) win # + id="BzxKYyqFXBm7" # fig_dim = (22,15) # fig, ax = plt.subplots(figsize = fig_dim) # sns.barplot(x = win['Team'], y = win['wins'], hue = win['Year']) # def change_width(ax, new_value) : # for patch in ax.patches : # current_width = patch.get_width() # diff = current_width - new_value # patch.set_width(new_value) # patch.set_x(patch.get_x() + diff * .5) # change_width(ax, 0.35) # plt.xlabel('Teams') # plt.ylabel('Matches Won') # plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Cjf1ZinKULsQ" outputId="f613e524-ee1a-4114-cc95-8b39ff61a3e2" win["Year"] = win["Year"].astype(str) figa = px.bar(win, x = 
'Year', y='wins', color = 'Team', title = 'Max Matches won each year team-wise') figa.update_layout( font=dict( size=20), title={ 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top' } ) figa.show() # + id="YT8xRr7mzh0m" # + [markdown] id="IhemKcyQcAZ5" # ## Hosting Preferences # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="0LFAutmxqQRT" outputId="ddf771de-fafc-4f92-8a83-ff0420b807e1" host = df['city'].value_counts()[:15].rename_axis('City').reset_index(name='Match') # plt.figure(figsize=(8,8)) # plt.pie(host['Match'], labels = host['City'], wedgeprops={'edgecolor':'black'}, autopct='%1.1f%%') # plt.tight_layout() # plt.show() fig = px.pie(host, values = 'Match', names = 'City', title = 'Top 15 Hosting Cities') fig.update_layout( font=dict( size=15), title={ 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top' } ) fig.show() # + [markdown] id="eHhhbm-dRydi" # Mumbai hosted the maximum number of matches followed by Kolkata # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Ug2UWY2GzgMM" outputId="66310c84-1790-4fa4-f5ee-c5a31718980d" venueop = df['venue'].value_counts().rename_axis('Venue').reset_index(name='cnt') # venueop.head() fig = px.bar(venueop, x="cnt", y="Venue", orientation = 'h',labels = { 'cnt': 'No. 
of times' }) # fig.update_layout(autosize=False, # width=1500, # height=1000 # # paper_bgcolor="LightSteelBlue") # ) fig.update_layout( title_text = 'Famous Venues', yaxis=dict(autorange="reversed") ) fig.show() # + [markdown] id="XybQRpywcK0f" # ## Toss Analysis # + colab={"base_uri": "https://localhost:8080/", "height": 542} id="KQLJ3SKWZGPq" outputId="cbe7b094-2963-49db-fa5a-0e18d63e4bef" toss_win = df['toss_winner'].value_counts().rename_axis('Team').reset_index(name='Toss_wins') total_games = df['team1'].value_counts()+df['team2'].value_counts() total_games = total_games.rename_axis('Team').reset_index(name='Total_Matches').sort_values('Total_Matches', ascending = False).reset_index().drop(columns = 'index') toss = pd.merge(toss_win, total_games, on ='Team') toss = pd.merge(toss, mw, on='Team') toss['Percent_Tosswin'] = (toss['Toss_wins']/ toss['Total_Matches']) * 100 toss['Percent_wins'] = (toss['wins']/toss['Total_Matches'])*100 toss = toss.sort_values('Percent_wins', ascending = False).reset_index().drop(columns = 'index') import plotly.graph_objects as go fig = go.Figure() fig.add_trace(go.Bar(x=toss['Team'], y=toss['Percent_wins'], name='Percent_wins', )) fig.add_trace(go.Bar(x=toss['Team'], y=toss['Percent_Tosswin'], name='Percent_Tosswin', )) fig.update_layout(barmode='group', bargap=0.15, # gap between bars of adjacent location coordinates. bargroupgap=0.0, # gap between bars of the same location coordinate. 
title_text = 'Comparison of wins and toss percentage', font=dict( size=15), title={ 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top' } ) fig.show() # + id="N1scMAZSzcK2" # + [markdown] id="HXkB0SbdyO35" # **Does winning toss helps in clinching win?** <br> # Influence = Won both toss and match = 52.3% # ~Influence = Won toss and lost = 47.7% # Hence winning the toss has a slight advantage # + colab={"base_uri": "https://localhost:8080/", "height": 534} id="qbVz2jYFltWa" outputId="e4119e62-e48d-4216-b70c-d5ac2d4cb49c" stat = df[['team1', 'team2','toss_winner', 'winner']] print(stat.shape) # stat.head() stat = stat.assign(res = "") stat['res'] = np.where(df['toss_winner'] == df['winner'], True, False) # stat.head() fig = px.pie(stat, values = stat['res'].value_counts(), names = ['Influence', '~Influence'], title = 'Toss win helps?', hole = 0.4, color_discrete_sequence=px.colors.sequential.RdBu) fig.update_layout(autosize=False, width=500, height=500, font=dict( size=15), title={ 'y':0.9, 'x':0.5, 'xanchor': 'center', 'yanchor': 'top' } # paper_bgcolor="LightSteelBlue") ) fig.show() # + colab={"base_uri": "https://localhost:8080/", "height": 224} id="KC-DJ1Lto9Rx" outputId="95019066-55a1-4316-fc24-de9a124c576b" print(stat.shape) stat.head() # + id="uuOaQ9W4o9Ut" #val1 = won both toss and the game #val2 = won toss but couldn't the game #val3 = lost both toss and the game #val4 = lost the toss but won the game #val5 = total games by the team # + id="UGK7xXDBZGky" def piechart(team): val1 = stat[ (stat['toss_winner'] == team) & (stat['winner'] == team) ].count() val2 = stat[ (stat['toss_winner'] == team) & (stat['winner'] != team) ].count() val3 = stat[ (stat['toss_winner'] != team) & (stat['winner'] != team) & ( ( stat['team1'] == team) | (stat['team2'] == team) ) ].count() val4 = stat[ (stat['toss_winner'] != team) & (stat['winner'] == team) ].count() val5 = stat[( ( stat['team1'] == team) | (stat['team2'] == team) )].count() fig = make_subplots(rows=1, cols=2, 
specs=[[{'type':'domain'}, {'type':'domain'}]]) fig.add_trace( go.Pie(values = [val1[1], val2[1]], labels = ['Won', 'Lost'], name="Toss win"), 1,1 ) fig.add_trace( go.Pie(values = [val4[1], val3[1]], labels = ['Won', 'Lost'], name="Toss Lost"), 1,2 ) fig.update_traces(hole=.4, hoverinfo="label+percent") fig.update_layout( title_text = team, annotations=[dict(text='Toss_win', x=0.18, y=0.5, font_size=20, showarrow=False), dict(text='Toss_lost', x=0.82, y=0.5, font_size=20, showarrow=False)] ) fig.show() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ywcyEZciUWAu" outputId="c99001ea-aea1-431a-ea59-4e6add03970e" team_list = df['team1'].unique() for team in team_list: piechart(team) # + id="28bK6vRcdCd9" colab={"base_uri": "https://localhost:8080/", "height": 372} outputId="512ce992-c9fc-4bae-db2c-c306ad4e0cbd" ed.drop(columns = ['id', 'umpire1', 'umpire2', 'venue'], inplace = True) ed.head() # + id="x7fgQ1n0dChK" colab={"base_uri": "https://localhost:8080/"} outputId="3e8abfa7-45b9-4f01-ed53-ba5d9b809aef" toss_yearwise = ed.groupby(by = 'season')['toss_decision'].value_counts() toss_yearwise # + id="cXenakSOdCsq" colab={"base_uri": "https://localhost:8080/", "height": 624} outputId="de637795-a329-4527-a35c-7dcaac596f52" plt.figure(figsize=(20,10)) sns.set(font_scale = 1.5) sns.countplot(x='season',hue='toss_decision',data=ed) plt.savefig('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/Plots/tossd.png') plt.show() # fig = px.histogram(ed, x="season", color = 'toss_decision') # fig.update_layout(bargap=0.2) # fig.show() # + id="JCneXSVZiMkW" colab={"base_uri": "https://localhost:8080/"} outputId="0eb9ec46-e993-46e1-d09a-8bf5ea4222d8" team_year = ed.groupby(by = 'season')['winner'].value_counts() team_year # + id="ZQswdRLPWuFf" colab={"base_uri": "https://localhost:8080/", "height": 76} outputId="a88cdb67-5198-43fb-aecb-63a197c7ecd6" sns.color_palette() # + [markdown] id="vVMomThzlpMV" # ## Marginal wins by runs and wickets # + 
id="dFJO7owIeDZi" colab={"base_uri": "https://localhost:8080/", "height": 496} outputId="fde16b21-0a66-4134-d3f4-72d2aa2d46d9" mt.head() # + id="rugm1fFweXdi" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="7be62f5b-a629-4350-98d9-f8fcb6e195ba" run_win = mt.sort_values('win_by_runs', ascending = False) run_win['Versus'] = run_win['team1'] + ' vs ' + run_win['team2'] run_win = run_win[['Versus', 'win_by_runs']].reset_index().drop(columns = 'index') # run_win.head() # plt.figure(figsize=(20,10)) fig = px.bar(run_win.head(), x="win_by_runs", y="Versus", orientation='h') fig.show() # + id="MbMPgIHReurD" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="72069c4a-91f2-4909-b8ad-bb97ffa203f7" wick_win = mt.sort_values('win_by_wickets', ascending = False).head() wick_win['Versus'] = wick_win['team1'] + ' vs ' + wick_win['team2'] wick_win = wick_win[['Versus', 'win_by_wickets']].reset_index().drop(columns = 'index') fig = px.bar(wick_win.head(), x="win_by_wickets", y="Versus", orientation='h') fig.show() # + [markdown] id="VfiXZRZomkov" # ## Team1 vs Team2 stats # + id="-dr1jA8MmKOn" colab={"base_uri": "https://localhost:8080/", "height": 478} outputId="c30f08bb-0d2c-481d-e009-57cae50d77a4" df.head() # + id="qQr4ZYGUTDPK" def teamvsteam(team1, team2): matka = df[ ( (df['team1'] == team1) & (df['team2'] == team2) ) | ( (df['team1'] == team2) & (df['team2'] == team1) ) ] matka = matka[['season','team1', 'team2', 'winner']].reset_index().drop(columns = 'index') plt.figure(figsize=(20,10)) sns.set(font_scale = 1.5) sns.countplot(x='season', data = matka, hue = 'winner') plt.savefig('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/Plots/team1vsteam2.png') plt.show() # + id="9Teg_8kNTl4g" colab={"base_uri": "https://localhost:8080/"} outputId="46132771-b789-42b5-db59-03f1c37f8ba4" df['team1'].unique() # + id="6f_uYjseTycK" colab={"base_uri": "https://localhost:8080/", "height": 624} 
outputId="0aa9dc72-5c6e-4496-e04e-03036461e2a4" teamvsteam('Mumbai Indians', 'Chennai Super Kings' ) # + [markdown] id="Sfoc0F0B-lWo" # ## Batsman Comparison # + id="uqyJSEtQWLVX" colab={"base_uri": "https://localhost:8080/", "height": 444} outputId="71a9421b-1e50-4fdf-9db7-15f6fa679d65" deli.head() # + id="uFyqlyp55o9T" def tell(num): # i = 0 lst = [] columns = ['batsman', 'total_runs'] for i in range(num): batsman = input('Enter batsman name: ') bat = deli[deli['batsman'] == batsman] val = bat['batsman_runs'].sum() print(val) lst.append([batsman, val]) bore = pd.DataFrame(lst, columns = columns) fig = px.scatter(bore, x = 'batsman', y = 'total_runs', size = 'total_runs') return fig # + id="uWh4co8ZzgQo" colab={"base_uri": "https://localhost:8080/", "height": 889} outputId="15056003-acd5-43d9-ef24-8840c8026b0d" tell(10) # + id="kls9RUulEdCA" # BCJ Cutting # <NAME> # SR Watson # STR Binny # TS Mills # MC Henriques # SW Billings # <NAME> # JD Unadkat # <NAME> # <NAME> # VY Mahesh # CH Gayle # <NAME> # <NAME> # RG Sharma # DA Warner # YK Pathan # SR Watson # SK Raina # <NAME> # <NAME> # + id="H4I9KIpDEdU0" lis = deli['batsman'].unique() # + id="Gp703-HxEdYZ" colab={"base_uri": "https://localhost:8080/"} outputId="24cd50dd-6d44-41c9-8a7a-cb6f269d2bee" print(lis[208]) # + id="ZZdRwJLwEdbO" # + id="xSk42_cFEdd6" colab={"base_uri": "https://localhost:8080/", "height": 444} outputId="ed907fac-7816-41ae-c8bb-3e7fa1267ca7" deli.head() # + id="qHuztzPdQ5h3" # thak = deli['batsman'].unique() # len(thak) # + id="F6C4QLCuEeVA" # save = deli[deli['batsman'] == 'DA Warner'] # + id="TNXoMGbPEeYG" # save.head() # + [markdown] id="9lCuOutxBvhF" # ## Most 50's and 100's # + id="iKGecZo8Eea-" colab={"base_uri": "https://localhost:8080/", "height": 461} outputId="81d61f31-9dd2-4e3d-afc0-e21b1940af81" ball = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/IPL Score_Analysis/CSV/IPL Ball-by-Ball 2008-2020.csv') print(ball.shape) ball.head() # + id="zEMhoRLFEedl" ball.drop(columns = 
['bowler', 'extra_runs', 'total_runs', 'non_boundary', 'is_wicket', 'dismissal_kind', 'player_dismissed', 'fielder', 'extras_type'], inplace = True) # + id="YBEvwh5nWqGO" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="d0fcb9c9-a33e-45c7-f0b2-2c872464b8e8" ball.head() # + id="v42gCYdGcCup" lst_f = [] columns = ['Batsman', 'Fifties', 'Hundreds'] list_bat = ball['batsman'].unique() # print(len(list_bat)) for batsman in list_bat: save = ball[ball['batsman'] == batsman] save_copy = save.copy() save_copy = save_copy.groupby(by = ['id']).sum()['batsman_runs'].reset_index() fifty = 0 hund = 0 for runs in save_copy.batsman_runs: # print(runs) if ( (runs >= 50) and (runs<100) ): fifty+=1 elif (runs>=100): hund+=1 else: pass lst_f.append([batsman, fifty, hund]) fun = pd.DataFrame(lst_f, columns = columns) # + id="WZXx48UXeId7" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="5bbcfd86-a750-4043-ce98-fa4350e61873" pachas = fun.sort_values('Fifties', ascending = False).reset_index().drop(columns = 'index') pachas.head() # + id="QlTFfghrfsie" colab={"base_uri": "https://localhost:8080/"} outputId="09c9b4f5-426c-415d-8cad-6f719795508e" pachas.shape # + id="lV2RmhM8gLNr" # pachas.iloc[160] # + id="QV11UxuigsUS" pachas = pachas.iloc[:-377] # + id="-QPuU7ZKfbDH" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="2d976c5a-e3a1-4e97-b2a2-4dbab0485bbc" fig = px.line(pachas, x="Batsman", y="Fifties", title='Fifties plot') fig.show() # + id="Tkg6TZzcfxSX" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="e0b29643-a521-4ed4-f959-cad4b26a76b9" sau = fun.sort_values('Hundreds', ascending = False).reset_index().drop(columns = 'index') sau.head() # + id="WB8kSxoqhDpm" # sau.iloc[36] # + id="8UiRGdKKhR5S" sau = sau.iloc[:36] # + id="8T4Ynrl8f-Xk" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="108d5a1d-f3ae-43a3-a624-489fe3a6926a" fig = px.line(sau, x="Batsman", y="Hundreds", 
title='Hundreds plot') fig.show() # + [markdown] id="az7ATk75nS40" # # Inferences # * 2011, 2012 and 2013 have more number of matches as compared to other years # * Most Valuable Player : <NAME> followed by <NAME> # * MI and CSK had the highest number of wins three times each thus being the most successful teams # * Famous Cities for hosting: Mumbai (Wankhede Stadium), Kolkata (Eden Gardens), Delhi (Firoz Shah) # * Half of the teams have percent win > percent toss win while the other half of teams have opposite stats. # * Luckiest team : Deccan Chargers but they don't have that high percentage # of wins, whereas KKR has same perecentage of wins and toss wins. # * Overall Toss win doesn't help much as only 52.3% of matches had toss influenced # * Toss losing has been fatal for Gujarat Lions as well as Deccan Chargers. It didn't matter much to top teams like MI and CSK who had high win % irrespective of losing or winning the toss. # * Upto 2013 Batting remained a popular option upon winning the toss but afterwards it found a steep decline and fielding is the popular opinion now. # + id="3CvgpilaWuJB" # + id="njrPEL0MWuME"
EDA.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Credit-risk modelling: train on the 2019 loan book, evaluate on 2020-Q1
# loans, comparing Logistic Regression vs. Random Forest on both raw and
# standardized features.

import numpy as np
import pandas as pd
from pathlib import Path
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')

from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split

# # Reading the CSV files + data cleansing

train_df = pd.read_csv(Path('Resources/2019loans.csv'))
test_df = pd.read_csv(Path('Resources/2020Q1loans.csv'))

train_df.head()

test_df.head()

# +
# preparing dataset for training data: 'loan_status' is the prediction target
# -

x_tr = train_df.drop(columns=["loan_status"])
y_tr = train_df["loan_status"]
x_tr.head()

# +
# preparing dataset for testing data
# -

x_te = test_df.drop(columns=["loan_status"])
y_te = test_df["loan_status"]
x_te.head()

# Convert categorical data to numeric (one-hot encoding) for training data
x_tr = pd.get_dummies(x_tr)

# Convert categorical data to numeric (one-hot encoding) for testing data
x_te = pd.get_dummies(x_te)

# Align the test design matrix with the training one.
# FIX: only adding the missing dummy columns (the original loop) leaves the
# test columns in a different ORDER than the training columns, and keeps any
# test-only columns; sklearn matches features positionally, so that silently
# scrambles the model inputs. reindex() adds missing columns (filled with 0),
# drops test-only ones, and enforces the training column order.
x_te = x_te.reindex(columns=x_tr.columns, fill_value=0)

# # Model --> Fit --> Predict

# Train the Logistic Regression model on the unscaled data and print the model score

risk_model = LogisticRegression()
risk_model.fit(x_tr, y_tr)
predicted = risk_model.predict(x_tr)

risk_model.score(x_tr, y_tr)

risk_model.score(x_te, y_te)

# Train a Random Forest Classifier model and print the model score

forest_classifier_mode = RandomForestClassifier(n_estimators=1000, random_state=66)
forest_model = forest_classifier_mode.fit(x_tr, y_tr)

forest_model.score(x_tr, y_tr)

forest_model.score(x_te, y_te)

# Scale the data: fit the scaler on the training set only, then apply the same
# transformation to both sets (avoids test-set leakage).

scaler = StandardScaler().fit(x_tr)
x_tr_scaled = scaler.transform(x_tr)
x_te_scaled = scaler.transform(x_te)

# Train the Logistic Regression model on the scaled data and print the model score

risk_model.fit(x_tr_scaled, y_tr)
predicted = risk_model.predict(x_tr_scaled)

risk_model.score(x_tr_scaled, y_tr)

risk_model.score(x_te_scaled, y_te)

# Train a Random Forest Classifier model on the scaled data and print the model score

forest_classifier_mode = RandomForestClassifier(n_estimators=1000, random_state=66)
forest_model = forest_classifier_mode.fit(x_tr_scaled, y_tr)

forest_model.score(x_tr_scaled, y_tr)

forest_model.score(x_te_scaled, y_te)
Credit Risk Evaluator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/takgeun-Oh/mypluralize/blob/master/wineClassification_LogisticResgression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="wwHqPkTkU3nf"
# Classify UCI wine cultivars with L1/L2-regularised logistic regression.
import pandas as pd
import numpy as np

# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="QHrIlQTKU9PE" outputId="3ec08b1d-af01-4419-c21d-94a1ea9ef7d3"
dat_wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                       'wine/wine.data', header=None)
dat_wine.head()

# + id="RoDVHZGQVCkr"
# Column names taken from the UCI "wine" dataset description
# (fixes the 'alchohol' and 'OD208' typos in the original labels).
dat_wine.columns = ['class label', 'alcohol', 'malic acid', 'ash',
                    'alcalinity of ash', 'magnesium', 'total phenols',
                    'flavanoids', 'nonflavanoid phenols', 'proanthocyanins',
                    'color intensity', 'hue',
                    'OD280/OD315 of diluted wines', 'proline']

# + colab={"base_uri": "https://localhost:8080/"} id="wRkSbGTvVIfX" outputId="5823e27c-e639-41b6-8115-84ad219cfe2c"
print('class label:', np.unique(dat_wine['class label']))

# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="VgMjJBN7VPSY" outputId="df780594-ef81-4b57-9aa2-d20e995271d5"
dat_wine.head()

# + id="-IUEZVlNVTvb"
from sklearn.model_selection import train_test_split

X = dat_wine.drop(['class label'], axis=1)
y = dat_wine['class label']

# + id="ZB5hdP9wWAZM"
# Stratify so all three cultivars keep their proportions in both splits.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=1, stratify=y)

# + id="IZByudYtWS5M"
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
# FIX: transform (not fit_transform) the test set — the scaler must be fitted
# on the training data only, otherwise test-set statistics leak into the model
# and the two splits are standardised with different means/variances.
X_test_std = sc.transform(X_test)

# + id="RlFLjZ6PXrJg"
from sklearn.linear_model import LogisticRegression

# Six models: L1/L2 penalty at three regularisation strengths
# (larger C = weaker regularisation). 'saga' supports both penalties;
# NOTE(review): saga may need max_iter > 100 to converge on this data — confirm.
lr1_10 = LogisticRegression(penalty='l1', C=10.0, solver='saga')
lr1_1 = LogisticRegression(penalty='l1', C=1.0, solver='saga')
lr1_0_1 = LogisticRegression(penalty='l1', C=0.1, solver='saga')
lr2_10 = LogisticRegression(penalty='l2', C=10.0, solver='saga')
lr2_1 = LogisticRegression(penalty='l2', C=1.0, solver='saga')
lr2_0_1 = LogisticRegression(penalty='l2', C=0.1, solver='saga')

# + colab={"base_uri": "https://localhost:8080/"} id="cRrTRjXRYfBC" outputId="cbf7f0fa-e0fc-4f12-b003-71f9fc639d0a"
lr2_10.fit(X_train_std, y_train)
print('Training accuracy with L2 and C=10.0:', lr2_10.score(X_train_std, y_train))
print('Test accuracy with L2 and C=10.0:', lr2_10.score(X_test_std, y_test))

# + id="19YPGyX2ecnq"
wineClassification_LogisticResgression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Analysis of Brazilian federal travel expenses (Portal da Transparência):
# download the yearly zip archives, extract them, then explore the 2014
# tickets (Passagem) and payments (Pagamento) files.

import datetime
import errno
import os
import urllib.request
import zipfile as zp

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.offline as pyo

current_path = os.getcwd()

# Create the download directory; tolerate it already existing.
try:
    os.makedirs(f'{current_path}/csv/')
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

# making download since 2013 for actual date (skip archives already present)
for year in range(2013, datetime.datetime.now().year + 1):
    if not os.path.exists(f'{current_path}/csv/{year}.zip'):
        url = f'http://www.portaltransparencia.gov.br/download-de-dados/viagens/{year}'
        urllib.request.urlretrieve(url, f'{current_path}/csv/{year}')
        print(f'Downloading for {year} was complete with success.')

# extract each archive into <year>_extract/ and remove the zip file afterwards
for year in range(2013, datetime.datetime.now().year + 1):
    print(f'unzip {year} file.')
    with zp.ZipFile(f'{current_path}/csv/{year}', 'r') as zip_ref:
        zip_ref.extractall(f'{current_path}/csv/{year}_extract')
        print(f'unzip {year} was successful.')
    os.remove(f'{current_path}/csv/{year}')


def read_2014(name):
    """Read one of the extracted 2014 CSVs (';'-separated, Latin-1 encoded)."""
    return pd.read_csv(f'{current_path}/csv/2014_extract/2014_{name}.csv',
                       sep=';', encoding="ISO-8859-1")


def parse_brl(df, column):
    """Convert a comma-decimal money column (e.g. '1234,56') to float.

    Mutates `df[column]` in place and returns the same frame, so it can be
    chained with read_2014().
    """
    df[column] = pd.to_numeric(df[column].str.replace(',', '.', regex=False))
    return df


passages_df = read_2014('Passagem')
passages_df.head()

# +
# English column names for the one-way/return ticket fields.
passages_df.columns = [
    'PassageId', 'MeanOfTransportOneWay', 'CountryOriginOneWay',
    'DistrictOriginOneWay', 'CityOriginOneWay', 'CountryDestinyOneWay',
    'DistrictDestinyOneWay', 'CityDestinyOneWay', 'CountryOriginReturn',
    'DistrictOriginReturn', 'CityOriginReturn', 'CountryDestinyReturn',
    'DistrictDestinyReturn', 'CityDestinyReturn', 'TicketValue', 'ValueOfFare']

total_null = passages_df.isnull().sum()
total_records = passages_df.shape[0]
print(f'Total Records: {total_records}, Total columns: {passages_df.shape[1]}')

# One row per statistic: column dtype, null count, null percentage.
# (pd.concat replaces DataFrame.append, which was removed in pandas 2.0.)
passages_info = pd.concat([
    pd.DataFrame(passages_df.dtypes).T.rename(index={0: 'Column type'}),
    pd.DataFrame(total_null).T.rename(index={0: 'Null Values'}),
    pd.DataFrame(round(total_null / total_records * 100, 2)).T.rename(index={0: 'Null Values (%)'}),
])
passages_info
# -

passages_df.head()

# +
# Count the placeholder value 'Sem Informação' ("no information") per column.
passages_info2 = {}
for column in passages_df:
    passages_info2[column] = len(passages_df.loc[passages_df[column] == 'Sem Informação'])
passages_info2
# -

# Keep only the one-way ticket columns that the analysis below uses
# (single drop(columns=...) replaces eight consecutive drop calls).
passages_df = passages_df.drop(columns=[
    'PassageId', 'CountryOriginReturn', 'DistrictOriginReturn',
    'CityOriginReturn', 'CountryDestinyReturn', 'CityDestinyReturn',
    'DistrictDestinyReturn', 'ValueOfFare'])

# Ticket values use a comma as the decimal separator.
passages_df['TicketValue'] = passages_df['TicketValue'].str.replace(',', '.', regex=False)
passages_df.head()

# Coerce anything unparseable to NaN rather than raising.
passages_df['TicketValue'] = pd.to_numeric(passages_df['TicketValue'], errors='coerce')
passages_df.info()

passages_df.head()

# +
# Replace zero-valued tickets with the average ticket price.
ticket_value_avg = passages_df['TicketValue'].mean()
print(round(ticket_value_avg, 2))
passages_df.loc[passages_df['TicketValue'] == 0.0, 'TicketValue'] = ticket_value_avg
# -

passages_df.loc[passages_df['TicketValue'] == 641.17]

passages_df.groupby(['MeanOfTransportOneWay']).sum()

# +
# Total spend per means of transport, as a grouped bar chart.
means_transport = pd.DataFrame(passages_df.groupby(['MeanOfTransportOneWay']).sum().reset_index())
means_transport.columns = ['MeanTransport', 'Total']
means_transport_sorted = means_transport.sort_values(by='Total', ascending=False)

data = []
for item in list(means_transport_sorted['MeanTransport'].unique()):
    data.append(
        go.Bar(
            x=list(means_transport_sorted[means_transport_sorted['MeanTransport'] == item]['MeanTransport']),
            y=list(means_transport_sorted[means_transport_sorted['MeanTransport'] == item]['Total']),
            name=item
        )
    )

layout = go.Layout(barmode='group', title='Costs with travels in 2014')
fig = go.Figure(data=data, layout=layout)
pyo.iplot(fig)
# -

passages_df.groupby(['CountryOriginOneWay']).sum()

passages_df.CountryOriginOneWay.unique()

# Normalise the country names that appear in ALL CAPS so they merge with their
# mixed-case duplicates (one mapping + replace() instead of ten .loc cells).
country_fixes = {
    'COLÔMBIA': 'Colômbia',
    'ESTADOS UNIDOS DA AMÉRICA': 'Estados Unidos da América',
    'PANAMÁ': 'Panamá',
    'FINLÂNDIA': 'Finlândia',
    'REPÚBLICA DOMINICANA': 'República Dominicana',
    'ÍNDIA': 'Índia',
    'REINO UNIDO': 'Reino Unido',
    'SUÍÇA': 'Suiça',
    'EQUADOR': 'Equador',
    'AUSTRÁLIA': 'Austrália',
}
passages_df['CountryOriginOneWay'] = passages_df['CountryOriginOneWay'].replace(country_fixes)
# Sanity check: no all-caps variant should remain (expect an empty frame).
passages_df.loc[passages_df.CountryOriginOneWay.isin(country_fixes)]

passages_df.groupby(['CountryOriginOneWay']).sum().reset_index()

# +
# Top 20 origin countries by total ticket spend.
country_origin = pd.DataFrame(passages_df.groupby(['CountryOriginOneWay']).sum().reset_index())
country_origin.columns = ['CountryOrigin', 'Total']
country_origin_sorted = country_origin.sort_values(by='Total', ascending=False)
country_origin_sorted_top = country_origin_sorted.iloc[:20, :]

data = []
for item in list(country_origin_sorted_top['CountryOrigin'].unique()):
    data.append(
        go.Bar(
            x=list(country_origin_sorted_top[country_origin_sorted_top['CountryOrigin'] == item]['CountryOrigin']),
            y=list(country_origin_sorted_top[country_origin_sorted_top['CountryOrigin'] == item]['Total']),
            name=item
        )
    )

layout = go.Layout(barmode='group', title='Top 20 Countries - Travels 2014')
fig = go.Figure(data=data, layout=layout)
pyo.iplot(fig)
# -

# FIX: removed a stray `dtypes(passages_df.ValueOfPassage)` debug line here —
# `dtypes` is not a callable and the column no longer exists, so the line
# raised a NameError and aborted the script at this point.

# +
# collecting info of pagamento.csv
read_2014('Pagamento').head()
# -

read_2014('Trecho').head()

# +
passagem = read_2014('Passagem')
pagamento = read_2014('Pagamento')
trecho = read_2014('Trecho')

# firstly - get 'idenficador do processo de viagem' in trecho.csv, and after search in passagem.csv and pagamento.csv
# second - Use 'Origem Data' for compare the time
# second - Compare 'Origem País', 'Origem UF' and 'Origem Cidade' to collect information for anothers csv
trecho.head()
# passagem[passagem['Identificador do processo de viagem'] == 10011602]
# -

# Show money values as '$1,234.56' in all the summaries below.
pd.options.display.float_format = '${:,.2f}'.format

# Total ticket spend per means of transport / origin state / destination state.
# (One read + parse instead of re-reading and re-parsing the CSV per cell.)
tickets = parse_brl(read_2014('Passagem'), "Valor da passagem")
tickets.groupby("Meio de transporte").sum()["Valor da passagem"]

tickets.groupby("UF - Origem ida").sum()["Valor da passagem"]

tickets.groupby("UF - Destino ida").sum()["Valor da passagem"]

pagamento = read_2014('Pagamento')
pagamento.head()

# Total payments per top-level agency, paying agency, paying unit and payment
# type, plus the standard deviation per payment type.
payments = parse_brl(read_2014('Pagamento'), "Valor")
payments.groupby("Nome do órgão superior").sum()["Valor"]

payments.groupby("Nome do órgao pagador").sum()["Valor"]

payments.groupby("Nome da unidade gestora pagadora").sum()["Valor"]

payments.groupby("Tipo de pagamento").sum()["Valor"]

payments.groupby("Tipo de pagamento").std()["Valor"]
hermes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Day 8 : Data Loading and Manipulation and Visualization (Facies)

# ### You can use the following libraries for your assignment:
# > Numpy, Pandas, Matplotlib, Seaborn, LASIO, Welly

# ## Kindly load the las file of well1513.csv file from the data folder

# ## Perform the below Tasks:
#
# >1. Investigate the component of the data file (number of columns, numbers of observations, Null values, normal statistics)
# 2. Plot well logs together with Facies column (FORCE_2020_LITHOFACIES_LITHOLOGY) as striplog (facies log)
# 3. How many classes in the facies log.
# 4. How many data points per each class.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import lasio
import welly
from IPython.display import display
# -

# NOTE: absolute Windows path — adjust if the notebook runs elsewhere.
df = pd.read_csv("C:/Users/HP/Documents/GitHub/GeoML-2.0/10DaysChallenge/well1513.csv", delimiter=',')
display(df)

# +
# 1. Data Investigation: shape, dtypes, null counts (absolute and %), summary stats.
display('Col, Rows', df.shape)
display('info', df.info())
display('Null values', df.isna().sum())
display('Null values%', df.isna().sum() / df.shape[0] * 100)
display('normal statistics', df.describe())
# -

# 2. Plot well logs together with Facies column (FORCE_2020_LITHOFACIES_LITHOLOGY) as striplog (facies log)
df.keys()

len(df.keys())

df.head()

df.sample(10)

# +
# Drop the identifier / location / string columns so every remaining curve is
# numeric and plottable (one drop(columns=...) replaces a chain of `del`s).
df_remove = df.drop(columns=['Unnamed: 0', 'WELL', 'X_LOC', 'Y_LOC', 'Z_LOC',
                             'GROUP', 'FORMATION'])
df_remove.keys()
# -

len(df_remove.keys())

# +
# One track per remaining curve, all sharing the depth axis.
rows, cols = 1, df_remove.shape[1]
cmap = plt.cm.CMRmap
fig, ax = plt.subplots(nrows=rows, ncols=cols, figsize=(20, 10), sharey=True)

for i in range(cols):
    # cmap(i/25): spread the ~23 tracks across the colormap range
    ax[i].plot(df_remove.iloc[:, i], df_remove.DEPTH_MD, linewidth='1.5', color=cmap(i / 25))
    # depth increases downwards, so invert the y axis
    ax[i].set_ylim(max(df_remove.DEPTH_MD), min(df_remove.DEPTH_MD))
    ax[i].minorticks_on()
    ax[i].grid(which='major', linestyle='dashed', linewidth='1', color='black')
    ax[i].grid(which='minor', linestyle=':', linewidth='0.5', color='black')
    # Title each track from the actual column name so the labels can never
    # drift out of sync with the data (replaces the hand-maintained `names`
    # list; the unused `colrs` list was removed as dead code).
    ax[i].set_title('%s' % df_remove.columns[i])
# -

from striplog import Legend, Striplog

# NOTE: absolute path to the welly tutorial image — adjust if run elsewhere.
legend = Legend.builtin('NSDOE')
strip = Striplog.from_image('C:/Users/HP/Documents/GitHub/welly/tutorial/data/P-130_25_2618.png', 25, 2618, legend=legend)
strip.plot(aspect=2)

# 3. Number of classes in the facies log
display('Number of Facies classes:', df["FORCE_2020_LITHOFACIES_LITHOLOGY"].nunique())

# 4. Number of data points per each class:
display('Number of data points per each Facies class:', df.value_counts("FORCE_2020_LITHOFACIES_LITHOLOGY"))
RY Day8 of 10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # funcX Tutorial
#
# funcX is a Function-as-a-Service (FaaS) platform for science that enables you to convert almost any computing resource into a high-performance function serving device. To do this, you deploy a funcX endpoint agent on the resource, which integrates it into the function serving fabric, allowing you to dynamically send, monitor, and receive results from function invocations. funcX is built on top of [Parsl](https://parsl-project.org), enabling a funcX endpoint to use large compute resources via traditional batch queues, where funcX will dynamically provision, use, and release resources on-demand to fulfill function requests. The function service fabric, which is run centrally as a service, is hosted in AWS.
#
# Here we provide an example of using funcX to register a function and run it on a publicly available tutorial endpoint.

# ## funcX Client
#
# We start by instantiating a funcX client as a programmatic means of communicating with the function service fabric. The client allows you to:
# - Register functions
# - Register containers and execution environments
# - Launch registered functions against endpoints
# - Check the status of launched functions
# - Retrieve outputs from functions
#
# #### Authentication
#
# Instantiating a client will force an authentication flow where you will be asked to authenticate with Globus Auth. Every interaction with funcX is authenticated to allow us to enforce access control on both functions and endpoints. As part of the authentication process we request access to your identity information (to retrieve your email address), Globus Groups management access, and Globus Search. We require Groups access in order to facilitate sharing. Globus Search allows funcX to add your functions to a searchable registry and make them discoverable to permitted users (as well as yourself!).

# +
from funcx.sdk.client import FuncXClient

# Triggers the Globus Auth flow described above on first use.
fxc = FuncXClient()
# -

# Next we define a Python function, which we will later register with funcX. This function simply sums its input.
#
# When defining a function you can specify \*args and \*\*kwargs as inputs.
#
# ##### Note: any dependencies for a funcX function must be specified inside the function body.

def funcx_sum(items):
    """Return the sum of *items* (any iterable of numbers)."""
    return sum(items)

# ## Registering a function
#
# To use a function with funcX, you must first register it with the service, using `register_function`. You can optionally include a description of the function.
#
# The registration process will serialize the function body and transmit it to the funcX function service fabric.
#
# Registering a function returns a UUID for the function, which can then be used to invoke it.

func_uuid = fxc.register_function(funcx_sum, description="tutorial summation", public=True)
print(func_uuid)

# ## Searching a function
#
# You can search previously registered functions to which you have access using `search_function`. The first parameter `q` is searched against all the fields, such as author, description, function name, and function source. You can navigate through pages of results with the `offset` and `limit` keyword args.
#
# The object returned is a simple wrapper on a list, so you can index into it, but it can also render a pretty-printed table.
#
# To make use of the results, you can either just use the `function_uuid` field returned for each result, or for functions that were registered with recent versions of the service, you can load the source code using the search results object's `load_result` method.
search_results = fxc.search_function("tutorial", offset=0, limit=5)
print(search_results[0])
print(search_results)
search_results.load_result(0)
result_0_uuid = search_results[0]['function_uuid']

# ## Running a function
#
# To invoke (perform) a function, you must provide the function's UUID, returned from the registration process, and an `endpoint_id`. Note: here we use the funcX public tutorial endpoint, which is running on AWS.
#
# The client's `run` function will serialize any \*args and \*\*kwargs, and pass them to the function when invoking it. Therefore, as our example function simply takes an arg input (items), we can specify an input arg and it will be used by the function. Here we define a small list of integers for our function to sum.
#
# The Web service will return the UUID for the invocation of the function, which we call a task. This UUID can be used to check the status of the task and retrieve the result.

endpoint_uuid = '4b116d3c-1703-4f8f-9f6f-39921e5864df'  # Public tutorial endpoint

# +
items = [1, 2, 3, 4, 5]
res = fxc.run(items, endpoint_id=endpoint_uuid, function_id=func_uuid)
print(res)
# -

# You can now retrieve the result of the invocation using `get_result()` on the UUID of the task.
#
# ##### Note: We remove the task from our database once the result has been retrieved, thus you can only retrieve the result once.

fxc.get_result(res)

# ## Running batches
#
# You might want to invoke many function calls at once. This can be easily done via the batch interface:

# +
def squared(x):
    """Return x squared."""
    return x**2

squared_uuid = fxc.register_function(squared, searchable=False)

inputs = list(range(10))
# One batch object collects many (args, endpoint, function) triples and
# submits them in a single request.
batch = fxc.create_batch()
for x in inputs:
    batch.add(x, endpoint_id=endpoint_uuid, function_id=squared_uuid)

batch_res = fxc.batch_run(batch)
# -

fxc.get_batch_status(batch_res)

# ## Catching exceptions
#
# When functions fail, the exception is captured, and reraised when you try to get the result.
# In the following example, the 'deterministic failure' exception is raised when `fxc.get_result` is called on the failing function.

# +
def failing():
    """Always raise, to demonstrate how funcX propagates remote exceptions."""
    raise Exception("deterministic failure")

failing_uuid = fxc.register_function(failing, searchable=False)

res = fxc.run(endpoint_id=endpoint_uuid, function_id=failing_uuid)
# -

# Re-raises the remote 'deterministic failure' exception locally.
fxc.get_result(res)
examples/Tutorial.ipynb
-- -*- coding: utf-8 -*-
-- # Trees - continued
--
-- In this chapter we treat general (n-ary) trees, such as the ones you
-- encounter in, for example, the Elm HTML and SVG libraries.
--
-- Moreover we leave the type of the node open: we can later build a tree
-- with numbers as nodes, or strings, or some other type.

import Html exposing (..)
import Svg exposing (..)
import Svg.Attributes exposing (..)
import String exposing (..)

-- A rose tree: a node carries a value and any number of subtrees.
type Tree a = Node a (List (Tree a)) | Nil

-- +
main = Html.text "Hello"
-- compile-code
-- -

-- ## Horizontal form
--
-- A well-known way to render a tree (hierarchy) is the horizontal form,
-- where the root of the tree sits at the "top left".
-- The branches of the tree are then placed lower and indented.
--
-- You encounter this form, for example, in the table of contents of a book,
-- or in the list of files in a file system.

import Html exposing (..)
import String exposing (..)

-- +
type Tree a = Node a (List (Tree a))
type alias Toc = Tree String

-- Render a table of contents as a flat list of divs, one per node.
printToc : Toc -> List (Html msg)
printToc toc = printSubtocs [toc] ""

-- Render a forest; `pref` is the accumulated "..." indentation prefix that
-- grows by one level per depth step.
printSubtocs : List Toc -> String -> List (Html msg)
printSubtocs tocs pref =
    case tocs of
        (Node x s) :: xs ->
            let
                head = Html.div [] [Html.text (pref ++ " " ++ x)]
                subs = printSubtocs s (pref ++ "...")
                next = printSubtocs xs pref
            in
                head :: subs ++ next

        [] ->
            []
-- -

-- Example table of contents: a title with three chapters of three sections.
-- NOTE(review): the sectie3x labels reuse the "Sectie2-*" strings — this
-- looks like a copy-paste slip; confirm whether "Sectie3-*" was intended.
toc1 = Node "Titel" [hoofdstuk1, hoofdstuk2, hoofdstuk3]

hoofdstuk1 = Node "Hoofdstuk 1" [sectie11, sectie12, sectie13]
sectie11 = Node "Sectie1-1" []
sectie12 = Node "Sectie1-2" []
sectie13 = Node "Sectie1-3" []

hoofdstuk2 = Node "Hoofdstuk 2" [sectie21, sectie22, sectie23]
sectie21 = Node "Sectie2-1" []
sectie22 = Node "Sectie2-2" []
sectie23 = Node "Sectie2-3" []

hoofdstuk3 = Node "Hoofdstuk 3" [sectie31, sectie32, sectie33]
sectie31 = Node "Sectie2-1" []
sectie32 = Node "Sectie2-2" []
sectie33 = Node "Sectie2-3" []

-- +
main = div [] (printToc toc1)
-- compile-code
-- -

-- ## Remarks
--
-- In my view we no longer need Nil here: an empty list of subtrees is enough
-- to indicate that there are no subtrees. A tree itself consists of one
-- root; we then have no empty tree.... (or: the empty tree is a tree without
-- subtrees? Really this is a "forest", namely a list of trees.)
--
-- Compare a file system: a file is a "document", without subtrees; or a
-- folder, with subtrees. The root of a file system is always a folder (`/`)
-- - not an empty tree.
--
-- **Exercise** use a ul/li structure instead of div.
--
-- **Exercise** fold open/closed?

-- ## Drawing an n-ary tree
--
-- * draw the subtrees separated by a fixed width
-- * draw the parent node, in the middle of the total width
-- * per subtree you also need a "middle" to draw the connection between the
--   parent node and the subtrees
-- * how "middle" is that middle? - with 2 subtrees, (or, with an even number
--   of subtrees?) in the middle of the gap.
-- * with several subtrees (or with an odd number of subtrees?): in the
--   middle of the middle tree?
-- * simplest solution (for now): the middle of the total width.

import Html exposing (..)
import Svg exposing (..)
import Svg.Attributes exposing (..)
import String exposing (..)
-- + type Tree a = Node a (List (Tree a)) type alias Toc = Tree String unitwidth = 50 unitheight = 30 translate : (Int, Int) -> List (Svg msg) -> Svg msg translate (x, y) lst = Svg.g [transform ("translate(" ++ fromInt(x) ++ "," ++ fromInt(y) ++ ")")] lst twidth : List (Tree a) -> Int twidth trees = case trees of (Node head subs) :: next -> let headw = unitwidth subsw = twidth subs maxw = Basics.max headw subsw in maxw + (twidth next) [] -> 0 drawToc : Toc -> List (Svg msg) drawToc toc = drawSubtocs [toc] 0 drawSubtocs : List Toc -> Int -> List (Svg msg) drawSubtocs tocs shift = case tocs of (Node x s) :: xs -> let head = translate (shift + unitwidth // 2, 0) [Svg.text_ [] [Svg.text (x ++ "-" ++ (fromInt maxw))]] subs = translate (0, unitheight) (drawSubtocs s (shift + 50)) maxw = Basics.max unitwidth (twidth s) next = drawSubtocs xs (shift + maxw + unitwidth) in head :: [subs] ++ next [] -> [] hseparator = 15 vseparator = 30 elemwidth = 50 -- bij de breedte van een lijst moeten we rekening houden met 3 gevallen: -- de lege lijst (0), 1 element (geen separator), meerdere elementen (separators) treesWidth : List (Int, Svg msg) -> Int treesWidth lst = case lst of (tw, t) :: snd :: ts -> tw + hseparator + treesWidth (snd :: ts) (tw, t) :: [] -> tw [] -> 0 -- het resultaat vran drawTrees is een lijst van getekende bomen. -- elke boom heeft een eigen breedte; we hebben de breedte van de totale lijst -- nog niet bepaald, dat komt pas bij de omvattende boom (als die er is.) -- eventueel later: per deel-boom ook een "midden-punt". 
drawTrees : List Toc -> List (Int, Svg msg) drawTrees lst = case lst of t :: ts -> let (headwidth, headsvg) = drawTree t 0 tail = drawTrees ts in (headwidth, headsvg ) :: tail [] -> [] pushdownTrees : List (Int, Svg msg) -> Int -> List (Svg msg) pushdownTrees lst pref = case lst of (tw, t) :: ts :: x -> (translate (pref, vseparator) [t]) :: (pushdownTrees (ts :: x) (pref + tw + hseparator)) (tw, t) :: [] -> [translate (pref, vseparator) [t]] [] -> [] drawTree : Toc -> Int -> (Int, Svg msg) drawTree tree pref = case tree of Node t subs -> let subtrees = drawTrees subs subwidth = if subtrees == [] then elemwidth else treesWidth subtrees -- head = translate (pref + subwidth // 2, 0) [Svg.text_ [] [Svg.text (t ++ "-" ++ (fromInt subwidth))]] head = translate (pref + subwidth // 2, 0) [Svg.text_ [] [Svg.text (t)]] placedtrees = pushdownTrees subtrees pref in (subwidth, (translate (0, 0) (head::placedtrees))) -- een Tree heeft maar 1 alternatief... drawT : Toc -> Svg msg drawT toc = let (w, t) = drawTree toc 0 in t -- - toc1 = Node "Titel" [hoofdstuk1, hoofdstuk2, hoofdstuk3] hoofdstuk1 = Node "H 1" [sectie11, sectie12, sectie13] sectie11 = Node "S 1-1" [] sectie12 = Node "S 1-2" [] sectie13 = Node "S 1-3" [] hoofdstuk2 = Node "H 2" [sectie21, sectie22, sectie23] sectie21 = Node "S 2-1" [] sectie22 = Node "S 2-2" [] sectie23 = Node "S 2-3" [] hoofdstuk3 = Node "H 3" [sectie31, sectie32, sectie33] sectie31 = Node "S 2-1" [] sectie32 = Node "S 2-2" [] sectie33 = Node "S 2-3" [] -- + main = svg [ width "800" , height "400" , viewBox "0 0 800 400" -- , stroke "black" -- , strokeWidth "0.5" -- , fill "None" ] [ (translate (10, 10) [Svg.text_ [] [text "hi"]]) , (translate (100,10) [drawT toc1]) ] -- ++ ( drawToc sectie11 ) ++ [Svg.text "hi"]) -- compile-code -- - node = translate (lw + unitwidth // 2 - swidth str, 0) [Svg.text_ [] [text str]] -- + unitwidth = 30 unitheight = 40 twidth : Tree -> (Int, Int) twidth t = case t of Node str left right -> let (lw, _) = twidth left 
(rw, _) = twidth right in (lw + unitwidth + rw, lw + (unitwidth // 2)) Nil -> (0, 0) swidth str = length str * 4 -- - drawLine : (Int, Int) -> (Int, Int) -> Svg msg drawLine (xa, ya) (xb, yb) = Svg.line [ x1 (fromInt xa) , y1 (fromInt ya) , x2 (fromInt xb) , y2 (fromInt yb) , stroke "black" , strokeWidth "1" ] [] drawTree : Tree -> Svg msg drawTree t = case t of Node str Nil Nil -> translate (unitwidth // 2 - swidth str, 0) [Svg.text_ [] [text str]] Node str Nil right -> let (rw, rm) = twidth right rtree = translate (unitwidth, unitheight) [drawTree right] node = translate (unitwidth // 2 - swidth str, 0) [Svg.text_ [] [text str]] rightedge = drawLine (unitwidth // 2, 5) (unitwidth + rm, unitheight - 15) in Svg.g [] [node, rtree, rightedge] Node str left Nil -> let (lw, lm) = twidth left ltree = translate (0, unitheight) [drawTree left] node = translate (lw + unitwidth // 2 - swidth str, 0) [Svg.text_ [] [text str]] leftedge = drawLine (lw + unitwidth // 2, 5) (lm, unitheight - 15) in Svg.g [] [ltree, node, leftedge] Node str left right -> let (lw, lm) = twidth left (rw, rm) = twidth right ltree = translate (0, unitheight) [drawTree left] rtree = translate (lw + unitwidth, unitheight) [drawTree right] node = translate (lw + unitwidth // 2 - swidth str, 0) [Svg.text_ [] [text str]] leftedge = drawLine (lw + unitwidth // 2, 5) (lm, unitheight - 15) rightedge = drawLine (lw + unitwidth // 2, 5) (lw + unitwidth + rm, unitheight - 15) in Svg.g [] [ltree, node, rtree, leftedge, rightedge] Nil -> Svg.text_ [] [text "."] -- + tree0 = Nil tree1 = Node "hi" Nil Nil tree2 = Node "Hoi" tree1 tree1 tree3 = Node "moi" tree2 tree2 tree4 = Node "goeie" (tree3) (Node "hi" (Nil) (Node "???" tree1 Nil) ) test x = Svg.g [] [ translate (10, 10) [drawTree tree4] ] main = svg [ width "800" , height "400" , viewBox "0 0 800 400" -- , stroke "black" -- , strokeWidth "0.5" -- , fill "None" ] [ test 1 ] -- compile-code
.ipynb_checkpoints/bomen-2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from google_trans_new import google_translator


def google_translate_e_to_j(sentence):
    """Translate an English sentence to Japanese via google_trans_new.

    Performs a network call; errors from the translation service propagate.
    """
    translator = google_translator()
    translate_text = translator.translate(sentence, lang_src='en', lang_tgt='ja')
    return translate_text
# -


def input_sentence():
    """Prompt the user for a sentence on stdin and return it."""
    sentence = input('Enter your sentence:')
    return sentence


# +
## Main script: the input argument is the sentence you want to translate,
## the result is the translated sentence.

# Read the sentence to translate and echo it.
sentence = input_sentence()
print(sentence)

# Use the Google Translate API on the sentence just entered.
# This demonstrates how google_translate_e_to_j works.
output = google_translate_e_to_j(sentence)
print(output)

# +
# Read the sentences from the desired column of the xlsx file into a list.
import openpyxl

wb = openpyxl.load_workbook('example_poc.xlsx')
ws = wb.worksheets[0]

trans_en = []
trans_ja = []

# Read column F into the list.
# Null-column checking is not implemented yet.
# NOTE(review): cell.value is None for empty cells — confirm column F has no gaps.
for cell in ws['F']:
    trans_en.append(cell.value)

# Translate each sentence to Japanese and collect the results in trans_ja.
# The first row of column F is a header, so skip it.
# (FIX: the original loop used "if i == 0: i+1", a no-op expression whose
# evident intent was exactly this header skip.)
for sentence in trans_en[1:]:
    trans_ja.append(google_translate_e_to_j(sentence))

# +
# Write the translated sentences into column G of the xlsx file,
# starting at row 2 because row 1 holds the header.
wb = openpyxl.load_workbook('example_poc.xlsx')
# Specify the worksheet to work on.
ws = wb.worksheets[0]

for youso, translated in enumerate(trans_ja):
    ws['G{}'.format(youso + 2)] = translated

# Save the result under your preferred file name.
# wb.save('your_favorite_filename.xlsx')
wb.save('example_poc_add_jpn.xlsx')
# -
translate_script.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Draft analysis
# ## <NAME>
# ### 12/18/2019

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Import libraries

# + pycharm={"is_executing": false, "name": "#%%\n"}
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from data_processor import DataProcessor
from anharmonic_oscillator import AnharmonicOscillator
# -

# ### Data processing

# + pycharm={"name": "#%%\n", "is_executing": false}
# Normalized PDF of BTC/USD daily log returns (project-local helper classes).
btc_data_processor = DataProcessor(pd.read_csv('./data/BTC_USD_2013-10-01_2019-12-15-CoinDesk.csv'))
btc_log_return = btc_data_processor.get_log_return_pdf_norm()
# -

# ### Construct anharmonic oscillator

# + pycharm={"name": "#%%\n", "is_executing": false}
# Oscillator parameters; `coefficient` holds the 3rd/4th/5th-order anharmonic terms.
omega = 600000
third = 0
fourth = 600000000000
fifth = 0
ao = AnharmonicOscillator(interval_length=0.001, data_size=2256, boundary=0.4, omega=omega, h_bar=1, m=1, coefficient=[third, fourth, fifth])

# + pycharm={"name": "#%%\n", "is_executing": false}
# Mix the first `max_level` eigenstate PDFs with weights w and compare
# against the empirical BTC log-return distribution.
max_level = 10
# x = np.linspace(-2, 100, max_level)
# w = np.exp(x)[::-1]
# NOTE(review): 1**i is always 1, so these weights are uniform; the
# commented-out np.exp weights suggest decaying weights were intended —
# confirm the base.
w = [1**i for i in range(max_level, 0, -1)]
pdf = w[0] * ao.get_pdf_norm_graph(0)
for i in range(1, max_level):
    pdf += w[i] * ao.get_pdf_norm_graph(i)
pdf /= sum(w)
plt.plot(btc_log_return.index, btc_log_return, label='BTC')
plt.plot(ao.position, pdf, label='Simulation')
# plt.plot(ao.position, np.diag(ao.potential), label='Potential')
plt.legend()
plt.show()

# + pycharm={"name": "#%%\n", "is_executing": false}
# Hand-tuned mixture over selected energy levels.
# NOTE(review): w has 12 entries but w[1] is reused for levels 1–8 and w[2]
# for levels 10–60, while w[3..11] map to single high levels — the indices
# may have been meant to advance with each term. Also `pdf /= sum(w)` sums
# the 12 listed weights, not the 22 weights actually applied — so the
# result is not normalized to 1; confirm intent.
w = [100, 50, 50, 35 ,25, 25, 15, 15, 10, 10, 1, 1]
pdf = w[0] * ao.get_pdf_norm_graph(0)
pdf += w[1] * ao.get_pdf_norm_graph(1)
pdf += w[1] * ao.get_pdf_norm_graph(2)
pdf += w[1] * ao.get_pdf_norm_graph(3)
pdf += w[1] * ao.get_pdf_norm_graph(4)
pdf += w[1] * ao.get_pdf_norm_graph(5)
pdf += w[1] * ao.get_pdf_norm_graph(6)
pdf += w[1] * ao.get_pdf_norm_graph(7)
pdf += w[1] * ao.get_pdf_norm_graph(8)
pdf += w[2] * ao.get_pdf_norm_graph(10)
pdf += w[2] * ao.get_pdf_norm_graph(20)
pdf += w[2] * ao.get_pdf_norm_graph(30)
pdf += w[2] * ao.get_pdf_norm_graph(40)
pdf += w[2] * ao.get_pdf_norm_graph(50)
pdf += w[2] * ao.get_pdf_norm_graph(60)
pdf += w[3] * ao.get_pdf_norm_graph(100)
pdf += w[4] * ao.get_pdf_norm_graph(200)
pdf += w[5] * ao.get_pdf_norm_graph(201)
pdf += w[6] * ao.get_pdf_norm_graph(300)
pdf += w[7] * ao.get_pdf_norm_graph(301)
pdf += w[8] * ao.get_pdf_norm_graph(500)
pdf += w[9] * ao.get_pdf_norm_graph(501)
pdf += w[10] * ao.get_pdf_norm_graph(700)
pdf += w[11] * ao.get_pdf_norm_graph(701)
pdf /= sum(w)
plt.plot(btc_log_return.index, btc_log_return, label='BTC')
plt.plot(ao.position, pdf, label='Simulation')
plt.legend()
plt.show()

# + pycharm={"name": "#%%\n", "is_executing": false}
# Piecewise-decreasing weight profile over the first 700 levels.
max_level = 700
w = np.array([100 for i in range(700)])
w[1:] = w[1:] - 1
for i in range(1, 10):
    w[i] = w[i] - i
w[10:] = w[10:] - 20
w[150:] = w[150:] - 20
w[250:] = w[250:] - 20
w[300:] = w[300:] - 10
# NOTE(review): uses the scalar w[600] where the other lines subtract from
# the slice; equivalent only because w[600:] is uniform at this point.
w[600:] = w[600] - 5
# NOTE(review): the extra factor 10 on the ground state is not reflected in
# the `sum(w)` normalization below — confirm intent.
pdf = 10 *w[0] * ao.get_pdf_norm_graph(0)
for i in range(1, max_level):
    pdf += w[i] * ao.get_pdf_norm_graph(i)
pdf /= sum(w)
plt.plot(btc_log_return.index, btc_log_return, label='BTC')
plt.plot(ao.position, pdf, label='Simulation')
# plt.plot(ao.position, np.diag(ao.potential), label='Potential')
plt.legend()
plt.show()
print(w)
test/test_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jcmachicao/uc_curso_modelamientopredictivo/blob/master/modpred__02B.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="5-uV7qR6Uk8v"
# #### Notebook 02B
#
# **Predictive Modeling Course**
# ---
# * **More on Decision Trees and Cross Validation**
# * Author: <NAME>
# * License: [GestioDinámica](http://www.gestiodinamica.com) 2020

# + id="kZ0ke4tjNDHv"
import pandas as pd
import matplotlib.pyplot as plt

# + id="jnDmp3CcNGnP"
# Base path of the course materials on Google Drive.
ruta = 'drive/My Drive/2020 Cursos/2020 Modelamiento Predictivo/uc_modpred_materiales/'

# + id="fTAN6Yk8NU0J"
# Load the store-customers dataset.
data = pd.read_csv(ruta + 'modpred_foldercompartido/clientes_tienda.csv')

# + id="zdGFp1WNN2ay" outputId="a8fc7db4-415b-49cf-88d3-b58f1aab851c" colab={"base_uri": "https://localhost:8080/", "height": 197}
# Columns: id, gender, age, income, spending.
data.columns = ['id', 'genero', 'edad', 'ingreso', 'gasto']
data.head()

# + id="abLURcS-OFn8"
# Encode gender as an integer label for the classifier.
data['gen_num'] = pd.factorize(data.genero)[0]

# + id="hHEA98VUOVSt"
# Features = everything except the gender columns; target = encoded gender.
X = data.drop(['genero', 'gen_num'], axis=1)
y = data['gen_num']

# + id="lQcwhzdxOtKe"
from sklearn.tree import DecisionTreeClassifier, plot_tree

# + id="vNnG3FggOxww"
# Shallow tree (depth 2) so the plot stays readable.
model = DecisionTreeClassifier(max_depth=2)

# + id="jnsHenNgO5Y2" outputId="8b5e7fb8-0ce3-45fa-8035-1cb34521ce12" colab={"base_uri": "https://localhost:8080/", "height": 123}
model.fit(X, y)

# + id="xHxLgay6O9Qo"
# NOTE(review): predictions are made on the training data itself, so this
# only illustrates the mechanics, not generalization.
data['predic'] = model.predict(X)

# + id="d2oKAl7HpkjR" outputId="51de9d60-176a-44c6-ad71-92a039ddc8e8" colab={"base_uri": "https://localhost:8080/", "height": 197}
data.head()

# + [markdown] id="ZKKTNb71CEOr"
# ### Ways of computing the branches of a tree (2 main ones)
# Gini Index
# * Computed by subtracting the sum of the squared probabilities of the first class considered in the input variables. It favors large partitions. The intuition is that it measures how far a category is from a perfect distribution.
#
# Information Gain
# * Multiplies the probability of the class by the logarithm (base 2) of the class probability, favoring the smaller partitions with distinct values.

# + id="Grd6YNwmprNR" outputId="751778bb-d30f-46fb-b4ec-3bd6cd68c5cd" colab={"base_uri": "https://localhost:8080/", "height": 574}
plt.figure(figsize=(18,10))
plot_tree(model, max_depth=2, rounded=True, fontsize=11, label='all', filled=True, feature_names=X.columns)
plt.show()
# the most important thing when interpreting a decision tree is the structure

# + id="1pffKfubPJx6"
from sklearn.model_selection import cross_val_score

# + id="v48uJfLhUGOx" outputId="c4156199-462b-49d5-bb35-ffab7762c2a1" colab={"base_uri": "https://localhost:8080/", "height": 34}
# 10-fold cross-validated accuracy of the depth-2 tree.
scores = cross_val_score(model, X, y, cv=10)
scores

# + id="7jioObTOUX_u" outputId="597164d8-b52d-4636-bad9-e4fecb12884b" colab={"base_uri": "https://localhost:8080/", "height": 282}
plt.plot(scores)
modpred__02B.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Two embedding+MLP Keras models trained with a custom, distance-spread
# ("knn++"-style) 5-fold split over time_ids, for an RMSPE regression target.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm.auto import tqdm
from numpy.random import seed
import numpy.matlib
seed(42)
import tensorflow as tf
tf.random.set_seed(42)
from tensorflow import keras
from keras import backend as K
from sklearn import model_selection
from sklearn.preprocessing import StandardScaler, MinMaxScaler,LabelEncoder, StandardScaler
from sklearn.preprocessing import QuantileTransformer

# Root mean squared percentage error, as a Keras-backend loss.
def root_mean_squared_per_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square( (y_true - y_pred)/ y_true)))

# Stop after 20 stagnant epochs and restore the best weights.
es = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=20, verbose=0,
    mode='min', restore_best_weights=True)

# Reduce LR by 5x after 7 stagnant epochs.
plateau = tf.keras.callbacks.ReduceLROnPlateau(
    monitor='val_loss', factor=0.2, patience=7, verbose=0,
    mode='min')
# -

pd.set_option('display.max_columns',None)

train = pd.read_pickle('./input/train_248.pkl')
test = pd.read_pickle('./input/test_247.pkl')

# Numeric feature columns: everything but identifiers and the target.
colNames = [col for col in list(train.columns) if col not in {"stock_id", "time_id", "target", "row_id"}]
len(colNames)

# +
# kfold based on the knn++ algorithm
out_train = pd.read_csv('./input/train.csv')
out_train = out_train.pivot(index='time_id', columns='stock_id', values='target')
#out_train[out_train.isna().any(axis=1)]
out_train = out_train.fillna(out_train.mean())
out_train.head()

# code to add the just the read data after first execution
# data separation based on knn ++
nfolds = 5 # number of folds
index = []
totDist = []
values = []

# generates a matriz with the values of
mat = out_train.values
scaler = MinMaxScaler(feature_range=(-1, 1))
mat = scaler.fit_transform(mat)
nind = int(mat.shape[0]/nfolds) # number of individuals

# adds index in the last column
mat = np.c_[mat,np.arange(mat.shape[0])]

# Pick one random seed row per fold (descending order so deletions by
# position below do not shift the remaining picks).
lineNumber = np.random.choice(np.array(mat.shape[0]), size=nfolds, replace=False)
lineNumber = np.sort(lineNumber)[::-1]

for n in range(nfolds):
    totDist.append(np.zeros(mat.shape[0]-nfolds))

# saves index
for n in range(nfolds):
    values.append([lineNumber[n]])

s=[]
for n in range(nfolds):
    s.append(mat[lineNumber[n],:])
    mat = np.delete(mat, obj=lineNumber[n], axis=0)

# Grow each fold one row at a time: rows far (in squared distance) from a
# fold's previous picks are proportionally more likely to join that fold.
for n in range(nind-1):
    luck = np.random.uniform(0,1,nfolds)
    for cycle in range(nfolds):
        # saves the values of index
        s[cycle] = np.matlib.repmat(s[cycle], mat.shape[0], 1)
        sumDist = np.sum( (mat[:,:-1] - s[cycle][:,:-1])**2 , axis=1)
        totDist[cycle] += sumDist
        # probabilities
        f = totDist[cycle]/np.sum(totDist[cycle])
        # normalizing the totdist
        # Roulette-wheel selection: walk the CDF until it passes luck[cycle].
        j = 0
        kn = 0
        for val in f:
            j += val
            if (j > luck[cycle]):
                # the column was selected
                break
            kn +=1
        lineNumber[cycle] = kn
        # delete line of the value added
        for n_iter in range(nfolds):
            totDist[n_iter] = np.delete(totDist[n_iter],obj=lineNumber[cycle], axis=0)
        j= 0
        s[cycle] = mat[lineNumber[cycle],:]
        values[cycle].append(int(mat[lineNumber[cycle],-1]))
        mat = np.delete(mat, obj=lineNumber[cycle], axis=0)

# Map positional indices back to the original time_id index.
for n_mod in range(nfolds):
    values[n_mod] = out_train.index[values[n_mod]]
# -

out_train

# Rank-transform every numeric feature to a normal distribution
# (fit on train, applied to both train and test).
qt_train = []

train_nn=train[colNames].copy()
test_nn=test[colNames].copy()

for col in tqdm(colNames,total=len(colNames)):
    #print(col)
    # Scale each feature onto the same range/distribution. A rank transform
    # smooths anomalous distributions and is less affected by outliers than
    # plain scaling.
    qt = QuantileTransformer(random_state=21,n_quantiles=2000, output_distribution='normal')
    train_nn[col] = qt.fit_transform(train_nn[[col]])
    test_nn[col] = qt.transform(test_nn[[col]])
    qt_train.append(qt)

train_nn[['stock_id','time_id','target']] = train[['stock_id','time_id','target']]
test_nn[['stock_id','time_id']] = test[['stock_id','time_id']]
train_nn

# +
#https://bignerdranch.com/blog/implementing-swish-activation-function-in-keras/
from keras.backend import sigmoid,tanh,log,exp

def swish(x, beta = 1):
    return (x * sigmoid(beta * x))

def swish_2(x, beta = 0.95):
    return (x * sigmoid(beta * x))

def swish_3(x, beta = 1.05):
    return (x * sigmoid(beta * x))

def mish(x):
    return (x * tanh(log(1+exp(x))))

# Register 'swish' so Dense layers can reference it by name.
from keras.utils.generic_utils import get_custom_objects
from keras.layers import Activation
get_custom_objects().update({'swish': Activation(swish)})
# get_custom_objects().update({'mish': Activation(mish)})

# hidden_units = (128,64,32)
# hidden_units = (512,128,32)
# hidden_units = (256,128,64,16)
# hidden_units = (512,128,64,16)
stock_embedding_size = 24

# NOTE(review): module-level and reassigned inside the training loops below;
# both model builders read it via closure to size the embedding — fragile
# coupling, confirm before reordering cells.
cat_data = train_nn['stock_id']

def base_model():
    """Embedding(stock_id) + 3-layer swish MLP with 244 numeric inputs."""
    # Each instance will consist of two inputs: a single user id, and a single movie id
    stock_id_input = keras.Input(shape=(1,), name='stock_id')
    num_input = keras.Input(shape=(244,), name='num_data') # 247 - 1(stock_id) - 2(time_id, target) =244 # 258

    #embedding, flatenning and concatenating
    stock_embedded = keras.layers.Embedding(max(cat_data)+1, stock_embedding_size,
                                            input_length=1, name='stock_embedding')(stock_id_input)
    stock_flattened = keras.layers.Flatten()(stock_embedded)
    out = keras.layers.Concatenate()([stock_flattened, num_input])

    # Add one or more hidden layers
    hidden_units = (128,64,32)
    for n_hidden in hidden_units:
        out = keras.layers.Dense(n_hidden, activation='swish')(out)
#         out = keras.layers.Dense(n_hidden, activation='mish')(out)

    #out = keras.layers.Concatenate()([out, num_input])

    # A single output: our predicted rating
    out = keras.layers.Dense(1, activation='linear', name='prediction')(out)

    model = keras.Model(
        inputs = [stock_id_input, num_input],
        outputs = out,
    )
    return model

def base_model_2(num_columns, # 244
                 # num_labels, # output_dim
                 hidden_units, # (128,64,32)
                 dropout_rates, # list of dropout_rates
                 stock_embedding_size = 24,
                 # ls = 1e-2, # label_smoothing = ls
                 lr = 1e-3):
    """Parameterized variant: configurable widths, dropout per layer, and
    GaussianNoise after layers 0 and 4. `lr` is currently unused (the
    optimizer is built at compile time in the training cell)."""
    # input
    stock_id_input = keras.Input(shape=(1,), name='stock_id')
    num_input = keras.Input(shape=(num_columns,), name='num_data')

    #embedding, flatenning and concatenating
    stock_embedded = keras.layers.Embedding(max(cat_data)+1, stock_embedding_size,
                                            input_length=1, name='stock_embedding')(stock_id_input)
    stock_flattened = keras.layers.Flatten()(stock_embedded)
    x = keras.layers.Concatenate()([stock_flattened, num_input])

    i=0
    # Add one or more hidden layers
    for n_hidden in hidden_units:
#         out = keras.layers.Dense(n_hidden, activation='swish')(out)
        x = keras.layers.Dense(n_hidden)(x)
#         if i%2 ==1:
#             x = keras.layers.BatchNormalization()(x)
        if i==0 or i==4:
            x = tf.keras.layers.GaussianNoise(0.1)(x) # std of noise
        x = keras.layers.Activation('swish')(x)
#         x = keras.layers.Activation('swish')(x)
        x = keras.layers.Dropout(dropout_rates[i])(x)
        i+=1

    out = keras.layers.Dense(1, activation='linear', name='prediction')(x)

    model = keras.Model(
        inputs = [stock_id_input, num_input],
        outputs = out,
    )
    return model

# +
# Function to calculate the root mean squared percentage error
def rmspe(y_true, y_pred):
    return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))

# Function to early stop with root mean squared percentage error
# (LightGBM feval signature; unused by the Keras models below.)
def feval_rmspe(y_pred, lgb_train):
    y_true = lgb_train.get_label()
    return 'RMSPE', rmspe(y_true, y_pred), False

# +
# NN1 training: one base_model() per fold of the knn++ split; out-of-fold
# predictions go to valid_predictions_nn1, fold-averaged test predictions
# to test_predictions_nn1.
# # %%time
target_name='target'
scores_folds = {}
model_name = 'NN'
pred_name = 'pred_{}'.format(model_name)

n_folds = 5
# NOTE(review): this KFold is never used — the folds come from `values`
# built by the knn++ cell above.
kf = model_selection.KFold(n_splits=n_folds, shuffle=True, random_state=2020)
scores_folds[model_name] = []
counter = 1

features_to_consider = list(train_nn) # len -> 247

features_to_consider.remove('time_id')
features_to_consider.remove('target')
# NOTE(review): bare except/pass — intentional best-effort removal on first
# execution (pred_NN only exists on re-runs).
try:
    features_to_consider.remove('pred_NN')
except:
    pass

# Mean-impute with the *training* means for both frames.
train_nn[features_to_consider] = train_nn[features_to_consider].fillna(train_nn[features_to_consider].mean())
test_nn[features_to_consider] = test_nn[features_to_consider].fillna(train_nn[features_to_consider].mean())

train_nn[pred_name] = 0
test_nn[target_name] = 0

test_predictions_nn1 = np.zeros(test_nn.shape[0])
valid_predictions_nn1 = np.zeros(train_nn.shape[0])

for n_count in range(n_folds):
    print('CV {}/{}'.format(counter, n_folds))

    # Train on the 4 other folds' time_ids; validate on this fold's.
    indexes = np.arange(nfolds).astype(int)
    indexes = np.delete(indexes,obj=n_count, axis=0)
    # np.r_ concatenates the four folds' time_id arrays into one.
    indexes = np.r_[values[indexes[0]],values[indexes[1]],values[indexes[2]],values[indexes[3]]]

    X_train = train_nn.loc[train_nn.time_id.isin(indexes), features_to_consider]
    y_train = train_nn.loc[train_nn.time_id.isin(indexes), target_name]
    X_test = train_nn.loc[train_nn.time_id.isin(values[n_count]), features_to_consider]
    y_test = train_nn.loc[train_nn.time_id.isin(values[n_count]), target_name]

    #############################################################################################
    # NN
    #############################################################################################
#     model = base_model()
    model = base_model()

    model.compile(
        keras.optimizers.Adam(learning_rate=0.006),
#         keras.optimizers.Adam(learning_rate=0.006,decay=3e-5),
        loss=root_mean_squared_per_error
    )
    # stock_id is fed separately to the embedding input, so drop it from
    # the numeric features (re-appended at the end of each iteration).
    try:
        features_to_consider.remove('stock_id')
    except:
        pass

    num_data = X_train[features_to_consider]

    # Per-fold MinMax scaling fitted on the training split only.
    scaler = MinMaxScaler(feature_range=(-1, 1))
    num_data = scaler.fit_transform(num_data.values)

    cat_data = X_train['stock_id']
    target = y_train

    num_data_test = X_test[features_to_consider]
    num_data_test = scaler.transform(num_data_test.values)
    cat_data_test = X_test['stock_id']

    model.fit([cat_data, num_data],
              target,
              batch_size=2048,
              epochs=1000,
#               sample_weight= 1/np.square(y_train),
              validation_data=([cat_data_test, num_data_test], y_test),
              callbacks=[es, plateau],
              validation_batch_size=len(y_test),
              shuffle=True,
              verbose = 1)

#     preds = model.predict([cat_data_test, num_data_test]).reshape(1,-1)[0]
    valid_predictions_nn1[train_nn.time_id.isin(values[n_count])] = model.predict([cat_data_test, num_data_test]).reshape(1,-1)[0]

#     score = round(rmspe(y_true = y_test, y_pred = preds),5)
    score = round(rmspe(y_true = y_test, y_pred = valid_predictions_nn1[train_nn.time_id.isin(values[n_count])]),5)
    print('Fold {} {}: {}'.format(counter, model_name, score))
    scores_folds[model_name].append(score)

    # Accumulate the fold-averaged (clipped non-negative) test predictions.
    tt =scaler.transform(test_nn[features_to_consider].values)
    #test_nn[target_name] += model.predict([test_nn['stock_id'], tt]).reshape(1,-1)[0].clip(0,1e10)
    test_predictions_nn1 += model.predict([test_nn['stock_id'], tt]).reshape(1,-1)[0].clip(0,1e10)/n_folds
    #test[target_name] += model.predict([test['stock_id'], test[features_to_consider]]).reshape(1,-1)[0].clip(0,1e10)
    counter += 1
    features_to_consider.append('stock_id')

print('avg val loss: ', round(np.mean(scores_folds['NN']), 5))

# +
# NN2 training: same fold logic, but a deeper base_model_2 with dropout and
# Gaussian noise, under different seeds.
seed(41)
tf.random.set_seed(41)

target_name='target'
scores_folds = {}
model_name = 'NN2'
pred_name = 'pred_{}'.format(model_name)

n_folds = 5
# NOTE(review): unused here too (see NN1 above).
kf = model_selection.KFold(n_splits=n_folds, shuffle=True, random_state=2021)
scores_folds[model_name] = []
counter = 1

# features_to_consider = list(train1)
features_to_consider = list(train_nn)

features_to_consider.remove('time_id')
features_to_consider.remove('target')
try:
    features_to_consider.remove('pred_NN')
except:
    pass

# train1[features_to_consider] = train1[features_to_consider].fillna(train1[features_to_consider].mean())
# test1[features_to_consider] = test1[features_to_consider].fillna(train1[features_to_consider].mean())
# train1[pred_name] = 0
# test1[target_name] = 0
# test_predictions_nn2 = np.zeros(test_nn.shape[0])

train_nn[features_to_consider] = train_nn[features_to_consider].fillna(train_nn[features_to_consider].mean())
test_nn[features_to_consider] = test_nn[features_to_consider].fillna(train_nn[features_to_consider].mean())

train_nn[pred_name] = 0
test_nn[target_name] = 0

test_predictions_nn2 = np.zeros(test_nn.shape[0])
valid_predictions_nn2 = np.zeros(train_nn.shape[0])

for n_count in range(n_folds):
    print('CV {}/{}'.format(counter, n_folds))

    indexes = np.arange(nfolds).astype(int)
    indexes = np.delete(indexes,obj=n_count, axis=0)
    # np.r_ concatenates the four folds' time_id arrays into one.
    indexes = np.r_[values[indexes[0]],values[indexes[1]],values[indexes[2]],values[indexes[3]]]

    X_train = train_nn.loc[train_nn.time_id.isin(indexes), features_to_consider]
    y_train = train_nn.loc[train_nn.time_id.isin(indexes), target_name]
    X_test = train_nn.loc[train_nn.time_id.isin(values[n_count]), features_to_consider]
    y_test = train_nn.loc[train_nn.time_id.isin(values[n_count]), target_name]

    # X_train = train1.loc[train1.time_id.isin(indexes), features_to_consider]
    # y_train = train1.loc[train1.time_id.isin(indexes), target_name]
    # X_test = train1.loc[train1.time_id.isin(values[n_count]), features_to_consider]
    # y_test = train1.loc[train1.time_id.isin(values[n_count]), target_name]

    #############################################################################################
    # NN
    #############################################################################################
    model = base_model_2(244,
                         hidden_units=[512,256,128,128,64,64,32,16],
                         # hidden_units=[128,128,128,128,32], #[256,128,64]
                         dropout_rates=[0.02,0.02,0.01,0.01,0.01,0.02,0.02,0.02]
                         # dropout_rates=[0.2,0.2,0.2,0.2,0.2,0.2,]
                         )

    model.compile(
        keras.optimizers.Adam(learning_rate=0.006),
        loss=root_mean_squared_per_error
    )
    try:
        features_to_consider.remove('stock_id')
    except:
        pass

    num_data = X_train[features_to_consider]

    scaler = MinMaxScaler(feature_range=(-1, 1))
    num_data = scaler.fit_transform(num_data.values)

    cat_data = X_train['stock_id']
    target = y_train

    num_data_test = X_test[features_to_consider]
    num_data_test = scaler.transform(num_data_test.values)
    cat_data_test = X_test['stock_id']

    model.fit([cat_data, num_data],
              target,
              batch_size=2048,
              epochs=1000,
              validation_data=([cat_data_test, num_data_test], y_test),
              callbacks=[es, plateau],
              validation_batch_size=len(y_test),
              shuffle=True,
              verbose = 1
              )

#     preds = model.predict([cat_data_test, num_data_test]).reshape(1,-1)[0]
    valid_predictions_nn2[train_nn.time_id.isin(values[n_count])] = model.predict([cat_data_test, num_data_test]).reshape(1,-1)[0]

#     score = round(rmspe(y_true = y_test, y_pred = preds),5)
    score = round(rmspe(y_true = y_test, y_pred = valid_predictions_nn2[train_nn.time_id.isin(values[n_count])]),5)
    print('Fold {} {}: {}'.format(counter, model_name, score))
    scores_folds[model_name].append(score)

    tt =scaler.transform(test_nn[features_to_consider].values)
    #test_nn[target_name] += model.predict([test_nn['stock_id'], tt]).reshape(1,-1)[0].clip(0,1e10)
    # test_predictions_nn2 += model.predict([test1['stock_id'], tt]).reshape(1,-1)[0].clip(0,1e10)/n_folds
    test_predictions_nn2 += model.predict([test_nn['stock_id'], tt]).reshape(1,-1)[0].clip(0,1e10)/n_folds
    #test[target_name] += model.predict([test['stock_id'], test[features_to_consider]]).reshape(1,-1)[0].clip(0,1e10)
    counter += 1
    features_to_consider.append('stock_id')

print('avg val loss: ', round(np.mean(scores_folds['NN2']), 5))
# -

valid_predictions_nn1, valid_predictions_nn2

test_predictions_nn1, test_predictions_nn2

# +
# pd.Series(valid_predictions_nn1).to_csv('nn1_valid_pred_knn.csv',index=0)
# pd.Series(valid_predictions_nn2).to_csv('nn2_valid_pred_knn.csv',index=0)
# -

# Free the large frames and intermediates.
import gc
del train_nn, test_nn
del X_train, X_test
del cat_data, num_data, cat_data_test, num_data_test
gc.collect()
models/NN (kfold_based_on_the_knn++_algorithm).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] papermill={} tags=[] # <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/> # + [markdown] papermill={} tags=[] # # Gmail - Send emails from Gsheet specific # <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Gmail/Gmail_Send_emails_from_Gsheet_specific.ipynb" target="_parent"><img src="https://naasai-public.s3.eu-west-3.amazonaws.com/open_in_naas.svg"/></a> # + [markdown] papermill={} tags=[] # **Tags:** #gmail #productivity #gsheet #naas_drivers #operations #snippet #email # + [markdown] papermill={} tags=[] # **Author:** [<NAME>](https://www.linkedin.com/in/ACoAAAJHE7sB5OxuKHuzguZ9L6lfDHqw--cdnJg/) # + [markdown] papermill={} tags=[] # Example : to a list of people in Gmail. 
# + [markdown] papermill={} tags=[] # ## Input # + [markdown] papermill={} tags=[] # ### Import libraries # + papermill={} tags=[] import naas from naas_drivers import gsheet from naas_drivers import html # + [markdown] papermill={} tags=[] # ### Read the gsheet # + papermill={} tags=[] spreadsheet_id = "1_VAF<KEY>" data = gsheet.connect(spreadsheet_id).get(sheet_name="Sheet1") # + [markdown] papermill={} tags=[] # ### Setting your email address # + papermill={} tags=[] your_email = "<EMAIL>" firstname_list = data['FIRST NAME'] email_list = data['EMAIL'] specific_message_list = data['SPECIFIC MESSAGE'] # + [markdown] papermill={} tags=[] # ## Model # + [markdown] papermill={} tags=[] # ### Mail preview # + papermill={} tags=[] url_image = naas.assets.add("2020.gif") email_content = html.generate( display='iframe', title='🎅 Merry Christmas', heading= '& Happy new year {first_name} 🍾', image = f"{url_image}", text_1= '{specific message}', text_2= "Even if 2020 has been extremely difficult year, let's make 2021 better!", text_3= "Keep smiling,", text_4= "Keep laughing,", text_5= "Spread love ❤️", ) # + [markdown] papermill={} tags=[] # ## Output # + [markdown] papermill={} tags=[] # ### Sending emails # + papermill={} tags=[] for i in range(len(data)): subject = "Merry Christmas & spread love for 2021 ❤️" content = email_content.replace("{first_name}",firstname_list[i]).replace("{specific message}",specific_message_list[i]) naas.notifications.send(email_to=email_list[i], subject=subject, html=content, email_from=your_email)
Gmail/Gmail_Send_emails_from_Gsheet_specific.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Taller sobre transformación de datos usando Python # Genere un archivo con el código presentado a continuación. # %%writefile data.txt E 1 b,g,f jjj:3,bbb:0,ddd:9,ggg:8,hhh:2 A 2 a,f,c ccc:2,ddd:0,aaa:3,hhh:9 B 5 f,e,a,c ddd:2,ggg:5,ccc:6,jjj:1 A 3 a,b hhh:9,iii:5,eee:7,bbb:1 C 6 f,g,d,a iii:6,ddd:5,eee:4,jjj:3 A 7 c,d bbb:2,hhh:0,ccc:4,fff:1,aaa:7 A 9 g,d,a aaa:5,fff:8,ddd:2,iii:0,jjj:7,ccc:1 B 1 b,a fff:3,hhh:1,ddd:2 E 2 d,e,a,f eee:4,ccc:5,iii:9,fff:7,ggg:6,bbb:0 B 3 d,b,g,f bbb:7,jjj:9,fff:5,iii:4,ggg:2,eee:3 C 7 d,c,f,b hhh:6,eee:4,iii:0,fff:2,jjj:1 C 5 d,e,a,c bbb:7,iii:6,ggg:9 D 3 g,e,f,b bbb:9,aaa:3,ccc:6,fff:4,eee:2 E 8 c,f aaa:8,ddd:5,jjj:1 B 9 d,b ccc:0,jjj:6,fff:7,ddd:3,aaa:2 D 1 f,e ccc:0,eee:6,bbb:9,ddd:3 E 3 e,b,f bbb:6,iii:3,hhh:5,fff:4,ggg:9,ddd:2 D 5 g,a hhh:4,jjj:5,ccc:9 E 8 e,c,f,a ccc:1,iii:6,fff:9 E 9 e,a bbb:9,aaa:3,fff:1 E 7 e,f ddd:9,iii:2,aaa:4 E 3 c,b,g ccc:5,fff:8,iii:7 D 5 c,f,a eee:3,jjj:2,ddd:7 A 1 f,a,d jjj:1,ggg:0,ccc:7,ddd:9,bbb:3 E 4 c,d jjj:6,ccc:0,aaa:1,hhh:9,iii:7,ggg:8 E 6 e,d,c fff:3,eee:6,iii:4,bbb:7,ddd:0,ccc:1 A 8 a,e,f fff:0,ddd:5,ccc:4 E 5 c,a,g ggg:6,hhh:3,ddd:9,ccc:0,jjj:7 A 6 f,e hhh:6,jjj:0,eee:5,iii:7,ccc:3 C 0 f,c,a,g eee:1,fff:4,aaa:2,ccc:7,ggg:0,ddd:6 A 1 b,f ccc:6,aaa:9,eee:5,ddd:0,bbb:3 D 2 b,f bbb:7,hhh:1,aaa:6,iii:4,fff:9,ddd:5 E 5 a,c fff:3,ccc:1,ggg:2,eee:5 B 4 b,f,c iii:7,ggg:3,ddd:0,jjj:8,hhh:5,ccc:1 B 6 f,a,e hhh:6,ccc:3,jjj:0,bbb:8,ddd:7 D 7 a,f aaa:0,fff:5,ddd:3 B 8 c,a ddd:5,jjj:2,iii:7,ccc:0,bbb:4 C 9 c,a,e,f eee:0,fff:2,hhh:6 E 1 e,d fff:9,iii:2,eee:0 E 5 f,a,d hhh:8,ggg:3,jjj:5 # Resuelva los siguientes problemas. # ## Problema 1 # # Cuánto es la suma de la segunda columna. 
# ## Problema 2 # # Genere una lista de tuplas, donde cada tupla contiene en la primera posición, el valor de la segunda columna; la segunda parte de la tupla es una lista con las letras de la primera columna que aparecen asociadas a dicho valor de la segunda columna. Esto es: # # # ('1', ['E', 'B', 'D', 'A', 'A', 'E']), # ('9', ['A', 'B', 'E', 'C']), # ('8', ['E', 'E', 'A', 'B']), # ('4', ['E', 'B']), # ('0', ['C']), # ('2', ['A', 'E', 'D']), # ('5', ['B', 'C', 'D', 'D', 'E', 'E', 'E']), # ('3', ['A', 'B', 'D', 'E', 'E']), # ('6', ['C', 'E', 'A', 'B']), # ('7', ['A', 'C', 'E', 'D'])] # ## Problema 3 # # Ordene la lista de tuplas del problema anterior y la lista de letras de la segunda parte de la tupla. # ## Problema 4 # # Calcule la cantidad de registros por clave de la columna 4. En otras palabras, ¿cuántos registros hay que tengan la clave `aaa`? Esto es: # # [('jjj', 18), # ('ccc', 23), # ('aaa', 13), # ('iii', 18), # ('eee', 15), # ('bbb', 16), # ('ddd', 23), # ('ggg', 13), # ('hhh', 16), # ('fff', 20)] # ## Problema 5 # # Genere una tabla que contenga la primera columna, la cantidad de elementos en la columna 3 y la cantidad de elementos en la columna 4. La columna 4 es una lista de claves y valores separados por comas. # # [['E', 3, 5], ['A', 3, 4], ['B', 4, 4], ['A', 2, 4], ['C', 4, 4], # ['A', 2, 5], ['A', 3, 6], ['B', 2, 3], ['E', 4, 6], ['B', 4, 6], # ['C', 4, 5], ['C', 4, 3], ['D', 4, 5], ['E', 2, 3], ['B', 2, 5], # ['D', 2, 4], ['E', 3, 6], ['D', 2, 3], ['E', 4, 3], ['E', 2, 3], # ['E', 2, 3], ['E', 3, 3], ['D', 3, 3], ['A', 3, 5], ['E', 2, 6], # ['E', 3, 6], ['A', 3, 3], ['E', 3, 5], ['A', 2, 5], ['C', 4, 6], # ['A', 2, 5], ['D', 2, 6], ['E', 2, 4], ['B', 3, 6], ['B', 3, 5], # ['D', 2, 3], ['B', 2, 5], ['C', 4, 3], ['E', 2, 3], ['E', 3, 3]] # ## Problema 6 # # Calcule la suma de la segunda columna por cada letra de la primera columna. # ## Problema 7 # # Calcule la suma por cada letra de la columna 3. 
# ## Problema 8
#
# Construya una lista de tuplas que tengan el texto de la columna 4 y la suma de los valores asociados, computada sobre todo el conjunto de datos.
notebooks/por revisar/TALLER-prepararcion-de-datos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import dicom
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import cv2
import math
import scipy.ndimage
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# FIX: in a jupytext .py file, IPython magics must be commented
# (`# %matplotlib inline`); the bare `% matplotlib inline` was a SyntaxError
# when running the file as plain Python.
# %matplotlib inline

# Directory layout: ./sample_images/<patient_id>/<slice>.dcm plus a
# stage1_labels.csv mapping patient_id -> cancer (0/1).
data_dir = os.getcwd() + '\\sample_images\\'
patients = os.listdir(data_dir)
labels_df = pd.read_csv('stage1_labels.csv', index_col=0)

print(data_dir)
print(patients)
labels_df.head()
# -

# Iterate through 5 patients in sample_image - checking out attributes
for patient in patients[:5]:
    # FIX: DataFrame.get_value() was deprecated and removed from pandas;
    # .loc is the supported scalar lookup.
    label = labels_df.loc[patient, 'cancer']
    path = data_dir + patient
    # Using dicom to read the dicom files using the full patient path.
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    # Sort slices by their z position so the stack is in anatomical order
    # (x refers to one dicom file here).
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    print(len(slices), slices[0].pixel_array.shape)

# <p>IMPORTANT NOTE: Not all images are the same size.......depth isnt the same and width, height are too large for CNN </p>

# No.
# of Instances in the sample_image
len(patients)

# +
# Pre-processing parameters: every slice is resized to IMG_PX_SIZE x IMG_PX_SIZE
# and every scan is reduced to HM_SLICES averaged chunks of slices.
IMG_PX_SIZE = 50
HM_SLICES = 20

#def chunks(l, n):
#    # Credit: <NAME>
#    # Link: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
#    """Yield successive n-sized chunks from l."""
#    for i in range(0, len(l), n):
#        yield l[i:i + n]

def chunks(l, n):
    """Yield successive n-sized chunks from l (n may be a float, e.g. a math.ceil result)."""
    for i in range(0, len(l), int(n)):
        yield l[i:i + int(n)]

def mean(l):
    """Arithmetic mean of a sequence."""
    return sum(l) / len(l)

# Creating a 3D model of the images
def plot_3d(image, threshold=-300):
    """Render an isosurface of a 3-D CT volume at the given threshold (HU)."""
    # Position the scan upright, so the head of the patient would be at the
    # top, facing the camera.
    p = image.transpose(2, 1, 0)
    verts, faces = measure.marching_cubes(p, threshold)

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111, projection='3d')

    # Fancy indexing: `verts[faces]` generates a collection of triangles.
    mesh = Poly3DCollection(verts[faces], alpha=0.70)
    face_color = [0.45, 0.45, 0.75]
    mesh.set_facecolor(face_color)
    ax.add_collection3d(mesh)

    ax.set_xlim(0, p.shape[0])
    ax.set_ylim(0, p.shape[1])
    ax.set_zlim(0, p.shape[2])
    plt.show()

def process_data(patient, labels_df, img_px_size=50, hm_slices=20, visualize=False):
    """Load one patient's DICOM stack and return (volume, one-hot label).

    FIX: the parameters are honoured now — the original ignored
    ``img_px_size``/``hm_slices`` and always used the module constants.  The
    ``hm_slices`` default is set to 20 (was a dead 23) so every existing call
    keeps the behaviour it actually got.

    Returns an (hm_slices, img_px_size, img_px_size) array and a one-hot
    label array, or None when loading fails (e.g. an unlabeled patient).
    """
    try:
        # FIX: get_value() was removed from pandas; .loc is the supported lookup.
        label = labels_df.loc[patient, 'cancer']
        path = data_dir + patient
        slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
        slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))

        new_slices = []
        slices = [cv2.resize(np.array(each_slice.pixel_array),
                             (img_px_size, img_px_size)) for each_slice in slices]

        # Average groups of slices so every scan ends up with the same depth.
        chunk_sizes = math.ceil(len(slices) / hm_slices)
        for slice_chunk in chunks(slices, chunk_sizes):
            slice_chunk = list(map(mean, zip(*slice_chunk)))
            new_slices.append(slice_chunk)

        # Pad or merge chunks so we always end up with exactly ``hm_slices``.
        if len(new_slices) == hm_slices - 1:
            new_slices.append(new_slices[-1])

        if len(new_slices) == hm_slices - 2:
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])

        if len(new_slices) == hm_slices + 2:
            new_val = list(map(mean, zip(*[new_slices[hm_slices - 1], new_slices[hm_slices], ])))
            del new_slices[hm_slices]
            new_slices[hm_slices - 1] = new_val

        if len(new_slices) == hm_slices + 1:
            new_val = list(map(mean, zip(*[new_slices[hm_slices - 1], new_slices[hm_slices], ])))
            del new_slices[hm_slices]
            new_slices[hm_slices - 1] = new_val

        #print(len(slices), len(new_slices))

        if visualize:
            fig = plt.figure()
            for num, each_slice in enumerate(slices[:12]):
                y = fig.add_subplot(3, 4, num + 1)
                y.imshow(each_slice)
            plt.show()

        # FIX: the original had ``elif label == 1`` twice, so a negative
        # diagnosis (label 0) was never one-hot encoded and kept its raw
        # scalar value; encode both classes.
        if label == 1:
            label = np.array([0, 1])
        elif label == 0:
            label = np.array([1, 0])

        return np.array(new_slices), label
    except Exception as e:
        # Again, some patients are not labeled, but JIC we still want the error
        # if something else is wrong with our code.
        print("blah", str(e))

much_data = []
for num, patient in enumerate(patients):
    if num % 100 == 0:
        print(num)
    try:
        print("Num, Patient", num, patient)
        img_data, label = process_data(patient, labels_df,
                                       img_px_size=IMG_PX_SIZE, hm_slices=HM_SLICES)
        print("Image Data:", img_data, "Label:", label)
        much_data.append([img_data, label])
    except KeyError as e:
        # Unlabeled patients are simply skipped.
        print("This is unlabeled data")

np.save('muchdata--{}--{}--{}.npy'.format(IMG_PX_SIZE, IMG_PX_SIZE, HM_SLICES), much_data)
# -

# <p> Each of these instances is a patient </p>

# +
import tensorflow as tf
import numpy as np

save_dir = 'models/'
save_path = os.path.join(save_dir, 'best_validation')

IMG_SIZE_PX = 50
SLICE_COUNT = 20

n_classes = 2
batch_size = 10

# TF1-style graph placeholders for the input volume and one-hot label.
x = tf.placeholder('float')
y = tf.placeholder('float')

keep_rate = 0.8
keep_prob = tf.placeholder(tf.float32)

def conv3d(x, W):
    """3-D convolution with unit strides and SAME padding."""
    return tf.nn.conv3d(x, W, strides=[1, 1, 1, 1, 1], padding='SAME')

def maxpool3d(x):
    """2x2x2 max-pooling; window size equals the movement of the window."""
    return tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')

def train_neural_network(x):
    """Build a two-layer 3-D CNN, train it on muchdata--50--50--20.npy and save the model."""
    # FIX: comments said "5 x 5 x 5" but the kernels are 3 x 3 x 3.
    # 3 x 3 x 3 patches, 1 channel, 32 features to compute.
    weights = {'W_conv1': tf.Variable(tf.random_normal([3, 3, 3, 1, 32])),
               # 3 x 3 x 3 patches, 32 channels, 64 features to compute.
               'W_conv2': tf.Variable(tf.random_normal([3, 3, 3, 32, 64])),
               # 64 features
               'W_fc': tf.Variable(tf.random_normal([54080, 1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}

    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}

    # image X x image Y x image Z (single channel).
    x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])

    conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool3d(conv1)

    conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool3d(conv2)

    fc = tf.reshape(conv2, [-1, 54080])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
    fc = tf.nn.dropout(fc, keep_rate)

    prediction = tf.matmul(fc, weights['out']) + biases['out']
    print("Pred", prediction)

    # NOTE(review): newer numpy needs allow_pickle=True for this object array —
    # confirm against the numpy version in use.
    much_data = np.load('muchdata--50--50--20.npy')
    train_data = much_data
    #validation_data = much_data[-2:]

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    hm_epochs = 5
    with tf.Session() as sess:
        sess.run(init)

        success_runs = 0
        total_runs = 0

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1
                try:
                    X = data[0]
                    Y = data[1]
                    _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                    epoch_loss += c
                    success_runs += 1
                except Exception as e:
                    # Some samples fail to feed (e.g. wrong depth); skip them.
                    pass

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        # Save all variables of the TensorFlow graph to file.
        saver.save(sess=sess, save_path=save_path)
        print('Done. Finishing accuracy:')
        #print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        #print('fitment percent:', success_runs / total_runs)

train_neural_network(x)
# +
much_data = np.load('muchdata--50--50--20.npy')
instances = 0
for data in much_data:
    instances = instances + 1
    X = data[0]
    Y = data[1]
    # print(X, Y)
#print("There are", instances)
# +
# -
lung_cancer_dataanalytics/First Pass at Data Exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %load_ext ipyext.writeandexecute # + # %%writeandexecute -i pmpc_brush_layer pmpc_brush_layer.py from __future__ import division from numpy import pi import mbuild as mb from mbuild.lib.atoms import H from mbuild.lib.surfaces import Betacristobalite from mbuild.examples.pmpc.brush import Brush class PMPCLayer(mb.lib.recipes.Monolayer): """Create a layer of grafted pMPC brushes on a beta-cristobalite surface.""" def __init__(self, pattern, tile_x=1, tile_y=1, chain_length=4, alpha=pi / 4): surface = Betacristobalite() brush = Brush(chain_length=chain_length, alpha=alpha) hydrogen = H() super(PMPCLayer, self).__init__(surface, brush, backfill=hydrogen, pattern=pattern, tile_x=tile_x, tile_y=tile_y) # - pattern = mb.Random2DPattern(10) pmpc_layer = PMPCLayer(pattern=pattern, chain_length=3, alpha=pi / 4, tile_x=1, tile_y=1) print(pmpc_layer) pmpc_layer.visualize()
mbuild/examples/pmpc/pmpc_brush_layer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # COVID-19 WORLWIDE DATASET ANALYSIS. import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt print("Modules are imported") corona_dataset_csv = pd.read_csv("D:\Data Science\Covid_Data_Set/covid19_Confirmed_dataset.csv") corona_dataset_csv.head(100) #Let's check what our data looks like. corona_dataset_csv.shape #Always good to check the shape( #of rows and column) of our data_set. # Since we are don't need the longtide and latitude of each country, we can delete those from our dataset. Adjusted_df = corona_dataset_csv.drop(["Lat","Long"], axis = 1) #Using drop method, we can drop the two col we don't need. Make sure to identify the axis. Adjusted_df # + active="" # # Using drop method will not delete the two column from the original data set. It just won't show in our new data from. to remove it from the original data set, # # we can do as follows: # # # # - corona_dataset_csv.drop(["Lat","Long"],axis = 1,inplace = True) #Inplace will drop the two unwatnted col. 
corona_dataset_csv.head() # # Aggregating the rows by country: # **Instead of having multiple data from the same country based on the province/region, we can combine them and get a signle dataset for each country.** corona_dataset_aggregated= corona_dataset_csv.groupby('Country/Region').sum() # This will group data from each region and sum up the total to result in a single output for each country corona_dataset_aggregated # + # Let's check the shape of our dataset, to see how many rows(countries)'s data we have in our new dataset.** corona_dataset_aggregated.shape # - #The above result indicates, we have 187 countries data in our dataset # # Performing Visualisations corona_dataset_aggregated.loc['Australia'] #Showing data for Australia # + #the function loc, pick's selection from the col,which stands for locations # - corona_dataset_aggregated.loc['Australia'].plot() # + #Let's try multiple countries on the same graph and compare. corona_dataset_aggregated.loc['China'].plot() corona_dataset_aggregated.loc['Italy'].plot() corona_dataset_aggregated.loc['Spain'].plot() plt.legend() #This will add the legend to make it easy to identify # - # # Calculating a good measure to do the analysis. Let's findout the spread of the virus in each country # + #Let's calculate the spread of the virus for the first week days in our dataset for China. # - corona_dataset_aggregated.loc['China'][:7].plot() # + #Calculating and plotting the first derivative of the carve. This just means, calculating the maximum infection rate for each day. # - corona_dataset_aggregated.loc['Australia'].diff().plot() #This will show the change in infection rate day by day. #Let's calculate the maximum infection rate for China, Italy and Spain. corona_dataset_aggregated.loc['China'].diff().max() #This is 24 hours change in China. 
corona_dataset_aggregated.loc['Italy'].diff().max() corona_dataset_aggregated.loc['Spain'].diff().max() # + #Let's calculate the maximum infection rate for each country and assign it to its own col. #Since the indexes of our dataset is a list of coutries,we can do as follow: countries = list(corona_dataset_aggregated.index) #Create an empty list and append the result of each countries infection rate into the new list max_infection_rates = [] for c in countries: max_infection_rates.append(corona_dataset_aggregated.loc[c].diff().max()) max_infection_rates # - #The above result is maximum infection rates for each country.Let's put it into its own column. corona_dataset_aggregated['max_infection_rate'] = max_infection_rates corona_dataset_aggregated.columns #Our new col is included in the end corona_dataset_aggregated.index corona_dataset_aggregated.head() #Create a dataset with only needed col. Let's get rid of col with null value. corona_data = pd.DataFrame(corona_dataset_aggregated['max_infection_rate']) corona_data.head() # + #Let's combine our analysis with world happiness dataset and see if how that corralates to covid infections # - happiness_report_csv = pd.read_csv("D:\Data Science\Covid_Data_Set\worldwide_happiness_report.csv") happiness_report_csv.head() happiness_report_csv.head() #From the world happiness data, we can drop the col that are useless for analysis. For example, overall rank, generosity, score...etc. useless_cols = ["Overall rank", "Score","Generosity", "Perceptions of corruption"] # + #Now let's drop the useless col above from our dataset. happiness_report_csv.drop(useless_cols, axis = 1, inplace = True) # - happiness_report_csv.head() # + #Let's get rid of the indexes. Instead use the country as an indexes. 
# - happiness_report_csv.set_index("Country or region", inplace = True) happiness_report_csv.head() # **Now let's join the world happiness dataset with the corona dataset.*** corona_data.head() corona_data.shape happiness_report_csv happiness_report_csv.shape # ***Since the number of countries in the happiness dataset is smaller than that of Corona dataset, we need to perform inner join to combine them on country col.*** # data = corona_data.join(happiness_report_csv, how = "inner") #Performing inner join. data.head() # ***Let's check if there's any correlation between the factors in the above dataset.*** data.corr() # ***Now let's visualize the correlation and the bigger picture. Better way to do this is using seaborn.*** data.head() # ****Let's plot GDP vs Maximum infection rate.**** # x = data["GDP per capita"] y = data["max_infection_rate"] sns.scatterplot(x = x, y = y) # + #From the above graph, it's clear that there's we can see a clear picture of how the GDP per capita is related to infection rate. let's fix this by using log scale. # - sns.scatterplot(x = x, y = np.log(y)) #This will apply log scaling into the y-axis. # + #Let's take it further, and plot the slop of our data to show how they correlate. we use regplot this time for better viz. sns.regplot(x = x, y = np.log(y)) # + #As we can see, there's a positive slope, showing a clear correlation in our dataset.Let's investigate further and plot for different combo. # - x = data["Healthy life expectancy"] y = data["max_infection_rate"] sns.regplot(x = x,y = np.log(y)) x = data["Social support"] y = data["max_infection_rate"] sns.regplot(x = x, y = np.log(y)) x = data["Freedom to make life choices"] y = data["max_infection_rate"] sns.regplot(x=x, y= np.log(y)) # ***Surprisingly, there seems to be a positive correlation in developed country than the rest, which indicates, people in a developed countries are prone to catching Covid than those in the developing country. 
# In order to prove this, let's compare the result to the confirmed deaths dataset.

# FIX: use a raw string so the backslashes in the Windows path are taken
# literally instead of being parsed as escape sequences.
covid_death_dataset = pd.read_csv(r"D:\Data Science\Covid_Data_Set\covid19_deaths_dataset.csv")

covid_death_dataset.head(10)

covid_death_dataset.shape

# Latitude/longitude are not needed for this analysis.
covid_death_dataset.drop(["Lat", "Long"], axis=1, inplace=True)

covid_death_dataset.head()

# ***Let's perform some cleanup on the death dataset.***

# +
# Aggregate the data into a country.
covid_death_dataset.shape
# -

# ***Let's aggregate data into a country.***

# +
covid_death_aggregated = covid_death_dataset.groupby("Country/Region").sum()
covid_death_aggregated
# -

covid_death_aggregated.shape

# +
# Let's check the death number for Australia.
# -

# FIX: all references below used ``Covid_death_aggregated`` (capital C) while
# the variable was defined as ``covid_death_aggregated``, raising NameError.
covid_death_aggregated.loc["Australia"]

# # Let's perform visualisations on the death dataset.

covid_death_aggregated.loc["Australia"].plot()

covid_death_aggregated.loc["Australia"].plot()
covid_death_aggregated.loc["Spain"].plot()
covid_death_aggregated.loc["China"].plot()
plt.legend()

# ***Let's calculate the change in death rate***

covid_death_aggregated.loc["Australia"].diff().plot()

# ***Let's calculate the max death rate in 24 hrs***

covid_death_aggregated.loc["Australia"].diff().max()

covid_death_aggregated.loc["Germany"].diff().max()

covid_death_aggregated.loc["Australia"].diff().max()
_notebooks/2022-01-22-COVID-19 Data Analysis .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: demo-ext-wandb-37 # language: python # name: demo-ext-wandb-37 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Accelerating Deep Learning with Dask and GPUs # ## <NAME>, Senior Data Scientist # # # ### Center for Deep Learning, Northwestern University # ### April 27, 2021 # # [stephaniekirmer.com](https://www.stephaniekirmer.com) | twitter: [@data_stephanie](https://twitter.com/data_stephanie) | [saturncloud.io](https://saturncloud.io) # # https://github.com/skirmer/gpu_pytorch # # - # <style> # div.header { # position: absolute; # top: 10px; # right: 10px; # color: gray; # font-size: 12px; # } # </style> # # + [markdown] slideshow={"slide_type": "slide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # # ## Brief Introduction to Dask # # Dask is an open-source framework that enables parallelization of Python code. # # Two key concepts: # * Distributed data objects # * Distributed computation # # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ### Distributed Data # # Data is broken up across multiple machines, allowing analysis on data larger than any single machine's memory. # # ![](img/dask_df.png) # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ### Distributed Computation # # By using "lazy" evaluation, tasks can be organized and queued into DAGs/task graphs for distribution to workers and later computation. # # ![](img/dask_graph.png) # + [markdown] slideshow={"slide_type": "notes"} # [notes] # The foundation that makes this possible is what's called "lazy" evaluation or delayed evaluation. 
By creating delayed-evaluation tasks, you can develop task graphs, and distribute these across your compute resources to be run simultaneously. This may be used on single machines as well as clusters. # # This example shows an interconnected task graph of several delayed functions. # # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ## Dask Clusters # # When we implement the Dask framework across multiple machines, the cluster architecture looks something like this. In this structure, we can distribute tasks to the various machines, and return results in aggregate to the client. # # ![](img/dask-cluster.png) # + [markdown] slideshow={"slide_type": "slide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ## Applications for Deep Learning # # * Process extremely large data using distributed data objects and/or lazy loading # * Train very large or complex models using distributed training # # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ### Distributed Training # # * Training a single model across multiple machines simultaneously # * Break training data into subsets, each worker handles a different chunk # + [markdown] slideshow={"slide_type": "notes"} # [notes] By applying these foundations to deep learning tasks, we can expand the computation possible in a single unit of time - this includes training a single model on multiple machines simultaneously, scaling the training speed. # # In this demonstration, I'll apply the PyTorch Distributed Data Parallel framework to allow training an image classification model across a cluster. This allows the workers to communicate at intervals, sharing learning acquired during the iterations. 
# + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ![](img/step1.png) # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ![](img/step2.png) # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ![](img/step3.png) # + [markdown] slideshow={"slide_type": "slide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # # Demonstration # # Training image classification model # # * Architecture: Resnet50 (not pretrained) # * Dataset: Stanford Dogs (20,580 images) # + [markdown] slideshow={"slide_type": "subslide"} # <div class = "header"> https://github.com/skirmer/gpu_pytorch </div> # # ### Key Elements # # * Lazy, parallelized loading of training images (S3 to DataLoader) # * Distributed training across cluster, one job per worker # * Use GPU machines for computation # * Performance monitoring outside training context # + slideshow={"slide_type": "skip"} # #%run -i run_cluster_pyt.py
center-deep-learning/CDL-Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # RTP anomaly # This code calculates the RTP anomaly of the simple model data. # + import numpy as np import matplotlib.pyplot as plt import cPickle as pickle import os import pandas as pd from fatiando import utils from fatiando.gravmag import polyprism from fatiando.mesher import PolygonalPrism from fatiando.vis import mpl, myv from matplotlib import colors, ticker, cm from IPython.display import Image as img from matplotlib.mlab import normpdf import matplotlib as mpb import matplotlib.patches as patches # - # ### Auxiliary functions # + import sys sys.path.insert(0, '../../code') import mag_polyprism_functions as mfun import mag_sphere_functions as sf # - # # Input model_dir = '../dipping/model.pickle' data_dir = 'data.pickle' # ### Importing model parameters with open(model_dir) as w: model = pickle.load(w) # ### Observation points and observed data with open('data.pickle') as w: data = pickle.load(w) d = pd.read_csv('dipping_regional_data.txt', sep=' ') # observed data and observation points dobs = d['res_data'].get_values() xp = d['x'].get_values() yp = d['y'].get_values() zp = d['z'].get_values() N = xp.size print N # ## Calculating the RTP anomaly # + zc = np.zeros_like(xp) + 300 inc, dec = data['main_field'] incs = model['inc'] decs = model['dec'] A = sf.sm_tf_sphere(xp, yp, zp, xp, yp, zc, inc, dec, incs, decs) # - mu = 1.e-3*np.trace(np.dot(A.T, A))/N lp = np.linalg.solve(np.dot(A.T,A) + mu*np.identity(xp.size), np.dot(A.T,dobs)) dp = np.dot(A, lp) # + plt.figure(figsize=(6,5)) plt.title('Layer momentum', fontsize=20) plt.tricontourf(yp, xp, lp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) 
clb.ax.tick_params(labelsize=13) clb.ax.set_title('A/m') mpl.m2km() plt.show() # + plt.figure(figsize=(19,5)) plt.subplot(131) plt.title('Observed TFA', fontsize=20) plt.tricontourf(yp, xp, dobs, 20, cmap='RdBu_r').ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') mpl.m2km() plt.subplot(132) plt.title('Predicted TFA', fontsize=20) plt.tricontourf(yp, xp, dp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') mpl.m2km() plt.subplot(133) plt.title('Residual TFA', fontsize=20) plt.tricontourf(yp, xp, dobs - dp, 20, cmap='RdBu_r').ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') mpl.m2km() plt.show() # - G = sf.sm_tf_sphere(xp, yp, zp, xp, yp, zc, 90., 0., 90., 0.) model['prisms'][0].props rtp = np.dot(G, lp) for m in model['prisms']: m.addprop('magnetization', utils.ang2vec(model['intensity'], 90., 0.)) model['prisms'][0].props rtp_true = polyprism.tf(xp, yp, zp, model['prisms'], 90., 0.) 
# + plt.figure(figsize=(19,5)) plt.subplot(131) plt.title('True RTP', fontsize=20) plt.tricontourf(yp, xp, rtp_true, 20, cmap='RdBu_r', vmin=-np.max(rtp_true), vmax=np.max(rtp_true)).ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') mpl.m2km() ax2 = plt.subplot(132) plt.title('Predicted RTP', fontsize=20) circle1 = plt.Circle((0, 0), 800, color='k', linewidth=3., fill=False) plt.tricontourf(yp, xp, rtp, 20, cmap='RdBu_r', vmin=-np.max(rtp_true), vmax=np.max(rtp_true)).ax.tick_params(labelsize=12) mpl.polygon(model['prisms'][0], '-g', linewidth=3., xy2ne=True) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') ax2.add_artist(circle1) mpl.m2km() plt.subplot(133) plt.title('Residual RTP', fontsize=20) plt.tricontourf(yp, xp, rtp_true - rtp, 20, cmap='RdBu_r', vmin=-np.max(rtp_true - rtp), vmax=np.max(rtp_true - rtp)).ax.tick_params(labelsize=12) plt.xlabel('$y$(km)', fontsize=18) plt.ylabel('$x$(km)', fontsize=18) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=13) clb.ax.set_title('nT') mpl.m2km() plt.show() # + # importing the pickle file of results result_path = 'l2-tfa-inversion/multiple-outcrop-3366665/' with open(result_path+'inversion.pickle') as w: inversion = pickle.load(w) # - z0 = inversion['z0'] intensity = inversion['intensity'] z0_plot, int_plot = np.meshgrid(z0, intensity) z0_plot = z0_plot.ravel() int_plot = int_plot.ravel() n = z0.size m = intensity.size N = inversion['x'].size y = inversion['y'] x = inversion['x'] obs = inversion['observed_data'] initial = inversion['results'][9][2][0][0] solution = [2,5] truevalues = [0,2] x0 = -200. y0 = 0. 
inversion['results'][17][1][0][-1] # building the matrix of the goal function gamma_matrix = mfun.goal_matrix(n, m, inversion['results']) # + #plt.close('all') plt.figure(figsize=(12,10)) ax1=plt.subplot(2,2,1) circle1 = plt.Circle((0, 0), 700, color='r', linewidth=2., fill=False) plt.tricontour(d['y'], d['x'], d['res_data'], 20, colors='k', linewidths=0.5) norm=mpb.colors.Normalize(vmin=np.min(d['res_data']), vmax=-np.min(d['res_data'])) plt.tricontourf(d['y'], d['x'], d['res_data'], 20, cmap = plt.get_cmap('RdBu_r'), norm=norm).ax.tick_params(labelsize=14) plt.plot(d['y'], d['x'], '.k', markersize=0.2) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.set_title('nT', pad=-292, fontsize=14) clb.ax.tick_params(labelsize=14) plt.xlabel('$y$(km)', fontsize=14, labelpad=0) plt.ylabel('$x$(km)', fontsize=14, labelpad=0) ax1.add_artist(circle1) mpl.polygon(model['prisms'][0], '-b', linewidth=2., xy2ne=True) plt.text(np.min(yp)-500, np.max(xp)+500, '(a)', fontsize=20) mpl.m2km() ax3=plt.subplot(2,2,2) #plt.title("Campo esfera (nT)") circle1 = plt.Circle((0, 0), 700, color='r', linewidth=2., fill=False) plt.tricontourf(d['y'], d['x'], d['tfa_true']-d['res_data']+ np.random.normal(loc=0., scale=5., size=d['y'].size), 15, cmap = plt.get_cmap('RdBu_r')).ax.tick_params(labelsize=14) plt.plot(d['y'], d['x'], '.k', markersize=0.2) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.set_title('nT', pad=-292, fontsize=14) clb.ax.tick_params(labelsize=14) plt.xlabel('$y$(km)', fontsize=14, labelpad=0) plt.ylabel('$x$(km)', fontsize=14, labelpad=0) ax3.add_artist(circle1) mpl.polygon(model['prisms'][0], '-b', linewidth=2., xy2ne=True) plt.text(np.min(yp)-500, np.max(xp)+500, '(b)', fontsize=20) mpl.m2km() ax2 = plt.subplot(223) circle1 = plt.Circle((0, 0), 700, color='r', linewidth=2., fill=False) #plt.title('Predicted RTP', fontsize=20) plt.tricontour(yp, xp, rtp, 20, colors='k', linewidths=0.5) plt.tricontourf(yp, xp, rtp, 20, cmap='RdBu_r', vmin=-np.max(rtp_true), 
vmax=np.max(rtp_true)).ax.tick_params(labelsize=14) plt.plot(yp, xp, '.k', markersize=0.2) plt.xlabel('$y$(km)', fontsize=14, labelpad=0) plt.ylabel('$x$(km)', fontsize=14, labelpad=0) clb = plt.colorbar(pad=0.025, aspect=40, shrink=1) clb.ax.tick_params(labelsize=14) clb.ax.set_title('nT', pad=-292, fontsize=14) mpl.polygon(model['prisms'][0], '-b', linewidth=2., xy2ne=True) ax2.add_artist(circle1) plt.text(np.min(yp)-500, np.max(xp)+500, '(c)', fontsize=20) mpl.m2km() ax2 = plt.subplot(2,2,4) w = 3 img = ax2.imshow(gamma_matrix, vmin=np.min(gamma_matrix), vmax=800, origin='lower',extent=[0,w,0,w]) clb = plt.colorbar(img, pad=0.012, shrink=1) clb.ax.set_title('nT$^2$', pad=-292, fontsize=14) clb.ax.tick_params(labelsize=14) img.axes.tick_params(labelsize=14) plt.ylabel('$z_0 (m)$', fontsize=14) plt.xlabel('$m_0 (A/m)$', fontsize=14) ax2.text(-0.4, 3.1, '(d)', fontsize=20) plt.plot((2.*truevalues[1]+1.)*w/(2.*m), (2.*truevalues[0]+1.)*w/(2.*n), '^r', markersize=10) plt.plot((2.*solution[1]+1.)*w/(2.*m), (2.*solution[0]+1.)*w/(2.*n), 'Dw', markersize=10) x_label_list = [] y_label_list = [] for xl, yl in zip(intensity,z0): x_label_list.append(str(xl)[:-2]) y_label_list.append(str(yl)[:-2]) ax2.set_xticks(np.linspace(w/(2.*n), w - w/(2.*n), n)) ax2.set_yticks(np.linspace(w/(2.*m), w - w/(2.*m), m)) ax2.set_xticklabels(x_label_list) ax2.set_yticklabels(y_label_list) # Minor ticks ax2.set_xticks(np.linspace(0, w, n+1), minor=True) ax2.set_yticks(np.linspace(0, w, m+1), minor=True) ax2.grid(which='minor', color='k', linewidth=1.5) plt.tight_layout(pad = 2.) plt.savefig('../../manuscript/figures/regional_rtp.png', dpi=300, bbox_inches='tight') plt.show() # -
code/dipping-regional/RTP_anomaly.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="-AkWVB-4lVn7" colab_type="code" colab={} import pandas as pd import numpy as np import random import re from sklearn.feature_extraction.text import CountVectorizer from sklearn.metrics.pairwise import cosine_similarity # + id="R4u7mp4ml-w9" colab_type="code" colab={} df = pd.read_csv('/content/drive/My Drive/Colab Datasets/COVID Chat Bot/cdc_qa.csv', header=0, names=['Questions', 'Answers']) # + id="SPUe5vk4mUIw" colab_type="code" outputId="65778657-6fd7-4ba3-f67c-1c83a31e245e" executionInfo={"status": "ok", "timestamp": 1585861450742, "user_tz": 300, "elapsed": 514, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj0LpL-hu169RD5uOvSoMS5Ja4MNM6gnH89MbKU=s64", "userId": "03990109818721859613"}} colab={"base_uri": "https://localhost:8080/", "height": 198} df.head(5) # + id="qfPmfvmlmzuO" colab_type="code" colab={} vectorizer = CountVectorizer() count_vec = vectorizer.fit_transform(df['Questions']).toarray() # + id="pwHGaUeemaWL" colab_type="code" colab={} def COVIDbot(user_response): text = vectorizer.transform([user_response]).toarray() df['similarity'] = cosine_similarity(count_vec, text) return df.sort_values(['similarity'], ascending=False).iloc[0]['Answers'] # + id="8vCymXceunG-" colab_type="code" outputId="c4a3a451-df3b-4f85-ae12-3b4d8214b85d" executionInfo={"status": "ok", "timestamp": 1585865200096, "user_tz": 300, "elapsed": 520, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj0LpL-hu169RD5uOvSoMS5Ja4MNM6gnH89MbKU=s64", "userId": "03990109818721859613"}} colab={"base_uri": "https://localhost:8080/", "height": 55} COVIDbot('what is coronavirus?') # + id="tmDVU6Myp9Lg" colab_type="code" colab={} welcome_input = ("hello", "hi", "greetings", "sup", "what's up","hey",) 
# Canned replies for greeting keywords; one is chosen at random.
welcome_response = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"]

def welcome(user_response):
    """Return a random greeting if *user_response* contains a greeting word.

    Falls through (implicitly returning None) when no word of the input is
    found in ``welcome_input``.
    """
    for word in user_response.split():
        if word.lower() in welcome_input:
            return random.choice(welcome_response)

# + id="hWXfV-i-okK7" colab_type="code" outputId="deb065d0-fe0a-406d-ef8e-954909123603" executionInfo={"status": "ok", "timestamp": 1585865783617, "user_tz": 300, "elapsed": 105884, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj0LpL-hu169RD5uOvSoMS5Ja4MNM6gnH89MbKU=s64", "userId": "03990109818721859613"}} colab={"base_uri": "https://localhost:8080/", "height": 219}
# Interactive REPL loop: greet on greeting words, otherwise answer with
# COVIDbot(); any of 'bye'/'shutdown'/'exit'/'quit' (or 'thanks') ends it.
flag=True
print("Greetings! I am a chatbot and I will try to answer your questions about COVID-19. If you want to exit, type Bye!")
while(flag==True):
    user_response = input()
    user_response = user_response.lower()
    if(user_response not in ['bye','shutdown','exit', 'quit']):
        if(user_response=='thanks' or user_response=='thank you'):
            flag=False
            print("Chatbot : You are welcome..")
        else:
            # NOTE(review): welcome() is invoked twice, so the printed greeting
            # may differ from the one that passed the None-check (random.choice).
            if(welcome(user_response)!=None):
                print("Chatbot : "+welcome(user_response))
            else:
                print("Chatbot : ",end="")
                print(COVIDbot(user_response))
    else:
        flag=False
        print("Chatbot : Bye!!! ")

# + id="gJwulRTGspgF" colab_type="code" colab={}
# Unused draft: wrap a long string into chunks of at most chunk_size chars.
# def split_input(user_string, chunk_size):
#     output = []
#     words = user_string.split(" ")
#     total_length = 0
#     while (total_length < len(user_string) and len(words) > 0):
#         line = []
#         next_word = words[0]
#         line_len = len(next_word) + 1
#         while (line_len < chunk_size) and len(words) > 0:
#             words.pop(0)
#             line.append(next_word)
#             if (len(words) > 0):
#                 next_word = words[0]
#                 line_len += len(next_word) + 1
#         line = " ".join(line)
#         output.append(line)
#         total_length += len(line)
#     return output

# + id="Sl748endsrAX" colab_type="code" colab={}
# split_input(df['Answers'].iloc[0], 80)

# + id="ISmsY1L10y3t" colab_type="code" colab={}
# Unused draft: put each sentence of a response on its own line.
# def format_response(user_response):
#     a = user_response.split('. ')
#     b = '.\n'
#     c = b.join(a)
#     print(c)

# + id="DFYpp28U1c6c" colab_type="code" colab={}
# format_response(df['Answers'].iloc[0])

# + id="_Ap_4Zr6270S" colab_type="code" colab={}
# import textwrap

# + id="f0jaEfGG3ExD" colab_type="code" colab={}
# textwrap.wrap(df['Answers'].iloc[0])
COVID Chatbot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: saturn (Python 3)
#     language: python
#     name: python3
# ---

# COVID-19 vaccine appointment notifier: a Prefect flow that polls the
# vaccinespotter.org feed for a state every 10 minutes and emails an HTML
# summary of appointments available within a radius of given coordinates.

# +
import requests
import pandas as pd
import numpy as np
import json
import haversine
import datetime
from haversine import Unit
from prefect import task, Flow, Parameter, case
from prefect.tasks.notifications.email_task import EmailTask
from prefect.schedules import IntervalSchedule
# -

# Base URL of the vaccinespotter.org per-state GeoJSON feed.
ENDPOINT = 'https://www.vaccinespotter.org/api/v0/states'


# +
@task
def load_data(state):
    """Fetch the feed for *state* and flatten it into a DataFrame.

    Adds a ``coordinates`` column of (lat, lon) tuples plus timestamps
    describing when the appointments were last fetched upstream.
    """
    json_payload = requests.get(f'{ENDPOINT}/{state}.json')
    data = json.loads(json_payload.content)
    df = pd.DataFrame([x['properties'] for x in data['features']])
    # GeoJSON stores (lon, lat); flip to (lat, lon) for haversine.
    df['coordinates'] = [(x['geometry']['coordinates'][1], x['geometry']['coordinates'][0])
                         for x in data['features']]
    df['appointments_last_fetched'] = pd.to_datetime(data['metadata']['appointments_last_fetched'])
    df['as_of_time'] = df.appointments_last_fetched.dt.tz_convert('US/Eastern').dt.strftime('%B %d, %Y, %I:%M %p')
    return df


@task
def available_appts(df, current_coords, distance_miles=None, filters=None):
    """Return rows with open appointments, optionally filtered.

    Parameters: *current_coords* is a (lat, lon) pair; *distance_miles*, when
    given, drops locations farther away; *filters* is a {column: value} dict
    of exact-match constraints.
    """
    # FIX(review): take an explicit copy so the columns added below do not
    # write into a view of `df` (SettingWithCopyWarning / silent no-op).
    close_df = df[df.appointments_available == True].copy()
    close_df['distance_miles'] = close_df['coordinates'].apply(
        lambda x: haversine.haversine(x, current_coords, unit=Unit.MILES)
    )
    if distance_miles:
        close_df = close_df[(close_df.distance_miles <= distance_miles)]
    if filters:
        for k, v in filters.items():
            close_df = close_df[close_df[k] == v]
    return close_df


@task
def is_appt_avail(avail_df):
    """True when at least one appointment row survived the filters."""
    return len(avail_df) > 0


@task
def notification_email(avail_df, current_coords, distance_miles=None, filters=None):
    """Build and return (subject, HTML body) describing every available slot."""
    def format_appt(x):
        # Render one location's list of appointment dicts as <li> items.
        # BUG FIX(review): was `pd.DataFrame(avail_df.appointments.iloc[0])`,
        # which rendered the FIRST location's time slots for every row.
        time_df = pd.DataFrame(x)
        time_df['time'] = pd.to_datetime(time_df['time'])
        time_df['time_formatted'] = time_df['time'].dt.strftime('%B %d, %Y, %I:%M %p')
        time_df['appt_formatted'] = time_df.agg(
            lambda x: f'{x.time_formatted} ({x.type})' if 'type' in x else x.time_formatted,
            axis=1,
        )
        time_df['appt_formatted'] = '<li>' + time_df['appt_formatted'] + '</li>'
        return ''.join(time_df['appt_formatted'].values)

    avail_df = avail_df.fillna({'provider': '', 'address': '', 'city': '', 'state': '', 'postal_code': ''})
    avail_df['appointments_html'] = '<ul>' + avail_df.appointments.apply(format_appt) + '</ul>'
    avail_df['html'] = (
        '<h2>'
        + avail_df.provider + ' - '
        + avail_df.address + ', '
        + avail_df.city + ', '
        + avail_df.state + ', '
        + avail_df.postal_code
        + ' (' + np.round(avail_df.distance_miles).astype(str) + ' miles)'
        + '</h2>'
        + avail_df.appointments_html
    )
    email_content = f'Date appointments pulled: {avail_df.as_of_time.iloc[0]}'
    email_subject = f'COVID-19 Vaccine Appointments near {current_coords}'
    if distance_miles:
        email_content += f'<h1> Within {distance_miles} miles of {current_coords}</h1>'
    if filters:
        email_content += f'<h2>Filters:</h2> <p>{filters}</p>'
    email_content += ''.join(avail_df.html.values)
    return (email_subject, email_content)


email_task = EmailTask(email_from='<EMAIL>')
# -

# Ad-hoc smoke test of the loader outside the flow.
df = load_data.run('FL')

# +
schedule = IntervalSchedule(interval=datetime.timedelta(minutes=10))

with Flow('covid-vaccine-appt-notifier', schedule) as flow:
    state = Parameter('state')
    current_coords = Parameter('current_coords')
    distance_miles = Parameter('distance_miles')
    filters = Parameter('filters')
    email_to = Parameter('email_to')
    df = load_data(state)
    avail_df = available_appts(df, current_coords, distance_miles, filters)
    # Only send mail when something is actually available.
    with case(is_appt_avail(avail_df), True):
        email_subject_content = notification_email(avail_df, current_coords, distance_miles, filters)
        email_task(email_to=email_to, subject=email_subject_content[0], msg=email_subject_content[1])
# -

# FIX(review): these placeholders were bare `name = # comment` lines, which do
# not parse; they now hold valid defaults to be edited before running.
state = 'FL'                 # two-letter state code
current_coords = (0.0, 0.0)  # TODO: replace with your (lat, lon)
distance_miles = 100
filters = {}
email_to = ''                # TODO: destination email address

flow.run(
    parameters={
        'state': state,
        'current_coords': current_coords,
        'distance_miles': distance_miles,
        'filters': filters,
        'email_to': email_to,
    }
)
vaccine-appts-prototype.ipynb
-- ---
-- jupyter:
--   jupytext:
--     text_representation:
--       extension: .hs
--       format_name: light
--       format_version: '1.5'
--       jupytext_version: 1.14.4
--   kernelspec:
--     display_name: Haskell
--     language: haskell
--     name: haskell
-- ---

-- # Numerical Representations

-- [![nbviewer](https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg)](https://nbviewer.org/github/matyama/pfds/blob/main/notebooks/num.ipynb)

-- +
-- | Natural numbers supporting increment and decrement.
-- | NOTE: intentionally shadows 'Prelude.Num' within this notebook.
class Num n where
  -- Increment given number by one
  inc :: n -> n

  -- Decrement given number by one
  dec :: n -> n

class Num n => Add n where
  -- Add two numbers together
  add :: n -> n -> n
-- -

-- ## Dense Binary Numbers
-- Let $N \in \mathbb{N}$ have a binary representation $b_0 \dots b_{m-1}$ with digits $b_i \in \{0, 1\}$, i.e. $N = \sum_{i=0}^{m-1} b_i 2^i$. Then the *dense representation* explicitly stores each digit $b_i$.
--
-- Since there can be at most a logarithmic number of 0s and 1s, both `inc` and `dec` run in $O(log(n))$.

-- +
{-# LANGUAGE FlexibleInstances #-}

data Digit = Zero | One

-- | Dense binary numbers as a list of 'Digit's in increasing order of significance
type DenseNat = [Digit]

instance Num DenseNat where
  inc [] = [One]
  inc (Zero:ds) = One : ds
  inc (One:ds) = Zero : inc ds -- carry

  dec [One] = []
  dec (One:ds) = Zero : ds
  dec (Zero:ds) = One : dec ds -- borrow

instance Add DenseNat where
  add ds [] = ds
  add [] ds = ds
  add (d:ds1) (Zero:ds2) = d : add ds1 ds2
  add (Zero:ds1) (d:ds2) = d : add ds1 ds2
  add (One:ds1) (One:ds2) = Zero : inc (add ds1 ds2) -- carry
-- -

-- ## Sparse Binary Numbers
-- Contrary to the *dense representation* which stores zeros, in the *sparse representation* zeros are omitted.
--
-- Sparse encoding, however, must still preserve complete information (must be representative). Therefore in sparse representation one stores ranks or weights of the non-zero digits $b_i$ rather than the digits themselves - e.g. for binary numbers one'd store just those weights $w_i = 2^i$ for which $b_i = 1$.
-- +
{-# LANGUAGE FlexibleInstances #-}

-- | Sparse binary numbers as an increasing list of weights, each a power of 2
type SparseNat = [Int]

-- | Add weight 'w' into the number, propagating a carry when the slot is taken.
carry :: Int -> SparseNat -> SparseNat
carry w [] = [w]
carry w ws @ (w':ws') = if w < w' then w : ws else carry (2*w) ws'

-- | Remove weight 'w' from the number, borrowing from higher weights as needed.
borrow :: Int -> SparseNat -> SparseNat
borrow w ws @ (w':ws') = if w == w' then ws' else w : borrow (2*w) ws

instance Num SparseNat where
  inc = carry 1
  dec = borrow 1

instance Add SparseNat where
  add ws [] = ws
  add [] ws = ws
  add m @ (w1:ws1) n @ (w2:ws2) = case compare w1 w2 of
    LT -> w1 : add ws1 n
    GT -> w2 : add m ws2
    EQ -> carry (2*w1) (add ws1 ws2)
-- -

-- ## Zeroless Binary Numbers
-- *Zeroless binary numbers* are similar to ordinary binary numbers but are constructed from 1s and 2s. There's no zero in this representation but the weight of the $i$-th digit is still $2^i$.
--
-- For example the decimal number 16 can be written as $\text{2111}$ instead of $\text{00001}$ (note: in our binary representation LSB is digit $b_0$).
--
-- ### Benefits
-- The main benefit of this representation is that all digits are non-zero. This can be exploited by data structures using a dense representation to efficiently store and access data at the first position.

-- +
-- | Binary digit for which Zero -> One and One -> Two
data Digit = One | Two deriving (Show, Eq)

-- | Dense zeroless representation as a list of non-zero digits in increasing order of weights 2^i
type ZerolessNat = [Digit]

instance Num ZerolessNat where
  inc [] = [One]
  inc (One:ds) = Two : ds
  inc (Two:ds) = One : inc ds

  dec [One] = []
  dec (Two:ds) = One : ds
  dec (One:ds) = Two : dec ds

instance Add ZerolessNat where
  -- | Note: Compared to 'DenseNat', here carries involve either 1s or 2s.
  -- FIX(review): recursive calls previously used the REPL-session qualifier
  -- 'Ghci2.add', an IHaskell artifact that does not resolve outside that
  -- session; the plain class method 'add' is the correct reference.
  add ds [] = ds
  add [] ds = ds
  add (One:ds1) (One:ds2) = Two : add ds1 ds2
  add (One:ds1) (Two:ds2) = One : inc (add ds1 ds2)
  add (Two:ds1) (d:ds2) = d : inc (add ds1 ds2)
-- -

-- ## Redundant Binary Numbers
--
-- In the implementation of `LazyNat` presented below, we make a case for a [*redundant representation*](https://en.wikipedia.org/wiki/Redundant_binary_representation) of binary numbers.
--
-- The motivation behind `LazyNat` is to amortize operations to $O(1)$ via lazy evaluation. As the book demonstrates, there is a problem with a straightforward modification of `DenseNat` - although both `inc` and `dec` can be made amortized $O(1)$ independently but this breaks when used in combination.
--
-- Informally, one operation makes one of the digits "dangerous" (expensive) while the other operation does the exact opposite. The solution to this problem is to introduce another redundant digit that is "safe" for both operations.

-- +
-- | Redundant representation of binary digits where
-- | - 'Zero' is dangerous for the 'inc' operation
-- | - 'One' is safe for both operations
-- | - 'Two' is dangerous for the 'dec' operation
data Digit = Zero | One | Two

-- | Lazy, dense and redundant representation of binary numbers
type LazyNat = [Digit]

instance Num LazyNat where
  -- | Note: Runs in O(1) amortized time.
  inc [] = [One]
  inc (Zero:ds) = One : ds
  inc (One:ds) = Two : ds
  inc (Two:ds) = One : inc ds

  -- | Note: Runs in O(1) amortized time.
  dec [One] = []
  dec (One:ds) = Zero : ds
  dec (Two:ds) = One : ds
  dec (Zero:ds) = One : dec ds

instance Add LazyNat where
  add ds [] = ds
  add [] ds = ds
  add (d:ds1) (Zero:ds2) = d : add ds1 ds2
  add (Zero:ds1) (d:ds2) = d : add ds1 ds2
  add (One:ds1) (One:ds2) = Two : add ds1 ds2
  add (d:ds1) (Two:ds2) = d : inc (add ds1 ds2)
  add (Two:ds1) (d:ds2) = d : inc (add ds1 ds2)
-- -

-- ## Segmented Binary Numbers
-- The representation of *segmented binary numbers* `SegmentedNat` is similar to the ordinary `DenseNat` but with digits grouped into blocks of either 0s or 1s.
--
-- The benefit of this segmentation is that carries and borrows do not cascade and thus can be carried over whole blocks in constant time (all at once). Due to this property, `inc` and `dec` operations run in $O(1)$ worst case time.
--
-- Note that although this representation provides efficient operations, usually it is not directly possible to translate this numerical representation to more complicated structures (e.g. to trees).

-- +
-- | Segment certain number of either Os or 1s
data DigitBlock = Zeros Int | Ones Int

-- | Segmented (dense) representation of binary numbers
type SegmentedNat = [DigitBlock]

-- | Helper that merges adjacent blocks of 0s, discards empty blocks and removes trailing 0s.
-- |
-- | Note: Due to the 'DigitBlock' representation this function runs in O(1) worst case time.
zeros :: Int -> SegmentedNat -> SegmentedNat
zeros _ [] = []
zeros 0 bs = bs
zeros i ((Zeros j):bs) = Zeros (i + j) : bs
zeros i bs = Zeros i : bs

-- | Helper that merges adjacent blocks of 1s and discards empty blocks.
-- |
-- | Note: Due to the 'DigitBlock' representation this function runs in O(1) worst case time.
ones :: Int -> SegmentedNat -> SegmentedNat
ones 0 bs = bs
ones i ((Ones j):bs) = Ones (i + j) : bs
ones i bs = Ones i : bs

instance Num SegmentedNat where
  -- | Note: Runs in O(1) worst case time based on the analysis of 'ones' and 'zeros'.
  inc [] = [Ones 1]
  inc ((Zeros i):bs) = ones 1 $ zeros (i - 1) bs
  inc ((Ones i):bs) = Zeros i : inc bs

  -- | Note: Runs in O(1) worst case time based on the analysis of 'ones' and 'zeros'.
  dec ((Ones i):bs) = zeros 1 $ ones (i - 1) bs
  dec ((Zeros i):bs) = Ones i : dec bs

instance Add SegmentedNat where
  -- | Note: This implementation expects properly segmented inputs. If this cannot be assumed,
  -- | one could use for instance `zeros (j - i) ds2` in `add ds1 (Zeros (j - i) : ds2)`.
  add bs [] = bs
  add [] bs = bs
  add ((Zeros i):ds1) ((Zeros j):ds2) = if i < j
    then zeros i $ add ds1 (Zeros (j - i) : ds2)
    else zeros j $ add (Zeros (i - j) : ds1) ds2
  add ((Ones i):ds1) ((Zeros j):ds2) = if i < j
    then ones i $ add ds1 (Zeros (j - i) : ds2)
    else ones i $ add (Ones (i - j) : ds1) ds2
  add ((Zeros i):ds1) ((Ones j):ds2) = if i < j
    then ones i $ add ds1 (Ones (j - i) : ds2)
    else ones i $ add (Zeros (i - j) : ds1) ds2
  add ((Ones i):ds1) ((Ones j):ds2) = if i < j
    then zeros i $ inc (add ds1 (Ones (j - i) : ds2))
    else zeros j $ inc (add (Ones (i - j) : ds1) ds2)
-- -

-- ## Redundant Segmented Binary Numbers
-- `RedundantSegmentedNat` is a more practical variant of `SegmentedNat` which combines *redundant* and *segmented representation*.
--
-- ### Representation
--
-- The representation is similar to `LazyNat` in that there are two "dangerous" digits (`Zero` and `Two`) but the "safe" digit is actually a whole block of `Ones` - just as in `SegmentedNat`.
--
-- ### Operations
--
-- One can consider a `Two` as a carry in progress. Then to prevent a cascade of carries, one has to guarantee that there are no more than one `Two` in a row. In regular expressions, this invariant translates to either of these two:
-- - $(\text{0}\:|\:\text{1}\:|\:\text{01}^*\text{2})^*$
-- - $(\text{0}^*\text{1}\:|\:\text{0}^+\text{1}^*\text{2}^*)^*$ (without trailing zeros)
--
-- I.e. the last non-one digit before each `Two` is a `Zero`.
--
-- Because the first digit is never a `Two`, `inc` can run in $O(1)$ worst case time - by simply incrementing the first digit and then fixing the invariant. `dec`, on the other hand, may take $O(log(n))$ time in the worst case.
--
-- ### Generalization (*recursive slowdown*)
--
-- This numerical representation can be viewed as a template for data structures composed of a sequence of levels where each level can be classified as
-- - *green* corresponds to `Zero` in the above interpretation
-- - *yellow* corresponds to `One` in the above interpretation
-- - *red* corresponds to `Two` in the above interpretation
--
-- > An operation may degrade the color of the first level from *green* to *yellow* or from *yellow* to *red* but never from *green* to *red*. [...] The invariant is that the last non-yellow level before a red level is always green. [...] Consecutive *yellow* levels are grouped into a block to support efficient access to the first non-yellow level.
--
-- For instance a *segmented binomial heap* that supports `insert` in $O(1)$ worst case time can be represented as follows:
-- ```haskell
-- data Tree a = Node a [Tree a]
--
-- data Digit a = Zero | Ones [Tree a] | Two (Tree a) (Tree a)
--
-- type SegmentedBinomialHeap a = [Digit a]
-- ```
--
-- Note: The invariant is restored after each `merge` by eliminating all `Two`s.

-- +
-- | Semi-segmented representation of redundant binary numbers
data Digits = Zero | Ones Int | Two

type RedundantSegmentedNat = [Digits]

-- | Helper to merge adjacent blocks of 1s and delete empty blocks.
-- |
-- | Note: This function runs in O(1) worst case time.
ones :: Int -> RedundantSegmentedNat -> RedundantSegmentedNat
ones 0 ds = ds
ones i ((Ones j):ds) = Ones (i + j) : ds
ones i ds = Ones i : ds

-- | Increment given number by blindly incrementing the first digit.
-- |
-- | Note: Runs in O(1) worst case time.
simpleinc :: RedundantSegmentedNat -> RedundantSegmentedNat
simpleinc [] = [Ones 1]
simpleinc (Zero:ds) = ones 1 ds
simpleinc ((Ones i):ds) = Two : ones (i - 1) ds

-- | Restore the invariant by fixing leading 'Two' and the first non-one digit in a sequence 'ds'.
-- |
-- | Note: Runs in O(1) worst case time.
fixup :: RedundantSegmentedNat -> RedundantSegmentedNat
fixup (Two:ds) = Zero : simpleinc ds
fixup ((Ones i):Two:ds) = Ones i : Zero : simpleinc ds
fixup ds = ds

instance Num RedundantSegmentedNat where
  -- | Increment given number in O(1) worst case time.
  inc = fixup . simpleinc

  -- | Decrement given number in O(log(n)) worst case time.
  -- |
  -- | This implementation is a combination of 'dec' for 'LazyNat' and 'SegmentedNat'.
  -- | The complexity comes from the fact that 0s (and 2s) are dense and not segmented.
  dec ((Ones i):ds) = Zero : ones (i - 1) ds
  dec (Two:ds) = ones 1 ds
  dec (Zero:ds) = ones 1 $ dec ds

-- TODO: `instance Add RedundantSegmentedNat`
-- -

-- As mentioned above, `dec` may unfortunately take up to $O(log(n))$ time in the worst case.
--
-- However, the complexity of `dec` can be improved to $O(1)$ by extending the redundant representation to digits 0, 1, 2, 3 and 4 where
-- - `Zero` and `Four` are *red* (i.e. borrow and carry in progress)
-- - `Ones` and `Threes` are *yellow* (represented as segments)
-- - and `Two` is *green*
--
-- For the `RedundantSegmentedNat` above we maintained the invariant that there is no more than one *red* digit in a row (i.e. the first non-one digit had to be `Zero` - the *green* digit). Similarly, here we must ensure that the first non-one, non-three is `Two` (which is now *green*).
--
-- TODO: Check whether the implementation below is correct. Most importantly the fixup functions. Also implement `Add` instance for `SegmentedRedundantNat`.

-- +
-- | Segmented representation of redundant binary numbers
data Digits = Zero | Ones Int | Two | Threes Int | Four

type SegmentedRedundantNat = [Digits]

-- | Helper to merge adjacent blocks of 1s and delete empty blocks.
-- |
-- | Note: This function runs in O(1) worst case time.
ones :: Int -> SegmentedRedundantNat -> SegmentedRedundantNat
ones 0 ds = ds
ones i ((Ones j):ds) = Ones (i + j) : ds
ones i ds = Ones i : ds

-- | Helper to merge adjacent blocks of 3s and delete empty blocks.
-- |
-- | Note: This function runs in O(1) worst case time.
threes :: Int -> SegmentedRedundantNat -> SegmentedRedundantNat
threes 0 ds = ds
threes i ((Threes j):ds) = Threes (i + j) : ds
threes i ds = Threes i : ds

-- | Increment given number by blindly incrementing the first digit.
-- |
-- | Note: Runs in O(1) worst case time.
simpleinc :: SegmentedRedundantNat -> SegmentedRedundantNat
simpleinc [] = [Ones 1]
simpleinc (Zero:ds) = ones 1 ds
simpleinc (Two:ds) = threes 1 ds
simpleinc ((Ones i):ds) = Two : ones (i - 1) ds
simpleinc ((Threes i):ds) = Four : threes (i - 1) ds

-- | TODO: check if 'Two' is correct
-- |
-- | Restore the invariant after 'simpleinc' by fixing leading 'Four' and
-- | the first non-three digit in a sequence 'ds'.
-- |
-- | Note: Runs in O(1) worst case time.
fixupinc :: SegmentedRedundantNat -> SegmentedRedundantNat
fixupinc (Four:ds) = Two : simpleinc ds
fixupinc ((Threes i):Four:ds) = Threes i : Two : simpleinc ds
fixupinc ds = ds

-- | Decrement given number by blindly decrementing the first digit.
-- |
-- | Note: Runs in O(1) worst case time.
simpledec :: SegmentedRedundantNat -> SegmentedRedundantNat
simpledec (Two:ds) = ones 1 ds
simpledec (Four:ds) = threes 1 ds
simpledec ((Ones i):ds) = Zero : ones (i - 1) ds
simpledec ((Threes i):ds) = Two : threes (i - 1) ds

-- | TODO: check if 'Two' is correct
-- |
-- | Restore the invariant after 'simpledec' by fixing leading 'Zero' and
-- | the first non-one digit in a sequence 'ds'.
-- |
-- | Note: Runs in O(1) worst case time.
fixupdec :: SegmentedRedundantNat -> SegmentedRedundantNat
fixupdec (Zero:ds) = Two : simpledec ds
fixupdec ((Ones i):Zero:ds) = Ones i : Two : simpledec ds
fixupdec ds = ds

instance Num SegmentedRedundantNat where
  -- | Increment given number in O(1) worst case time.
  inc = fixupinc . simpleinc

  -- | Decrement given number in O(1) worst case time.
  dec = fixupdec . simpledec

-- TODO: `instance Add SegmentedRedundantNat`
-- -

-- ## Skew Binary Numbers
-- *Skew representation* of binary numbers defines weights $w_i = 2^{i + 1} - 1$ instead of typical $2^i$ and the set of digits is $D_i = \{\text{0}, \text{1}, \text{2}\}$.
--
-- ### Representation
-- This representation is redundant but can be made unique (in *canonical form*) if one adds the restriction that only the lowest non-zero digit may be two.
-- > Every natural number has a unique skew binary canonical form.
--
-- Since `inc` scans for the lowest non-zero digit (see operations below), the natural choice for representation is *sparse*. Using *dense* would require more than $O(1)$ steps which defeats the purpose.
--
-- ### Operations
-- Observation: $1 + 2(2^{i + 1} - 1) = 2^{i + 2} - 1$. This implies
-- 1. `inc` a number that doesn't contain 2 => just increment the lowest digit: 0 -> 1 or 1 -> 2
-- 1. `inc` a number with the lowest non-zero digit being 2 => reset it to 0 and increment the next digit
--
-- ### Benefits
-- - Similarly to *lazy* and *segmented* representations, both `inc` and `dec` run in $O(1)$ worst case time in the *skew representation* (instead of $O(log(n))$)
-- - The implementation is usually simpler and faster in practice

-- +
-- | Sparse representation of skew binary numbers as a list of weights.
-- | - weights are stored in increasing order
-- | - only the first two (smallest) weights may be the same - indicating that the lowest non-zero digit is two
type SkewNat = [Int]

instance Num SkewNat where
  -- | Increment given number in O(1) worst case time.
  inc ws @ (w1:w2:rs) = if w1 == w2 then (1 + w1 + w2):rs else 1:ws
  inc ws = 1:ws

  -- | Decrement given number in O(1) worst case time.
  -- |
  -- | Note: If 'w = 2^(k + 1) - 1' then '(w `div` 2) = 2^k - 1'.
  dec (1:ws) = ws
  dec (w:ws) = (w `div` 2) : (w `div` 2) : ws
-- -

-- ## Triary and Quaternary Numbers
-- These number systems are a natural extension to the binary numbers. In binary representations (not considering skew representation for a moment) the $i$-th digit has weight $2^i$, i.e. the base $k = 2$.
--
-- For *triary* and *quaternary* numbers the base is $k = 3$ and $k = 4$ respectively. The weight is thus in general $k^r$ where $r$ is the *rank*.
--
-- ### Benefits
-- Since the base $k$ is large compared to binary representations, one needs fewer digits to represent each number. If $k = 2$ there is approx. $log_2 n$ digits to represent a number $n$. On the other hand, in general base $k$ there is approx. $log_k n = \frac{log_2 n}{log_2 k}$ digits.
--
-- ### Disadvantages
-- Because there are more values for each digit, the processing of each digit in higher bases might take longer.
--
-- Processing a digit in base $k$ often takes $k + 1$ steps, hence processing all the digits takes approx. $(k + 1) log_k n = \frac{k + 1}{log_2 k} log_2 n$.
--
-- So in the end it is a question of balance. From the table below, it is easy to see that the net benefit (i.e. speedup) can be achieved only for some bases $k$. Specifically for $k = 3$ (triary numbers) and $k = 4$ (quaternary numbers).
--
-- | k | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
-- |:------------------:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|
-- | (k + 1) / log_2 k | 3.00 | 2.52 | 2.50 | 2.58 | 2.71 | 2.85 | 3.00 |
--
-- ### Skew Representations
-- Skew binary numbers can be generalized to other bases too. In *skew k-ary numbers*, the $i$-th digit $d_i \in \{0, \dots, k - 1\}$ has weight $(k^{i + 1} - 1)/(k - 1)$ and the lowest non-zero digit may be $k$.
notebooks/num.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Functions def my_capitalizer(sample): words = sample.split(' ') out = [] for i in range(len(words)): fc = words[i].capitalize() s_temp = '' isfirst = True for j in range(1,len(fc)+1): if fc[-j].isalpha() and isfirst: s_temp = s_temp+fc[-j].upper() isfirst = False else: s_temp = s_temp+fc[-j] out.append(s_temp[::-1]) return ' '.join(out) my_capitalizer('salam, how do you do?') def f1(x,y): return x+y,x*y o1,o2 = f1(3,4) print(o1) print(o2) x,y,z = [1,2,3] print(x) print(y) print(z) def add(x=1,y=1,z=1): return (x+y)*z a = add() print(a) def add(x=1,y=1,z=1): print((x+y)*z) a = add(x,2,3) print(a) print(type(a)) def add(x=1,y=1,z=1): x += 1 print((x+y)*z) x = 1 a = add(x,2,3) print(x) def add(x=1,y=1,z=1): x += 1 print((x+y)*z+r) r = 4 a = add(x,2,3) def generator_function(): for i in range(10): yield i**2 generator_function() for i in generator_function(): print(i) def sequence(n): num = 1 while num<n+1: yield num num += 1 for item in sequence(10): print(item) # # comprehensions # ### [f(i) for i in iterable if condition] squared = [] for i in range(10): x = i**2 if x%2==0: squared.append(x) squared squared = [x**2 for x in range(10) if x**2%2==0] squared a = ['ali','hassan','manijeh'] {'key_'+i:'value_'+i for i in a} for i,j in zip(a,b): print(i,j) # + b = ['0921','0912','0935'] dic = {k:v for k,v in zip(a,b)} dic # - for k,v in dic.items(): print(k,v) {v: k for k, v in dic.items()}
rep/2-functions.ipynb
/ --- / jupyter: / jupytext: / text_representation: / extension: .q / format_name: light / format_version: '1.5' / jupytext_version: 1.14.4 / --- / + cell_id="00000-255d7681-2232-4e86-9cb3-cecb8c403281" deepnote_cell_type="code" deepnote_to_be_reexecuted=false execution_millis=51 execution_start=1616612249909 source_hash="279b914" tags=[] def cluster(data): #choose dbscan params and l/w params e = 0.005 ms = 10 dbs = DBSCAN(eps=e, min_samples=ms) min_length = 0.07 max_length = 0.22 min_width = 0.15 max_width = 0.4 dbs_labels = dbs.fit_predict(data) for i in sorted(set(dbs_labels)): s = dbs_labels==i length = data[s,0][np.argmax(data[s,0])] - data[s,0][np.argmin(data[s,0])] width = data[s,1][np.argmax(data[s,1])] - data[s,1][np.argmin(data[s,1])] if min_length < length < max_length and min_width < width < max_width: return (data[s,:]) / + [markdown] created_in_deepnote_cell=true deepnote_cell_type="markdown" tags=[] / <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=4248d415-7786-4cc7-97c8-cf51d9e4e56a' target="_blank"> / <img style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,<KEY> > </img> / Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
Tests/ClusterZack.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="hsyhgfZGnayq" # Search results in the pubtator interface are the same as pubmed (with no filters and sorted by most recent). This means we can get pmids from a query term, then get the annotion results by pmid from the pubtator API I think. # + id="6e-N696BotO5" # %%capture # !pip install pubmed-mapper # !pip install scispacy # !pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.4.0/en_ner_bc5cdr_md-0.4.0.tar.g # + id="cLpR3yFclOOa" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633121625417, "user_tz": 420, "elapsed": 183, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="fa577a94-4d60-4efb-a3c7-e2ceadcf0ac5" # %load_ext autoreload # %autoreload 2 from google.colab import drive drive.mount('/content/gdrive/') # + colab={"base_uri": "https://localhost:8080/"} id="kzQ6absOnqBX" executionInfo={"status": "ok", "timestamp": 1633121633662, "user_tz": 420, "elapsed": 129, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="6b07131b-d5d3-45be-f67f-eb52a082bf25" # %cd /content/gdrive/MyDrive/AIFS_KE_entailment/ # + colab={"base_uri": "https://localhost:8080/"} id="85SAsLOboFG9" executionInfo={"status": "ok", "timestamp": 1633123258432, "user_tz": 420, "elapsed": 165, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="a745d49f-1c79-4063-f1d7-53304d2c9511" from data_generation import get_pmids_by_query, get_pubtator_annotations, filter_sents # + colab={"base_uri": "https://localhost:8080/"} id="cZ9MTR8CoroX" 
executionInfo={"status": "ok", "timestamp": 1633121879211, "user_tz": 420, "elapsed": 319, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="aaaecad7-e693-4687-961d-9e0a3defca30" pmids = get_pmids_by_query("mangifera indica") # + colab={"base_uri": "https://localhost:8080/"} id="dXjfQkrpwzMi" executionInfo={"status": "ok", "timestamp": 1633123782849, "user_tz": 420, "elapsed": 3685, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="ca8d30f6-4529-47a3-9c8b-893e8cc5cef4" # !pip install pymed # + id="VEN1jKHRwwyB" # archived, broken # from pymed import PubMed # pubmed = PubMed(tool="MyTool", email="<EMAIL>") # results = pubmed.query("mangifera indica", max_results=20) # pmids = [r.pubmed_id for r in results] # pmids_cleaned = [] # for x in pmids: # for y in x.split('\n'): # pmids_cleaned.append(y) # pmids_cleaned = list(set(pmids_cleaned)) # + id="6Ip15ISwy0ou" import pandas as pd # + id="a7M-4Y8YxUDI" mango_pmids = pd.read_csv('/content/gdrive/MyDrive/AIFS_KE_entailment/pmid-mangiferai-set.txt',header=None)[0].astype(str).values # + colab={"base_uri": "https://localhost:8080/", "height": 128} id="0GI_fMpJzmW1" executionInfo={"status": "ok", "timestamp": 1633124523596, "user_tz": 420, "elapsed": 138, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="559baa57-9e6e-4fb6-ccb5-87d4a7c3d88e" ','.join(mango_pmids) # + id="4YI_byXX0W51" # Fails silently searching 10 at a time... 
wtf ncbi # + id="Hhgc7wisslro" def get_annotation_type(annotation): return annotation.find(attrs={"key":"type"}).text def get_annotation_text(annotation): return annotation.find("text").text # + id="FyGBKbh_tmrP" from nltk import sent_tokenize # + id="OEKEoiSv1kMb" from tqdm import tqdm # + colab={"base_uri": "https://localhost:8080/"} id="cHDVFu64p51G" executionInfo={"status": "ok", "timestamp": 1633125495553, "user_tz": 420, "elapsed": 13401, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="7bdd906a-947f-488c-ba00-a01df496b4d0" filtered_sents_all = [] for id in tqdm(mango_pmids[0:100]): soup = get_pubtator_annotations([id]) for passage in soup.find_all("passage"): species = [] chemicals = [] for annotation in passage.find_all('annotation'): annot_type = get_annotation_type(annotation) annot_text = get_annotation_text(annotation) if annot_type == 'Species': species.append(annot_text) elif annot_type == 'Chemical': chemicals.append(annot_text) if len(species) > 0 and len(chemicals) > 0: filtered_sents = filter_sents(passage.text,species,chemicals) for sent in filtered_sents: filtered_sents_all.append((id, sent, [chem for chem in set(chemicals) if chem in sent], [s for s in set(species) if s in sent])) # + id="yFUqawcPr2GW" df= pd.DataFrame(filtered_sents_all,columns=["pmid","premise","chemicals","species"]) # + id="k2NHnQ0x14NV" df.to_csv("/content/gdrive/MyDrive/AIFS_KE_entailment/mango_sentences.csv") # + id="nQBYmv4g5jmJ" species = list(set([f for v in df.species for f in v])) # + id="QO3tc31A51Rg" species_excl = [ 'stocks', 'Escherichia coli', 'L. 
fermentum', 'Lactobacillus fermentum', 'metagenomes', 'PP', 'Sprague Dawley rats', 'Colletotrichum gloeosporioides', 'rats', 'rat', 'sheep', 'Staphylococcus aureus', 'pig', 'Aleurodicus rugioperculatus', 'human', 'flowering plants', 'Lactobacillus acidophilus', 'Lactobacillus plantarum',] # + id="wcGlt3iI7YZb" df.species = df.species.apply(lambda l: [x for x in l if x not in species_excl]) df = df[df.species.apply(len) > 0] # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="KSh9b6PC14hx" executionInfo={"status": "ok", "timestamp": 1633126634736, "user_tz": 420, "elapsed": 168, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="67201b7e-56a4-499e-980f-a4bf416f5aa0" df # + id="ODG-763B8FXD" from itertools import product # + id="qTSHPxrA4Nlf" def generate_candidates_from_pubtator_df(df): newdf = [] for idx, row in df.iterrows(): for food, chemical in product(row["species"],row["chemicals"]): newrow = {"pmid":row["pmid"],"premise":row["premise"],"hypothesis":f"{food} contains {chemical}."} newdf.append(newrow) return pd.DataFrame(newdf) # + id="J7t1mudB8yx0" candidates_df = generate_candidates_from_pubtator_df(df) # + colab={"base_uri": "https://localhost:8080/", "height": 424} id="zBnZrpQR83EJ" executionInfo={"status": "ok", "timestamp": 1633126940699, "user_tz": 420, "elapsed": 134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "18165642881489459967"}} outputId="60a79448-2255-4584-c3e5-5dca78fca850" candidates_df # + id="jZpcyb9A836p" candidates_df.to_csv('/content/gdrive/MyDrive/AIFS_KE_entailment/mango_candidates.csv') # + id="1vDjapJ39SrZ"
ipynb/entailment_notebooks/pubtator_mango.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Complex TFA multiple Inversions # This notebook performs the inversion using Levenberg-Marquadt's algorithm of total field anomaly (TFA). # + import numpy as np import matplotlib.pyplot as plt import cPickle as pickle import os from fatiando.vis import mpl # - from datetime import datetime today = datetime.today() # dd/mm/YY d4 = today.strftime("%d-%b-%Y-%Hh:%Mm") # ### Auxiliary functions # + import sys sys.path.insert(0, '../../code') import mag_polyprism_functions as mfun # - # # Input # ### Importing model parameters with open('../dipping/model.pickle') as w: model = pickle.load(w) # ### Observation points and observed data with open('data.pickle') as w: d = pickle.load(w) data = pd.read_csv('dipping_model_data.txt', skipinitialspace=True, delim_whitespace=True) dobs = data['res_data'].get_values() xp = data['x'].get_values() yp = data['y'].get_values() zp = data['z'].get_values() N = xp.size # ### Parameters of the initial model # + M = 20 # number of vertices per prism L = 5 # number of prisms P = L*(M+2) + 1 # number of parameters # magnetization direction incs = model['inc'] decs = model['dec'] int_min = 10. int_max = 15. intensity = np.linspace(int_min, int_max, 6) # depth to the top, thickness and radius z0_min = 0. z0_max = 100. z0 = np.linspace(z0_min, z0_max, 6) dz = 800. r = 700. x0 = -200. y0 = 0. # main field inc, dec = d['main_field'] # - z0 intensity # ### Outcropping parameters # + # outcropping body parameters m_out = np.zeros(M + 2) m_out = np.zeros(M+2) for i, (x,y) in enumerate(zip(model['prisms'][0].x,model['prisms'][0].y)): m_out[i] = np.sqrt(x**2 + y**2) # - # ### Limits # + # limits for parameters in meters rmin = 10. rmax = 1200. x0min = -4000. x0max = 4000. y0min = -4000. y0max = 4000. dzmin = 200. dzmax = 1000. 
mmin, mmax = mfun.build_range_param(M, L, rmin, rmax, x0min, x0max, y0min, y0max, dzmin, dzmax) # - # ### Derivatives # + # variation for derivatives deltax = 0.01*np.max(100.) deltay = 0.01*np.max(100.) deltar = 0.01*np.max(100.) deltaz = 0.01*np.max(100.) delta = np.array([deltax, deltay, deltar, deltaz]) # - # ### Regularization parameters # + #lamb = th*0.01 # Marquadt's parameter lamb = 10.0 dlamb = 10. # step for Marquadt's parameter a1 = 1.0e-3 # adjacent radial distances within each prism a2 = 1.0e-3 # vertically adjacent radial distances a3 = 1.0e-6 # outcropping cross-section a4 = 1.0e-6 # outcropping origin a5 = 1.0e-6 # vertically adjacent origins a6 = 1.0e-6 # zero order Tikhonov on adjacent radial distances a7 = 1.0e-5 # zero order Tikhonov on thickness of each prism alpha = np.array([a1, a2, a3, a4, a5, a6, a7]) # - # ### Folder to save the results foldername = 'outcrop-3366665' itmax = 30 itmax_marq = 10 tol = 1.0e-3 # stop criterion # ### Inversion inversion_results = [] for j, z in enumerate(z0): for k, i in enumerate(intensity): alpha = np.array([a1, a2, a3, a4, a5, a6, a7]) print 'inversion: %d top: %d intensity: %d' % (j*z0.size + k, z, i) model0, m0 = mfun.initial_cylinder(M, L, x0, y0, z, dz, r, inc, dec, incs, decs, i) d_fit, m_est, model_est, phi_list, model_list, res_list = mfun.levmarq_tf( xp, yp, zp, m0, M, L, delta, itmax, itmax_marq, lamb, dlamb, tol, mmin, mmax, m_out, dobs, inc, dec, model0[0].props, alpha, z, dz ) inversion_results.append([m_est, phi_list, model_list, dobs - d_fit]) # # Results # + # output of inversion inversion = dict() inversion['x'] = xp inversion['y'] = yp inversion['z'] = zp inversion['observed_data'] = dobs inversion['inc_dec'] = [incs, decs] inversion['z0'] = z0 inversion['initial_dz'] = dz inversion['intial_r'] = r inversion['limits'] = [rmin, rmax, x0min, x0max, y0min, y0max, dzmin, dzmax] inversion['regularization'] = np.array([a1, a2, a3, a4, a5, a6, a7]) inversion['tol'] = tol inversion['main_field'] = 
[inc, dec] inversion['intensity'] = intensity inversion['results'] = inversion_results # - # ### Saving results if foldername == '': mypath = 'results/multiple-'+d4 #default folder name if not os.path.isdir(mypath): os.makedirs(mypath) else: mypath = 'results/multiple-'+foldername #defined folder name if not os.path.isdir(mypath): os.makedirs(mypath) file_name = mypath+'/inversion.pickle' with open(file_name, 'w') as f: pickle.dump(inversion, f)
code/dipping-regional/multiple_inversion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os

# Folder that holds the downloaded books whose names carry the
# "(z-lib.org)" tag.
BOOKS_DIR = '/Users/wiifreaki/Desktop/jj'

os.chdir(BOOKS_DIR)
print(os.getcwd())
# -

# List the current contents so we can eyeball what is about to be renamed.
for entry in os.listdir(BOOKS_DIR):
    print(entry)

# +
# Strip the "(z-lib.org)" tag from every filename.
#
# Bug fix: the original replaced the tag with a single space (' '), which
# left stray whitespace in the new name; replace with '' and strip any
# leading/trailing whitespace instead.  Build full paths with
# os.path.join so the rename works regardless of the current working
# directory.
for entry in os.listdir(BOOKS_DIR):
    cleaned = entry.replace('(z-lib.org)', '').strip()
    if cleaned != entry:
        os.rename(os.path.join(BOOKS_DIR, entry),
                  os.path.join(BOOKS_DIR, cleaned))
    print(cleaned)
# -
RenameBooks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Imports # Suppress TensorFlow warnings. # + # Copied from: # https://weepingfish.github.io/2020/07/22/0722-suppress-tensorflow-warnings/ # Filter tensorflow version warnings import os # https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # or any {'0', '1', '2'} import warnings # https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning warnings.simplefilter(action="ignore", category=FutureWarning) warnings.simplefilter(action="ignore", category=Warning) import tensorflow as tf tf.get_logger().setLevel("INFO") tf.autograph.set_verbosity(0) import logging tf.get_logger().setLevel(logging.ERROR) # + from tensorflow.keras import layers from tensorflow import keras import tensorflow_hub as hub from torchvision.datasets import ImageFolder from torchvision import transforms from torch.utils.data import DataLoader from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD # - # ## Constants AUTO = tf.data.AUTOTUNE BATCH_SIZE = 256 IMAGE_SIZE = 224 TF_MODEL_ROOT = "gs://deit-tf" # ## DeiT models # + model_paths = tf.io.gfile.listdir(TF_MODEL_ROOT) deit_paths = [ path for path in model_paths if str(IMAGE_SIZE) in path and "fe" not in path ] print(deit_paths) # - # ## Image loader # To have an apples-to-apples comparison with the original PyTorch models for evaluation, it's important to ensure we use the same transformations. 
# + # Transformations from: # (1) https://github.com/facebookresearch/deit/blob/colab/notebooks/deit_inference.ipynb # (2) https://github.com/facebookresearch/deit/blob/main/datasets.py size = int((256 / 224) * IMAGE_SIZE) transform_chain = transforms.Compose( [ transforms.Resize(size, interpolation=3), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD), ] ) # + dataset = ImageFolder("val", transform=transform_chain) dataloader = DataLoader(dataset, batch_size=BATCH_SIZE) batch = next(iter(dataloader)) print(batch[0].shape) # - # ## Run evaluation def get_model(model_url): inputs = tf.keras.Input((IMAGE_SIZE, IMAGE_SIZE, 3)) hub_module = hub.KerasLayer(model_url) outputs, _ = hub_module(inputs) return tf.keras.Model(inputs, outputs) # + # Copied and modified from: # https://github.com/sebastian-sz/resnet-rs-keras/blob/main/imagenet_evaluation/main.py log_file = f"deit_tf_{IMAGE_SIZE}.csv" if not os.path.exists(log_file): with open(log_file, "w") as f: f.write("model_name,top1_acc(%),top5_acc(%)\n") for deit_path in deit_paths: print(f"Evaluating {deit_path}.") model = get_model(f"{TF_MODEL_ROOT}/{deit_path}") top1 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1, name="top1") top5 = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=5, name="top5") progbar = tf.keras.utils.Progbar(target=len(dataset) // BATCH_SIZE) for idx, (images, y_true) in enumerate(dataloader): images = images.numpy().transpose(0, 2, 3, 1) y_true = y_true.numpy() y_pred = model.predict(images) top1.update_state(y_true=y_true, y_pred=y_pred) top5.update_state(y_true=y_true, y_pred=y_pred) progbar.update( idx, [("top1", top1.result().numpy()), ("top5", top5.result().numpy())] ) print() print(f"TOP1: {top1.result().numpy()}. TOP5: {top5.result().numpy()}") top_1 = top1.result().numpy() * 100.0 top_5 = top5.result().numpy() * 100.0 with open(log_file, "a") as f: f.write("%s,%0.3f,%0.3f\n" % (deit_path, top_1, top_5))
i1k_eval/eval-deit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.5 64-bit
#     name: python_defaultSpec_1599531811770
# ---

# # Lab1
# ---
#
# Ok for this lab we're going to reiterate a lot of the things that we went over in class.
#
# Our Goals are:
# - Defining variables
# - Using math
# - Manipulating variables
# - Changing data types
# - Playing with lists

# +
# Define 4 variables, one of each type listed: [Integer, String, Boolean, Float]. Print all 4.

# Integer
age = 19
# String
name = "Owen"
# Boolean
goes_to_marist = True
# Float
height = 74.5

# The exercise asks for all four to be printed.
print(age)
print(name)
print(goes_to_marist)
print(height)

# + tags=[]
# With whatever variables you'd like demonstrate the following:
# [Addition, Subtraction, Division, Multiplication, Modulo]. Print all 5.
Input1 = 7
Input2 = 10

print(Input1 + Input2)
print(Input1 - Input2)
print(Input1 / Input2)
print(Input1 * Input2)
print(Input1 % Input2)

# + tags=[]
# Using 2 variables and addition print the number 57
Integer1 = 50
Integer2 = 7
Solution = Integer1 + Integer2

# Bug fix: the original guard was `Solution < 58`, which would also accept
# any wrong result below 58 (e.g. 56).  Check for exactly 57 instead.
if Solution == 57:
    print(Solution)
else:
    print("Wrong")

# + tags=[]
# Change this variable to a string and print
number_into_string = 37
number_into_string = str(number_into_string)
print(type(number_into_string))

# Change this variable to a integer and print
string_into_number = '10'
string_into_number = int(string_into_number)
print(type(string_into_number))

# Change this variable to a float and print
string_into_float = '299.99'
string_into_float = float(string_into_float)
print(type(string_into_float))

# Change this variable to a boolean and print
# (any non-zero integer converts to True)
number_into_boolean = 37
number_into_boolean = bool(number_into_boolean)
print(type(number_into_boolean))

# + tags=[]
# Make me a list of any type
Fast_Food = ["Mcdonalds", "Burger_King", "Wendys", "Chik-fil-a", "White_Castle"]
print(Fast_Food)
JupyterNotebooks/Labs/Lab 1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Bayesian Regression # *<NAME>* # # In the Bayesian approach to regression (also referred to as Bayesian ridge regression, do to an equivalence with ridge regression), the prior distribution of the weights $\beta$ is a Normal distribution. If the error terms $\epsilon_i$ are assumed to be Normally distributed, the posterior distribution of the parameters is also a Normal distribution, with updated parameters. We can make predictions using the **maximum *a posteriori* (MAP)** estimates of the parameters (the values that maximize the posterior distribution's density function). # # **Occam's razor** refers to an empirical idea that simple models that explain phenomena are preferred to complex models that explain the same phenomena. This idea appears in Bayesian regression: the prior distribution of the parameters intentionally weights the parameters to zero. This biases the resulting linear model to be "simple", in that features will have negligible weights unless the features have a non-negligible predictive ability. Regression with OLS alone does not have this property; misspecified models will become as complex as necessary to overfit data. # # In other words, OLS can be prone to overfitting while Bayesian regression offers an approach to combat overfitting. # # ## Choosing a Polynomial # # Below I load in an artificial dataset consisting of two variables, one of them the target variable. import numpy as np import matplotlib.pyplot as plt # %matplotlib inline dat = np.load("mystery_function.npy") x, y = dat[:, 0], dat[:, 1] plt.scatter(x, y) plt.show() # The data in this plot is clearly not linear but could have been generated by some other polynomial relationship. 
Unfortunately we don't know what polynomial relationship is appropriate and choosing the wrong one can lead to overfitting. # # We see this below. from sklearn.linear_model import LinearRegression # + olsfit1, olsfit2, olsfit3, olsfit4, olsfit5, olsfit6 = (LinearRegression(),)*6 def gen_order_mat(x, order=1): """Generates a matrix of x useful for fitting a polynomial of some order""" # Similar functionality is supplied by the vander() function in NumPy if order == 1: return x.reshape(-1, 1) else: return np.array([x**i for i in range(1, order + 1)]).T # The number designates the order of the fit of the polynomial olsfit1 = LinearRegression().fit(gen_order_mat(x, order=1), y) olsfit2 = LinearRegression().fit(gen_order_mat(x, order=2), y) olsfit3 = LinearRegression().fit(gen_order_mat(x, order=3), y) olsfit4 = LinearRegression().fit(gen_order_mat(x, order=4), y) olsfit5 = LinearRegression().fit(gen_order_mat(x, order=5), y) olsfit10 = LinearRegression().fit(gen_order_mat(x, order=10), y) olsfit12 = LinearRegression().fit(gen_order_mat(x, order=12), y) def plotfit(fit, order=1): """Plots the function estimated by OLS.""" fx = np.linspace(x.min(), x.max(), num = 100) fx_mat = gen_order_mat(fx, order=order) yhat = fit.predict(fx_mat) plt.scatter(x, y) plt.plot(fx, yhat) plt.ylim(y.min() - 0.5, y.max() + 0.5) plt.show() plotfit(olsfit1, order=1) # - plotfit(olsfit2, order=2) plotfit(olsfit3, order=3) plotfit(olsfit4, order=4) plotfit(olsfit5, order=5) plotfit(olsfit10, order=10) plotfit(olsfit12, order=12) # Increasing the order of the polynomial leads to a better fit up until a certain point when new potential features lead to overfitted models. Bayesian ridge regression combats this phenomenon by biasing all parameters to 0, so when fitting a model, parameters get non-negligible contributions to the final fit only when they help in prediction. # # ## Performing Bayesian Regression # # The `BayesianRidge` object allows for performing Bayesian ridge regression. 
from sklearn.linear_model import BayesianRidge BayesianRidge() # + bayesfit = BayesianRidge(alpha_1 = 1, alpha_2 = 1, lambda_1 = 30, lambda_2 = 50).fit(gen_order_mat(x, order=10), y) plotfit(bayesfit, order=10) # - # `alpha_1`, `alpha_2`, `lambda_1`, and `lambda_2` are the hyperparameters of Bayesian ridge regression as supplied by **scikit-learn** (corresponding to parameters of the prior distribution of the parameters). Changing these parameters can lead to better fits. The above function more closely resembles the "best" fit (the cubic curve, which was used to generate the data).
Section 4/BayesianRegression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # k3d field # + import k3d import discretisedfield as df import matplotlib.pyplot as plt p1 = (-10e-9, -10e-9, -20e-9) p2 = (10e-9, 10e-9, 20e-9) cell = (2e-9, 2e-9, 2e-9) region = df.Region(p1=p1, p2=p2) mesh = df.Mesh(region, cell=cell) def norm_fun(point): x, y, z = point if x**2 + y**2 < (10e-9**2): return 1e6 else: return 0 def value_fun(point): x, y, z = point c = 1e9 return (c*x, c*y, c*z) field = df.Field(mesh, dim=3, value=value_fun, norm=norm_fun) # + #field.norm.plane('z').k3d_nonzero() # + # plot = k3d.plot() # @df.interact(z=mesh.slider('z')) # def myplot(z): # field.norm.plane(z=z).k3d_nonzero(plot=plot, interactive_field=field) # plot.display() # - field.x.plane('y').k3d_scalar() # + #field.x.plane('z').mpl() # + # plot = k3d.plot() # @df.interact(z=mesh.slider('z')) # def myplot(z): # field.x.plane(z=z).k3d_scalar(plot=plot, filter_field=field.norm, interactive_field=field) # plot.display() # + # field.plane('z').k3d_vector() # + # plot = k3d.plot() # @df.interact(z=mesh.slider('z')) # def myplot(z): # field.plane(z=z).k3d_vector(plot=plot, color_field=field.x, cmap='hsv', interactive_field=field) # plot.display() # -
dev/k3d-field.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
# ---

# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "279ded90-40fa-4106-8f1d-3d089d178bf2", "showTitle": true, "title": "SingleStore connection details"}
# SingleStore connection details.  Fill in the host and password before
# running the dependent notebooks; the cluster endpoint is "<host>:<port>".
server = "<TO DO>"
password = "<TO DO>"
port = "3306"
cluster = f"{server}:{port}"
notebooks/Setup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:phathom] # language: python # name: conda-env-phathom-py # --- import numpy as np import tifffile img_path = 'fixed_middle.tif' img = tifffile.imread(img_path) img.shape import mba cmin = np.zeros(2) cmax = np.array(img.shape) # + # use random points for the correspondences nb_matches = 20 max_disp = 10 # pixels fixed_pts = np.floor(np.random.uniform(0, 1, (nb_matches, 2))*cmax).astype('int') displacements = np.arange(nb_matches*2).reshape((2, nb_matches)).T # - grid = [3, 3] interp = mba.mba2(cmin, cmax, grid, fixed_pts, displacements[:, 0]) print(interp) import matplotlib.pyplot as plt error = abs(displacements[:, 0]-interp(fixed_pts)) plt.hist(error.ravel(), bins=100) plt.show() grid = [5, 5] interp_y = mba.mba2(cmin, cmax, grid, fixed_pts, displacements[:, 0]) interp_x = mba.mba2(cmin, cmax, grid, fixed_pts, displacements[:, 1]) n = 100 sy = np.linspace(0,img.shape[0],n) sx = np.linspace(0,img.shape[1],n) x = np.array(np.meshgrid(sy,sx)).transpose([1,2,0]).copy() def plot_surface(interp): # m0 is the initial grid size (nb control points to start with) # error = np.amax(np.abs(val - interp(coo))) / np.amax(np.abs(val)) v = interp(x) plt.pcolormesh(sy, sx, v, cmap='RdBu') # plt.scatter(x=coo[:,0], y=coo[:,1], c=val, cmap='RdBu') # plt.xlim([0,1]) # plt.ylim([0,1]) # plt.title("$m_0 = {0:}$, error = {1:.3e}".format(m0, error)) plt.colorbar(); plt.figure(figsize=(11,5)) plt.subplot(121); plot_surface(interp_y) plt.subplot(122); plot_surface(interp_x) plt.tight_layout()
notebooks/mba_testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import sys import subprocess sys.path.append('/usr/gapps/spot/hatchet') import subprocess, json import pandas as pd from hatchet import * cali_query = "/usr/gapps/spot/caliper/bin/cali-query" cali_file = "CALI_FILE_NAME" default_metric = "CALI_METRIC_NAME" grouping_attribute = "function" query = "select %s,sum(%s) group by %s format json-split" % (grouping_attribute, default_metric, grouping_attribute) cali_json = subprocess.check_output([cali_query, "-q", query, cali_file]) gf = GraphFrame() gf.from_caliper(cali_json, input_type='literal') print(gf.graph.to_string(gf.graph.roots, gf.dataframe, metric=default_metric))
templates/TemplateNotebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # prerequisite package imports import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sb # %matplotlib inline from solutions_biv import violinbox_solution_1 # - # We'll continue to make use of the fuel economy dataset in this workspace. fuel_econ = pd.read_csv('./data/fuel_econ.csv') fuel_econ.head() # **Task**: What is the relationship between the size of a car and the size of its engine? The cars in this dataset are categorized into one of five different vehicle classes based on size. Starting from the smallest, they are: {Minicompact Cars, Subcompact Cars, Compact Cars, Midsize Cars, and Large Cars}. The vehicle classes can be found in the 'VClass' variable, while the engine sizes are in the 'displ' column (in liters). **Hint**: Make sure that the order of vehicle classes makes sense in your plot! # YOUR CODE HERE #Convert the "VClass" column from a plain object type into an ordered categorical type car_sizes = ['Minicompact Cars', 'Subcompact Cars', 'Compact Cars', 'Midsize Cars', 'Large Cars'] car_categories = pd.api.types.CategoricalDtype(ordered = True, categories=car_sizes); fuel_econ['VClass'] = fuel_econ['VClass'].astype(car_categories); fuel_econ['VClass'] sb.violinplot(data = fuel_econ, x = 'VClass', y = 'displ', color='grey', inner = 'quartile'); plt.xticks(rotation=15); # run this cell to check your work against ours violinbox_solution_1()
Matplotlib/Violin_and_Box_Plot_Practice.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # InstaBot - Part 2 # + from selenium import webdriver from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.common.by import By from selenium.common.exceptions import TimeoutException from bs4 import BeautifulSoup from collections import OrderedDict import time from datetime import datetime, timedelta import pandas as pd import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # - #opening the browser, change the path as per location of chromedriver in your system driver = webdriver.Chrome(executable_path = 'C:/Users/admin/Downloads/Chromedriver/chromedriver.exe') driver.maximize_window() #opening instagram driver.get('https://www.instagram.com/') #update your username and password here username = 'SAMPLE USERNAME' password = '<PASSWORD>' #initializing wait object wait = WebDriverWait(driver, 10) #LogIn() function is same as part-I. All the functionality is same as before. def LogIn(username, password): try : #locating username textbox and sending username user_name = wait.until(EC.presence_of_element_located((By.NAME,'username'))) user_name.send_keys(username) #locating password box and sending password pwd = driver.find_element_by_name('password') pwd.send_keys(password) #locating login button button = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id="loginForm"]/div[1]/div[3]/button/div'))) button.submit() #Save Your Login Info? : Not Now pop = wait.until(EC.presence_of_element_located((By.XPATH,'//*[@id="react-root"]/section/main/div/div/div/div/button'))) pop.click() except TimeoutException : print ("Something went wrong! 
Try Again") #Login to your Instagram Handle LogIn(username, password) ##search_open_profile() function is same as part-I. All the functionality is same as before. def search_open_profile(s): try: #locatong search box bar and sending text search_box = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'XTCLo'))) search_box.send_keys(s) #locating serched result res = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'yCE8d'))) res.click() time.sleep(5) #driver.back() except TimeoutException : print('No Search Found!') # ### Problem 1 : Analyse the habits of bloggers # Now your friend has followed a lot of different food bloggers, he needs to analyse the habits of these bloggers. # 1.From the list of instagram handles you obtained when you searched ‘food’ in previous project. Open the first 10 handles and find the top 5 which have the highest number of followers # 2.Now Find the number of posts these handles have done in the previous 3 days. # 3.Depict this information using a suitable graph. # ##### 1.From the list of instagram handles you obtained when you searched ‘food’ in previous project. 
Open the first 10 handles and find the top 5 which have the highest number of followers #First 10 handles from part-1 handles_list = ['dilsefoodie','foodtalkindia','foodmaniacinthehouse','food.darzee','yourfoodlab','dilsefoodie_','food', 'foodnetwork','foodinsider','foodiesfeature'] #function for getting top 5 handle which have highest number of followers def top_5_handles(lst): try: #dct contains username with number of followers dct = {} for i in lst : #This fuction will search a user and open that profile search_open_profile(i) time.sleep(2) #locating followers of user followers = wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME,'g47SY')))[1] #replacing ',' with '' and converted into int dct[i] = int(followers.get_attribute('title').replace(',' , '')) #top 5 most followed user count = 0 res = OrderedDict() for i in sorted(dct, key=dct.get, reverse=True): res[i] = dct[i] count += 1 if(count >= 5) : break return res except TimeoutException : print ("Something went wrong! Try Again") top_handles = top_5_handles(handles_list) print('Top 5 instagram handles which have the highest number of followers :') for uname in top_handles: print(uname,'->',top_handles[uname]) # ##### 2.Now Find the number of posts these handles have done in the previous 3 days. 
#this function returns number of post posted by user in n days def count_of_post(day): try : #locating first post post = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'v1Nh3'))) post.click() count = 0 while True : #getting date of the the post temp = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'_1o9PC'))) #getting date in string format date_string = temp.get_attribute('datetime') #converting into date format date = datetime.strptime(date_string[0:19], "%Y-%m-%dT%H:%M:%S") #taking differnce between today's date and post posted date if (datetime.now() - date).days > day : break else : count += 1 #locating next button next_but = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'coreSpriteRightPaginationArrow'))) next_but.click() #closing the post close=driver.find_element_by_xpath("/html/body/div[4]/div[3]/button") close.click() return count except TimeoutException : print ("Something went wrong! Try Again") total_post_count = {} for i in top_handles : search_open_profile(i) time.sleep(2) count = count_of_post(3) total_post_count[i] = count print('Number of posts these handles have done in the previous 3 days : -') for i in total_post_count: print(i,total_post_count[i]) # ##### 3.Depict this information using a suitable graph. # using total_post_count to get username and number of post x = total_post_count.keys() y = total_post_count.values() plt.bar(x,y,color='g',alpha=0.6) plt.ylabel("Number of posts in the previous 3 days",size=10) plt.xlabel("Username",size=10) plt.title('Most Followed user vs Number of recent posts ',fontsize=20, color='green',alpha=0.8) plt.grid() plt.show() # ### Problem 2 : List of hashtags # Your friend also needs a list of hashtags that he should use in his posts. # 1.Open the 5 handles you obtained in the last question, and scrape the content of the first 10 posts of each handle. # 2.Prepare a list of all words used in all the scraped posts and calculate the frequency of each word. 
# 3.Create a csv file with two columns : the word and its frequency # 4.Now, find the hashtags that were most popular among these bloggers # 5.Plot a Pie Chart of the top 5 hashtags obtained and the number of times they were used by these bloggers in the scraped posts. # + def extract_content(lst): try : hashtags_list = [] for user in lst: #This fuction will search a user and open that profile search_open_profile(user) time.sleep(2) #just for scrolling to getting post driver.execute_script('window.scrollTo(0, 3000);') time.sleep(2) driver.execute_script('window.scrollTo(0, -3000);') time.sleep(2) #locating the post of the user posts = driver.find_elements_by_class_name('v1Nh3') for post in posts[:10]: post.click() time.sleep(2) #locating all hashtag used in this post hashtags=driver.find_elements_by_class_name("xil3i") for hashtag in hashtags: #appending in list hashtags_list.append(hashtag.text) #locating close button and clicking on it close=driver.find_element_by_xpath("/html/body/div[4]/div[3]/button") time.sleep(1) close.click() return hashtags_list except TimeoutException : print ("Something went wrong! Try Again") # - handles = list(total_post_count.keys()) hashtags = extract_content(handles) print(hashtags) # ##### 2.Prepare a list of all words used in all the scraped posts and calculate the frequency of each word. 
# +
# Count how often each scraped hashtag appears.
dct = {}
words_list = []
for word in hashtags:
    dct[word] = dct.get(word,0) + 1

# words_list holds [hashtag, frequency] pairs in descending-frequency order.
for i in sorted(dct, key=dct.get, reverse=True):
    words_list.append([i,dct[i]])
    print(i,dct[i])
# -

# ##### 3.Create a csv file with two columns : the word and its frequency

df = pd.DataFrame(words_list,columns=['Words','Frequency'])
# using the .to_csv pandas function to convert the dataframe to csv
df.to_csv('words_frequency.csv',index=False)

df.head(10)

# ##### 4.Now, find the hashtags that were most popular among these bloggers

# words_list is sorted by frequency, so the first entry is the most popular hashtag.
print(words_list[0][0])

# ##### 5.Plot a Pie Chart of the top 5 hashtags obtained and the number of times they were used by these bloggers in the scraped posts.

# Take the top 5 [hashtag, count] pairs by slicing instead of hard-coded
# indexing of elements 0..4 -- this also avoids an IndexError when fewer
# than 5 distinct hashtags were scraped.
top5 = words_list[:5]
hashtag_name = [pair[0] for pair in top5]
count = [pair[1] for pair in top5]

plt.figure(figsize=(12,6))
plt.pie(count,labels=hashtag_name,autopct='%0.2f%%',radius=1,counterclock=False,startangle=110)
plt.title('Top 5 Hashtags used by bloggers ',fontsize=20, color='green',alpha=0.8)
plt.show()
# + #This function returns list of likes of 10 post of a user def get_likes(user): try : likes = [] #This fuction will search a user and open that profile search_open_profile(user) time.sleep(2) #just for scrolling driver.execute_script('window.scrollTo(0, 3000);') time.sleep(2) driver.execute_script('window.scrollTo(0, -3000);') time.sleep(2) #locating the post of the user posts = driver.find_elements_by_class_name('v1Nh3') for post in posts[:10]: post.click() time.sleep(2) try : #for getting like on video we need to first click on view then only we can like views = wait.until(EC.presence_of_element_located((By.CLASS_NAME,'vcOH2'))) views.click() time.sleep(1) #locating like and clicking on it like = driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[2]/div/div/div[4]/span') no_of_likes = (like.text).replace(',' , '') likes.append(int(no_of_likes)) time.sleep(1) #just clicking one body to get original post back driver.find_element_by_xpath('/html/body/div[4]').click() #if the post is image then this part will execute except TimeoutException : like = driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[2]/div/div/button/span') no_of_likes = (like.text).replace(',' , '') likes.append(int(no_of_likes)) time.sleep(1) #locating close button and clicking on it close=driver.find_element_by_xpath("/html/body/div[4]/div[3]/button") time.sleep(1) close.click() return likes except TimeoutException : print ("Something went wrong! Try Again") # - #dct will store username with list of likes dct = {} for user in handles : likes = get_likes(user) dct[user] = likes print('Number of likes of the top 10 posts of',user,':') for i in likes : print(i,'Likes') print() # ##### 2.Calculate the average likes for a handle. 
# This function will return a dictionary of {'username': average likes}
def get_avg_likes(d):
    """Return {username: integer average of that user's like counts}.

    `d` maps username -> list of like counts. The average divides by the
    actual number of scraped posts (the previous hard-coded `// 10` was wrong
    whenever a handle had fewer than 10 posts). A handle with no scraped
    likes gets an average of 0 instead of raising ZeroDivisionError.
    """
    avg_likes = {}
    for key,val in d.items():
        avg = sum(val) // len(val) if val else 0
        avg_likes[key] = avg
    return avg_likes

# +
avg_likes = get_avg_likes(dct)

print('Average likes for each handle are :')
for user,likes in avg_likes.items():
    print(user, ':', likes)
# -

# ##### 3.Divide the average likes obtained from the number of followers of the handle to get the average followers:like ratio of each handle.

# This function will return a dictionary of {'username': avg followers:like ratio}
def getavg_foll_like_ratio(top_handles,avg_like,users):
    """Return {username: followers // average-likes} for every name in `users`.

    Iterates over `users` directly instead of a hard-coded range(5), so it
    works for any number of handles. `top_handles` maps username -> follower
    count and `avg_like` maps username -> average like count.
    """
    foll_like = {}
    for name in users :
        foll_like[name] = top_handles[name]//avg_like[name]
    return foll_like

# here I am using the answer of problem 1.1, which is a dictionary of {'username': followers}, i.e. top_handles
# extracting usernames from top_handles
top_handles_names = list(top_handles.keys())

# now calling getavg_foll_like_ratio(top_handles, avg_like, users)
avg_fol_like_ratio = getavg_foll_like_ratio(top_handles, avg_likes, top_handles_names)

print('Average followers:like ratio of each handle :')
for user,ratio in avg_fol_like_ratio.items():
    print(user, ':', ratio)

# ##### 4.Create a bar graph to depict the above obtained information.
# +
# Plotting Bar Graph for average likes of each handle

# +
# Built from the per-handle averages obtained in problem 3.2.
bar_labels = list(avg_likes.keys())
bar_heights = list(avg_likes.values())

plt.bar(bar_labels, bar_heights, color='g', alpha=0.6)
plt.xlabel("Handle's Name", size=15)
plt.ylabel("Average Likes", size=15)
plt.title('Average like for each handle ', fontsize=20, color='green', alpha=0.8)
plt.grid()
plt.show()

# +
# Plotting Bar Graph for average followers:like ratio of each handle

# +
# Built from the per-handle ratios obtained in problem 3.3.
bar_labels = list(avg_fol_like_ratio.keys())
bar_heights = list(avg_fol_like_ratio.values())

plt.bar(bar_labels, bar_heights, color='Indigo', alpha=0.6)
plt.xlabel("Handle's Name", size=12)
plt.ylabel("Average Followers:Like Ratio", size=12)
plt.title('Average Followers:Like Ratio of Each Handle ', fontsize=20, color='Indigo', alpha=0.8)
plt.grid()
plt.show()
# -
Projects/InstaBot 2/Project_InstaBot_Part_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Scikit-learn MNIST Model Deployment # # * Wrap a Scikit-learn MNIST python model for use as a prediction microservice in seldon-core # * Run locally on Docker to test # * Deploy on seldon-core running on minikube # # ## Dependencies # # * [Helm](https://github.com/kubernetes/helm) # * [Minikube](https://github.com/kubernetes/minikube) # * [S2I](https://github.com/openshift/source-to-image) # # ```bash # pip install sklearn # pip install seldon-core # ``` # # ## Train locally # # + from sklearn.ensemble import RandomForestClassifier from sklearn import datasets, metrics from sklearn.utils import shuffle from sklearn.datasets import fetch_mldata from sklearn.externals import joblib from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/") mnist_images = mnist.train.images mnist_labels = mnist.train.labels # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(mnist_images) data = mnist_images.reshape((n_samples, -1)) targets = mnist_labels data,targets = shuffle(data,targets) classifier = RandomForestClassifier(n_estimators=30) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples // 2], targets[:n_samples // 2]) # Now predict the value of the digit on the second half: expected = targets[n_samples // 2:] test_data = data[n_samples // 2:] print(classifier.score(test_data, expected)) predicted = classifier.predict(data[n_samples // 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) joblib.dump(classifier, 'sk.pkl') # - # Wrap model using 
s2i # !s2i build . seldonio/seldon-core-s2i-python3:0.10 sk-mnist:0.1 # !docker run --name "mnist_predictor" -d --rm -p 5000:5000 sk-mnist:0.1 # Send some random features that conform to the contract # !seldon-core-tester contract.json 0.0.0.0 5000 -p # !docker rm mnist_predictor --force # ## Test using Minikube # # **Due to a [minikube/s2i issue](https://github.com/SeldonIO/seldon-core/issues/253) you will need [s2i >= 1.1.13](https://github.com/openshift/source-to-image/releases/tag/v1.1.13)** # !minikube start --memory 4096 # !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default # !helm init # !kubectl rollout status deploy/tiller-deploy -n kube-system # !helm install ../../../helm-charts/seldon-core-operator --name seldon-core --set usageMetrics.enabled=true --namespace seldon-system # !kubectl rollout status statefulset.apps/seldon-operator-controller-manager -n seldon-system # ## Setup Ingress # Please note: There are reported gRPC issues with ambassador (see https://github.com/SeldonIO/seldon-core/issues/473). # !helm install stable/ambassador --name ambassador --set crds.keep=false # !kubectl rollout status deployment.apps/ambassador # ## Wrap Model and Test # !eval $(minikube docker-env) && s2i build . seldonio/seldon-core-s2i-python3:0.10 sk-mnist:0.1 # !kubectl create -f sk_mnist.json # !kubectl rollout status deploy/sk-mnist-single-model-3812de6 # !seldon-core-api-tester contract.json `minikube ip` `kubectl get svc ambassador -o jsonpath='{.spec.ports[0].nodePort}'` \ # sk-mnist --namespace default -p # !minikube delete
examples/models/sk_mnist/skmnist.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data Analysis - [`FinalProject`]
#
# > This document contains the main findings regarding the behavior of the target dataset, which contains features and data descriptive of your population and with potential predictive capabilities.
#
# > All the analyses below are **suggestions** to help guide you in your analysis. If a heading or sub-heading does not apply, feel free to not include it in your notebook.

# ## Useful packages
#
# * Pandas profiling
# * Seaborn
# * Pandas Plot
# * Altair
# * Plotly
# * Scipy

# ## Summary
# Provide a description of your target dataset, including its purpose, max 3 lines.
#
# "This dataset contains information about a company's operation, growth, financial health and location in order to predict its revenue."

# ## Feature (Xs Variables) Analysis
#
# Run a univariate analysis suite, such as [Pandas Profiling](https://github.com/pandas-profiling/pandas-profiling), on a sample and talk about the main insights from the data gathered.
#
# Talk about interesting findings and show the workspace below.

# ### Correlogram
# Include a correlogram with the main features of the target dataset. Include observed outputs in the last column.
#
# A correlogram describes correlations between variables. The variables grouped in this case form correlation clusters. The above example indicates that the main relationships can be grouped in 4 variables within each cluster.
#
#

# ### QQ plots - How big is your data?
#
# According to the [law of large numbers](https://en.wikipedia.org/wiki/Law_of_large_numbers), features in very large datasets will behave according to a normal distribution. Many models used for big data are optimal only for normally distributed variables, e.g. PCA, so it can be a good idea to verify that beforehand.
#
# Include a graph with normal QQ-plots of your main numeric variables.

# This graph allows you to verify whether your data is distributed according to a Normal/Gaussian variable. The red line is the baseline model, Gaussian in this case. An alignment of samples with the baseline indicates agreement between the distributions.
#

# ## Target (Y) variable analysis
#
# Check for skewness of the data, imbalanced labels, correlation with the calculated features, and any other problems that might arise.

# ### Representativeness
#
# This is one of the most important steps of your data quality report. It describes whether the domains of your datasets allow for the specifications to be met.
#
# Describe how the features selected and created answer the problem at hand.

# #### Coverage of problem's scope
#
# Describe whether your datasets contain enough samples in regions of the space that can cover the entire problem scope.
#
# "Our problem aims at predicting every state of Brazil for every cnae. So there are two potential [strata](https://en.wikipedia.org/wiki/Stratified_sampling): state and CNAE."
#
# "Analysing how the variable `state` is covered by our datasets."
#
# State | Samples in target set | Samples in observed set | Ratio observed/target
# ------|--------------------|-------------------|-----
# `SP` | 80M (80%) | 2K (20%) | 0.0001
# `SC` | 10K (<1%) | 8K (80%) | 0.8
# `AM` | 0 (0%) | 0 (0%) | ind
# `Other` | ~20M (15%) | 0 (0%) | 0
#
# "A few takeaways:
#
# 1. You can see that the state `AM` is not covered by the datasets.
# 2. You also notice that the observed set contains most of its samples in SC, i.e. there is an imbalance in your observed dataset.
#
# There is not much you can do about Item 1, except re-specify the problem or collect more data. Item 2 will affect how well your model can reproduce targets from states other than `SC`; this might lead to a poor model outside that region.
#
# Note that such coverage analysis can be applied for every feature.
It is good practice to check this for the most important features and strata.
analysis/data_analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lecture 3. Dummy Q-Learning # action을 했을 때 마다 Reward를 주는 것이 아니라 잘 했을때 +를 주고 평상시에는 0을 준다. # # Q-function : state에서 특정 action을 했을때, 받을 수 있는 Value를 알려줌. (Policy) # 1. Max Value를 찾는다. max a # 2. Max를 주는 Action을 찾는다. argmax a # # Q^hat(s, a) <- r + max_a'Q^hat(s', a') # ## Lab # + import gym import numpy as np import matplotlib.pyplot as plt from gym.envs.registration import register import random as pr def rargmax(vector): m = np.amax(vector) indices = np.nonzero(vector == m)[0] return pr.choice(indices) register( id='FrozenLake-v3', entry_point='gym.envs.toy_text:FrozenLakeEnv', kwargs={'map_name': '4x4', 'is_slippery': False} ) # - env = gym.make('FrozenLake-v3') # + # Initialize table with all zeros Q = np.zeros([env.observation_space.n, env.action_space.n]) # Set learning parameters num_episodes = 2000 # create lists to contain total rewards and steps per episode rList = [] for i in range(num_episodes): # Reset environment and get first new observation state = env.reset() rAll = 0 done = False # The Q-Table learning algorithm while not done: action = rargmax(Q[state, :]) # Get new state and reward from environment new_state, reward, done, _ = env.step(action) # Update Q-Table with new knowledge using learning rate Q[state, action] = reward + np.max(Q[new_state, :]) rAll += reward state = new_state rList.append(rAll) # - print("Success rate: " + str(sum(rList)/num_episodes)) print("Final Q-Table Values") print("LEFT DOWN RIGHT UP") print(Q) plt.bar(range(len(rList)), rList, color="blue") plt.show()
hunkim_ReinforcementLearning/Lecture03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Find protein homologs by computing Levenshtein distance via dynamic programming.
import numpy as np

# Amino-acid one-letter codes and their physico-chemical class; substituting
# within a class is penalised less than substituting across classes.
aa=['G','A','V','L','I','S','C','T','M','P','F','Y','W','H','K','R','D','E','N','Q','X']
classes=['aliphatic','aliphatic','aliphatic','aliphatic','aliphatic','Hydroxyl','Hydroxyl','Hydroxyl','Hydroxyl','Cyclic','Aromatic','Aromatic','Aromatic','Basic','Basic','Basic','Acidic','Acidic','Acidic','Acidic','Any']

aa_dict = dict(zip(aa, classes))

def levenshtein(sequence1,sequence2):
    """Class-aware Levenshtein distance between two amino-acid sequences.

    Costs: insertion/deletion = 1; substitution = 2 when the residues belong
    to different physico-chemical classes, 1 when they share a class; exact
    matches cost 0. Accepts any indexable sequences of one-letter codes
    (str or list). Returns an int.
    """
    if len(sequence1)<len(sequence2):
        return levenshtein(sequence2,sequence1)
    if len(sequence2)==0:
        return len(sequence1)
    # BUG FIX: the memo table used np.int8, which silently overflows once a
    # distance exceeds 127 -- real protein sequences are hundreds of residues
    # long, so distances easily exceed that. Use int64 instead.
    memo=np.zeros((len(sequence1)+1,len(sequence2)+1),np.int64)
    memo[0]=range(len(sequence2)+1)
    for i,c1 in enumerate(sequence1):
        memo[i+1, 0]=i+1
        for j,c2 in enumerate(sequence2):
            insertions=memo[i,j+1]+1
            deletions=memo[i+1,j]+1
            if aa_dict[c1] != aa_dict[c2]:
                # cross-class substitution costs 2 (0 on an exact match)
                substitutions=memo[i,j]+2*(c1 != c2)
            else:
                # same-class substitution costs 1 (0 on an exact match)
                substitutions=memo[i,j]+(c1 != c2)
            memo[i+1,j+1]=min(insertions,deletions,substitutions)
    return int(memo[-1,-1])

# +
# Read in the fasta file: each '>' header line is followed by one sequence line.
seq_dict={}
with open('proteins.fasta', "r") as file:
    for line in file:
        if line.startswith('>'):
            x=line.strip('\n')
            y=next(file)
            y=y.strip('\n')
            seq_dict[x] = y

# test levenshtein
seq1=seq_dict['>NP_000940.1 prolactin receptor isoform 1 precursor [Homo sapiens]']
seq2=['P','I','D','N','Y','L','K','L','L','K','C','R','I','I','H','N','N','N','C']
levenshtein(seq1,seq2)

# +
import pandas as pd

def output_k_homologs(reference_sequence,k):
    """Return a DataFrame of the k closest homologs to `reference_sequence`.

    Columns: proteinName, levenshteinDistance; sorted by ascending distance.
    Sequences identical to the reference are excluded from the candidates.
    """
    new_seq_dict = {name: seq for name, seq in seq_dict.items() if seq != reference_sequence}
    # Build all rows first and construct the frame once: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and appending one row at a
    # time inside a loop was quadratic anyway.
    rows = [{'proteinName': name, 'levenshteinDistance': levenshtein(reference_sequence, seq)}
            for name, seq in new_seq_dict.items()]
    results = pd.DataFrame(rows, columns=['proteinName', 'levenshteinDistance'])
    return results.sort_values(by='levenshteinDistance').head(k)
# -

#output homologs
output_k_homologs(seq1, 10)
Levenshtein.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# # Store price prediction
#
# ## 1. Loading prerequisites
# #### 1.1. Libraries

suppressWarnings({
    if(!require(dplyr)) install.packages("dplyr")
    if(!require(ggplot2)) install.packages("ggplot2")
    if(!require(caret)) install.packages("caret")
    if(!require(psych)) install.packages("psych")
    #if(!require(Hmisc)) install.packages("Hmisc",dependencies = TRUE)

    library(dplyr)
    library(ggplot2)
    library(caret)
    library(psych)
    #library(Hmisc)
})

options(repr.plot.width=6, repr.plot.height=4)

# #### 1.2. Data

DATA_PATH = "./data/"
train_raw = read.table(paste0(DATA_PATH,"Train.csv"), header = T, sep = ",")
test_raw = read.table(paste0(DATA_PATH,"Test.csv"), header = T, sep = ",")
pred_true = read.table(paste0(DATA_PATH,"Sample Submission.csv"), header = T, sep = ",")

dim(train_raw); dim(test_raw); dim(pred_true)

# ## 2. Exploratory data analysis
# #### 2.1. tabular view of data

head(train_raw, 10)

# #### 2.2. Studying the original structure of the data

# FIX: inspect the raw frame -- `data` is not defined until the mutate() below,
# so sapply(data, class) failed in a fresh session.
sapply(train_raw, class)

data = mutate(
    train_raw,
    InvoiceNo = as.numeric(InvoiceNo),
    StockCode = factor(StockCode),
    Description = factor(Description),
    Quantity = as.numeric(Quantity),
    # NOTE(review): as.POSIXlt stores a list-like column inside the data frame;
    # as.POSIXct is usually what you want for data-frame columns -- verify.
    InvoiceDate = as.POSIXlt(InvoiceDate),
    UnitPrice = as.numeric(UnitPrice),
    CustomerID = factor(CustomerID),
    Country = factor(Country)
)

# NOTE(review): this overwrite discards the typed columns created by the
# mutate() above -- downstream code operates on the raw columns. Confirm this
# is intentional; if not, delete this line.
data = train_raw

# #### 2.3 Summarizing the data in training set

summary(data)

# #### 2.5. Check for missing data

sapply(data, function(x){sum(is.na(x))})

# #### 2.6. Count of unique values in each fields

sapply(data, function(x){length(unique(x))})

sapply(data[,-c(1,5)], function(x){round(cor(x,data[,1]),2)})

# This machine learning problem aims to predict the price of items in store, not forecast trend in prices overtime, therefore we may remove the InvoiceDate field.
#
# Also the InvoiceNo is very little correlated to the other columns hence can't be used to fit the model

# FIX: dplyr::select takes the selection directly; the previous
# `select(data, select = -c(...))` passed a *named* argument, which dplyr
# treats as a rename target rather than dropping the columns.
data = select(data, -c(InvoiceNo, InvoiceDate))

sapply(data, skew)

qplot(data$UnitPrice, bins=30)

# The unit price field being highly skewed, variables need to be normalized and/or scaled for regression model to fit the values correctly, otherwise would lead to predictions biased to certain values.
Case Studies/R/store_price_prediction/.ipynb_checkpoints/analysis-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="hBgIy9eYYx35"
# ## **Installation**
# Requirements:
# - python >= 3.7
#
# We highly recommend CUDA when using torchRec. If using CUDA:
# - cuda >= 11.0
#

# + id="BB2K68OYUJ_t"
# install conda to make installing pytorch with cudatoolkit 11.3 easier.
# !sudo rm Miniconda3-py37_4.9.2-Linux-x86_64.sh Miniconda3-py37_4.9.2-Linux-x86_64.sh.*
# !sudo wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.9.2-Linux-x86_64.sh
# !sudo chmod +x Miniconda3-py37_4.9.2-Linux-x86_64.sh
# !sudo bash ./Miniconda3-py37_4.9.2-Linux-x86_64.sh -b -f -p /usr/local

# + id="sFYvP95xaAER"
# install pytorch with cudatoolkit 11.3
# !sudo conda install pytorch cudatoolkit=11.3 -c pytorch-nightly -y

# + [markdown] id="7iY7Uv11mJYK"
# Installing torchRec will also install [FBGEMM](https://github.com/pytorch/fbgemm), a collection of CUDA kernels and GPU enabled operations to run

# + id="tUnIw-ZREQJy"
# install torchrec
# !pip3 install torchrec-nightly

# + [markdown] id="0wLX94Lw_Lml"
# Install multiprocess, which works with ipython for multi-processing programming within colab

# + id="HKoKRP-QzRCF"
# !pip3 install multiprocess

# + [markdown] id="b6EHgotRXFQh"
# The following steps are needed for the Colab runtime to detect the added shared libraries. The runtime searches for shared libraries in /usr/lib, so we copy over the libraries which were installed in /usr/local/lib/. **This is a very necessary step, only in the colab runtime**.

# + id="_P45pDteRcWj"
# !sudo cp /usr/local/lib/lib* /usr/lib/

# + [markdown] id="n5_X2WOAYG3c"
# \**Restart your runtime at this point for the newly installed packages to be seen.** Run the step below immediately after restarting so that python knows where to look for packages. **Always run this step after restarting the runtime.**

# + id="8cktNrh8R9rC"
# Pin sys.path to the conda-installed python3.7 site-packages so the freshly
# installed wheels are importable inside this Colab runtime.
import sys
sys.path = ['', '/env/python', '/usr/local/lib/python37.zip', '/usr/local/lib/python3.7', '/usr/local/lib/python3.7/lib-dynload', '/usr/local/lib/python3.7/site-packages', './.local/lib/python3.7/site-packages']

# + [markdown] id="HWBOrwVSnrNE"
# ## **Overview**
# This tutorial will mainly cover the sharding schemes of embedding tables via `EmbeddingPlanner` and `DistributedModelParallel` API and explore the benefits of different sharding schemes for the embedding tables by explicitly configuring them.

# + [markdown] id="udsN6PlUo1zF"
# ### Distributed Setup
# Due to the notebook environment, we cannot run [`SPMD`](https://en.wikipedia.org/wiki/SPMD) program here but we can do multiprocessing inside the notebook to mimic the setup. Users should be responsible for setting up their own [`SPMD`](https://en.wikipedia.org/wiki/SPMD) launcher when using Torchrec.
# We setup our environment so that torch distributed based communication backend can work.

# + id="4-v17rxkopQw"
import os
import torch
import torchrec

# Rendezvous endpoint used by torch.distributed's default TCP init.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"

# + [markdown] id="ZdSUWBRxoP8R"
# ### Constructing our embedding model
# Here we use TorchRec offering of [`EmbeddingBagCollection`](https://github.com/facebookresearch/torchrec/blob/main/torchrec/modules/embedding_modules.py#L59) to construct our embedding bag model with embedding tables.
#
# Here, we create an EmbeddingBagCollection (EBC) with four embedding bags. We have two types of tables: large tables and small tables differentiated by their row size difference: 4096 vs 1024. Each table is still represented by 64 dimension embedding.
#
# We configure the `ParameterConstraints` data structure for the tables, which provides hints for the model parallel API to help decide the sharding and placement strategy for the tables.
# In TorchRec, we support
# * `table-wise`: place the entire table on one device;
# * `row-wise`: shard the table evenly by row dimension and place one shard on each device of the communication world;
# * `column-wise`: shard the table evenly by embedding dimension, and place one shard on each device of the communication world;
# * `table-row-wise`: special sharding optimized for intra-host communication for available fast intra-machine device interconnect, e.g. NVLink;
# * `data_parallel`: replicate the tables for every device;
#
# Note how we initially allocate the EBC on device "meta". This will tell EBC to not allocate memory yet.

# + id="e7UQBuG09hbj"
from torchrec.distributed.planner.types import ParameterConstraints
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.types import ShardingType
from typing import Dict

# Two large (4096-row) and two small (1024-row) tables, all 64-dim, sum-pooled.
large_table_cnt = 2
small_table_cnt = 2
large_tables=[
  torchrec.EmbeddingBagConfig(
    name="large_table_" + str(i),
    embedding_dim=64,
    num_embeddings=4096,
    feature_names=["large_table_feature_" + str(i)],
    pooling=torchrec.PoolingType.SUM,
  ) for i in range(large_table_cnt)
]
small_tables=[
  torchrec.EmbeddingBagConfig(
    name="small_table_" + str(i),
    embedding_dim=64,
    num_embeddings=1024,
    feature_names=["small_table_feature_" + str(i)],
    pooling=torchrec.PoolingType.SUM,
  ) for i in range(small_table_cnt)
]

def gen_constraints(sharding_type: ShardingType = ShardingType.TABLE_WISE) -> Dict[str, ParameterConstraints]:
    """Build a ParameterConstraints planner hint for every table, pinning them all to `sharding_type`."""
    large_table_constraints = {
        "large_table_" + str(i): ParameterConstraints(
            sharding_types=[sharding_type.value],
        ) for i in range(large_table_cnt)
    }
    small_table_constraints = {
        "small_table_" + str(i): ParameterConstraints(
            sharding_types=[sharding_type.value],
        ) for i in range(small_table_cnt)
    }
    constraints = {**large_table_constraints, **small_table_constraints}
    return constraints

# + id="Iz_GZDp_oQ19"
# NOTE(review): the markdown above says the EBC is allocated on device "meta",
# but the code passes device="cuda" -- confirm which is intended.
ebc = torchrec.EmbeddingBagCollection(
    device="cuda",
    tables=large_tables + small_tables
)

# + [markdown] id="7m0_ssVLFQEH"
# ### DistributedModelParallel in multiprocessing
# Now, we have a single process execution function for mimicking one rank's work during [`SPMD`](https://en.wikipedia.org/wiki/SPMD) execution.
#
# This code will shard the model collectively with other processes and allocate memories accordingly. It first sets up process groups and do embedding table placement using planner and generate sharded model using `DistributedModelParallel`.
#

# + id="PztCaGmLA85u"
def single_rank_execution(
    rank: int,
    world_size: int,
    constraints: Dict[str, ParameterConstraints],
    module: torch.nn.Module,
    backend: str,
) -> "DistributedModelParallel":
    # Return annotation corrected from `None`: the function returns the
    # sharded model (string annotation, since the class is imported below).
    """Set up torch.distributed for `rank`, plan the sharding, and return the sharded model."""
    import os
    import torch
    import torch.distributed as dist
    from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
    from torchrec.distributed.model_parallel import DistributedModelParallel
    from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
    from torchrec.distributed.types import ModuleSharder, ShardingEnv
    from typing import cast

    def init_distributed_single_host(
        rank: int,
        world_size: int,
        backend: str,
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
    ) -> dist.ProcessGroup:
        # Single-host rendezvous: RANK/WORLD_SIZE env vars plus the
        # MASTER_ADDR/MASTER_PORT set earlier in the notebook.
        os.environ["RANK"] = f"{rank}"
        os.environ["WORLD_SIZE"] = f"{world_size}"
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        return dist.group.WORLD

    if backend == "nccl":
        device = torch.device(f"cuda:{rank}")
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")
    topology = Topology(world_size=world_size, compute_device="cuda")
    pg = init_distributed_single_host(rank, world_size, backend)
    planner = EmbeddingShardingPlanner(
        topology=topology,
        constraints=constraints,
    )
    sharders = [cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())]
    # collective_plan agrees on one placement across all ranks in `pg`.
    plan: ShardingPlan = planner.collective_plan(module, sharders, pg)

    sharded_model = DistributedModelParallel(
        module,
        env=ShardingEnv.from_process_group(pg),
        plan=plan,
        sharders=sharders,
        device=device,
    )
    print(f"rank:{rank},sharding plan: {plan}")
    return sharded_model

# + [markdown] id="3YvDnV_wz_An"
# ### Multiprocessing Execution
# Now let's execute the code in multi-processes representing multiple GPU ranks.
#
#

# + id="arW0Jf6qEl-h"
import multiprocess

def spmd_sharing_simulation(
    sharding_type: ShardingType = ShardingType.TABLE_WISE,
    world_size = 2,
):
    """Spawn `world_size` processes that each run single_rank_execution with `sharding_type` constraints."""
    ctx = multiprocess.get_context("spawn")
    processes = []
    for rank in range(world_size):
        p = ctx.Process(
            target=single_rank_execution,
            args=(
                rank,
                world_size,
                gen_constraints(sharding_type),
                ebc,
                "nccl"
            ),
        )
        p.start()
        processes.append(p)

    for p in processes:
        p.join()
        assert 0 == p.exitcode

# + [markdown] id="31UWMaymj7Pu"
# ### Table Wise Sharding
# Now let's execute the code in two processes for 2 GPUs. We can see in the plan print that how our tables are sharded across GPUs. Each node will have one large table and one small which shows our planner tries for load balance for the embedding tables. Table-wise is the de facto go-to sharding scheme for many small-medium size tables for load balancing over the devices.
# + colab={"base_uri": "https://localhost:8080/"} id="Yb4v1HA3IJzU" outputId="b8f08b10-eb85-48f3-8705-b67efd4eba2c" spmd_sharing_simulation(ShardingType.TABLE_WISE) # + [markdown] id="5HkwxEwm4O8u" # ### Explore other sharding modes # We have initially explored what table-wise sharding would look like and how it balances the tables placement. Now we explore sharding modes with finer focus on load balance: row-wise. Row-wise is specifically addressing large tables which a single device cannot hold due to the memory size increase from large embedding row numbers. It can address the placement of the super large tables in your models. Users can see that in the `shard_sizes` section in the printed plan log, the tables are halved by row dimension to be distributed onto two GPUs. # # + id="pGBgReGx5VrB" colab={"base_uri": "https://localhost:8080/"} outputId="6e22a2f0-7373-4dcc-ee69-67f3e95d78a7" spmd_sharing_simulation(ShardingType.ROW_WISE) # + [markdown] id="mqnInw_uEjjY" # Column-wise on the other hand, address the load imbalance problems for tables with large embedding dimensions. We will split the table vertically. Users can see that in the `shard_sizes` section in the printed plan log, the tables are halved by embedding dimension to be distributed onto two GPUs. # # + id="DWTyuV9I5afU" colab={"base_uri": "https://localhost:8080/"} outputId="daaa95cd-f653-47fe-809f-5d1d63cc05d7" spmd_sharing_simulation(ShardingType.COLUMN_WISE) # + [markdown] id="711VBygVHGJ6" # For `table-row-wise`, unfortuately we cannot simulate it due to its nature of operating under multi-host setup. We will present a python [`SPMD`](https://en.wikipedia.org/wiki/SPMD) example in the future to train models with `table-row-wise`. # + [markdown] id="1G8aUfmeMA7m" # # With data parallel, we will repeat the tables for all devices. # # + colab={"base_uri": "https://localhost:8080/"} id="WFk-QLlRL-ST" outputId="662a6d6e-cb1b-440d-ff1b-4619076117a3" spmd_sharing_simulation(ShardingType.DATA_PARALLEL)
examples/sharding/sharding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0 # language: julia # name: julia-0.5 # --- # + [markdown] slideshow={"slide_type": "slide"} # # The Future # # ## 1. Julia blurs the line between ordinary users, developers, and data scientists # ## 2. Increase productivity and brain cycles # ## 3. Fast development time # ## 4. Fast run time # ## 5. Fast exploratory time # ## 6. Promote reproducibility and documentation by employing easier workflow
7. Future.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

require( readxl )
require( Cairo )

# +
# Create directories for output files
# NOTE(review): out_6_dir is created but never used in this notebook -
# presumably reserved for a 6-group analysis; confirm it is still needed.
analysis_root <- "analysis"
out_6_dir <- paste( analysis_root, "/labs-6-groups", sep="" )
out_3_dir <- paste( analysis_root, "/labs-3-groups", sep="" )

dir.create( analysis_root )
dir.create( out_6_dir )
dir.create( out_3_dir )
# -

# Function to create a dataframe consisting of only the groups and columns of interest for this analysis.
# Input: h = one sheet of the labs workbook; grps = group factor levels (unused here beyond the signature).
# Output: a long-format dataframe (animal_id, group, TP, val) with NA values removed.
createGroupSubset <- function( h, grps )
{
    dd <- data.frame()
    # this will order the dataframe chronologically which gives the correct order for graphing
    tps <- c("BL", "EH", "MID REBOA", "END REBOA", "1 HR", "6 HR", "12 HR", "ES" )
    for( tp in tps ) {
        idx = which( colnames(h) == tp )
        if( length(idx) > 0 ) {
            # one stacked slice per timepoint column present in the sheet
            f <- data.frame( animal_id=h$animal_id, group=h$group, TP=tp, val=h[idx][[1]] )
            if( nrow( dd ) == 0 ) {
                dd <- f
            } else {
                dd <- rbind( dd, f )
            }
        }
    }
    # remove any rows with a NA value
    subset( dd, !is.na( dd$val ) )
}

# +
# Use the "aggregate" function to create a dataframe with group, timepoint, mean, and SD.
# NOTE(review): this function reads `grps` from the global environment rather
# than taking it as a parameter - it silently depends on the `grps` assigned
# in the driver cell below; consider passing it in explicitly.
createAggregateAnalysis <- function( dd )
{
    # mean/sd/n per (group, timepoint) cell, ignoring NAs
    myAnalysis <- aggregate(dd$val,
               by = list(grp = dd$group, TP = dd$TP),
               FUN = function(x) c(mean = mean(x, na.rm=TRUE),
                                   sd = sd(x, na.rm = TRUE),
                                   n = length(x[!is.na(x)]) ))
    myAnalysis <- do.call(data.frame, myAnalysis)
    # standard error of the mean
    myAnalysis$se <- myAnalysis$x.sd / sqrt(myAnalysis$x.n)

    colnames(myAnalysis) <- c("grp", "TP", "mean", "sd", "n", "se")

    # The names column is used to label every bar
    myAnalysis$names <- as.character(myAnalysis$TP)
    # but we don't really want to label very bar, only the middle bar in each
    # cluster of bars, so set the undesired names to ""
    skip = trunc( (length(grps) + 1)/2 )
    for( idx in c(1:length(grps)) ) {
        if( idx != skip ) {
            idxs <- seq(idx, length(myAnalysis$names),length(grps))
            myAnalysis$names[ idxs ] <- ""
        }
    }
    myAnalysis
}

# +
# Draw one grouped bar chart (groups clustered within each timepoint) with
# standard-error whiskers, using base-R graphics on the current device.
makeOneAnalysisPlot <- function( myAnalysis, labName, y_min, y_max )
{
    # reshape means into a (group x timepoint) matrix for barplot()
    tabbedMeans <- tapply(myAnalysis$mean, list(myAnalysis$grp,
                                                myAnalysis$TP),
                          function(x) c(x = x))

    barCenters <- barplot(height = tabbedMeans,
                          beside = TRUE, las = 1,
                          ylim = c(y_min, y_max),
                          xpd=FALSE,
                          xaxt = "n",
                          main = "",
                          ylab = labName,
                          xlab = "",
                          border = "black", axes = TRUE,
                          legend.text = TRUE,
                          font=2, font.lab=2, cex.lab=1.5, # font expansion factor
                          args.legend = list(title = "Group",
                                             x = "topright" ))

    # add standard-error-of-the-mean indicators to the graph
    # NOTE(review): the next tapply() call discards its result - it looks
    # like a leftover duplicate of the tabbedMeans computation above.
    tapply(myAnalysis$mean, list(myAnalysis$grp, myAnalysis$TP),
           function(x) c(x = x))
    tabbedSE <- tapply(myAnalysis$se, list(myAnalysis$grp,
                                           myAnalysis$TP),
                       function(x) c(x = x))

    # rotated timepoint labels just below the axis
    dy = (y_max - y_min) * 0.02
    text(x = barCenters, y = par("usr")[3] - dy, srt = 30,
         adj = 1, labels = myAnalysis$names, xpd = TRUE, font=2, cex=1.3)

    # whisker stems plus capped arrowheads for mean +/- SE
    segments(barCenters, tabbedMeans - tabbedSE, barCenters,
             tabbedMeans + tabbedSE, lwd = 1.5)

    arrows(barCenters, tabbedMeans - tabbedSE, barCenters,
           tabbedMeans + tabbedSE, lwd = 1.5, angle = 90,
           code = 3, length = 0.05)

    # tidy up the graph with a line on the bottom row
    abline(a = y_min, b = 0)
}

# +
# End-to-end: read one lab's sheet, aggregate it, and plot it on the
# current graphics device with a fixed y-axis range.
readAndPlotOneLab <- function( labName, y_min, y_max, grps )
{
    # read the data
    xls <- "data/labs.xlsx"
    h <- read_xlsx( xls, sheet=labName )

    # convert the group to factors, ordering the factors in chronological order
    h$group <- factor( h$group, levels=grps )

    dd <- createGroupSubset( h, grps )
    analysis <- createAggregateAnalysis( dd )
    makeOneAnalysisPlot( analysis, labName, y_min, y_max )
}
# -

# Render one lab's plot to <destdir>/<labName>.png via the Cairo device;
# extra arguments (...) are forwarded to readAndPlotOneLab.
saveOneGraph <- function( destdir, labName, ...
)
{
    fileName <- paste( destdir, "/", labName, ".png", sep="" )
    # png( file=fileName, width=800, height=600, units="px" )
    # par(mar = c(5, 6, 4, 5) + 0.1)
    # par(font=2) # bold
    Cairo(file=fileName, type="png", units="in", bg="white",
          width=8, height=6, pointsize=11, res=150)
    par(mar = c(5, 8, 4, 5) + 0.1)
    par(font=2)
    readAndPlotOneLab( labName, ... )
    dev.off()
}

# +
# Driver: 3-group analysis. `grps` is also read globally by
# createAggregateAnalysis (see NOTE above).
grps=c("FR","P45","P60")
destdir <- out_3_dir

# on-screen 3x2 panel of all five labs
par( mfrow=c(3,2))
readAndPlotOneLab( "Creatinine", 0, 3.7, grps)
readAndPlotOneLab( "Troponin", 0, 25, grps)
readAndPlotOneLab( "Myoglobin", 0, 2000, grps)
readAndPlotOneLab( "Lactate", 0, 16, grps)
readAndPlotOneLab( "pH", 7.0, 8.0, grps)

# one PNG per lab in the output directory
saveOneGraph(destdir, "Creatinine", 0, 3.7, grps)
saveOneGraph(destdir, "Troponin", 0, 25, grps)
saveOneGraph(destdir, "Myoglobin", 0, 2000, grps)
saveOneGraph(destdir, "Lactate", 0, 16, grps)
saveOneGraph(destdir, "pH", 7.0, 8.0, grps)
# -
labs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hindmarsh-Roseモデル # # \begin{equation} # \left\{ # \begin{aligned} # \dot{V} &= n - a V^3 + b V^2 - h + I,\\ # \dot{n} &= c - d V^2 -n, \\ # \dot{h} &= r(\sigma (V-V_0)-h). # \end{aligned} # \label{eqn:HR} # \right. # \end{equation} # $a = 1, b = 3, c = 1, d = 5, I = 2, r=0.001, \sigma =4, V_0=-1.6$とする import numpy as np from scipy.integrate import solve_ivp import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline sns.set('poster', 'whitegrid', 'dark', rc={"lines.linewidth": 2, 'grid.linestyle': '--'}) def HR(t, x, a,b,c,d,I,r,s,V0): V,n,h = x[0],x[1],x[2] return [n - a * V**3 + b * V**2 - h + I, c - d * V**2 - n, r*(s*(V-V0)-h)] t0 = 0.0 t1 = 2000.0 x0 = [-1.0, 2.0, 0.0] s0 = solve_ivp(HR, [t0, t1], x0, args=([1.0, 3.0, 1.0, 5.0, 2.0, 0.001, 4.0, -1.6]), method='DOP853', dense_output=True, rtol=1e-10, atol=1e-12) T = np.linspace(500, 2000, 10000) sol = s0.sol(T) fig = plt.figure(figsize=(9,6)) ax = fig.add_subplot(111) ax.set_xlabel("$t$") ax.set_xlim(500,2000) ax.set_ylim(-2,2.5) ax.plot(T, sol.T[:,0], '-k', linewidth=2, label="$V(t)$") ax.plot(T, sol.T[:,2], '-', color='gray', linewidth=2, label="$h(t)$") ax.legend(loc='upper center', bbox_to_anchor=(.5, -.15), ncol=2) # plt.savefig("burst_vh.pdf", bbox_inches='tight') T = np.linspace(500, 2000, 40000) sol = s0.sol(T) fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) ax.set_xlabel("$V$") ax.set_ylabel("$n$") ax.plot(sol.T[:,0],sol.T[:,1], '-k', linewidth=2) # plt.savefig("burst_vn.pdf", bbox_inches='tight') T = np.linspace(500, 2000, 40000) sol = s0.sol(T) fig = plt.figure(figsize=(8,5)) ax = fig.add_subplot(111) ax.set_xlabel("$n$") ax.set_ylabel("$h$") ax.plot(sol.T[:,1],sol.T[:,2], '-k', linewidth=2) # plt.savefig("burst_vh.pdf", bbox_inches='tight')
notebooks/oscillation/Hindmarsh-Rose.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="Sc7ixKCVXexy" # # 머신 러닝 교과서 3판 # + [markdown] id="CXu4zTlPXex2" # # 6장 - 모델 평가와 하이퍼파라미터 튜닝의 모범 사례 # + [markdown] id="BNT2eIXlXex3" # **아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.** # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch06/ch06.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch06/ch06.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> # </td> # </table> # + [markdown] id="qXHNJE0nXex3" # ### 목차 # + [markdown] id="7cecuaUCXex3" # - 파이프라인을 사용한 효율적인 워크플로 # - 위스콘신 유방암 데이터셋 # - 파이프라인으로 변환기와 추정기 연결 # - k-겹 교차 검증을 사용한 모델 성능 평가 # - 홀드아웃 방법 # - k-겹 교차 검증 # - 학습 곡선과 검증 곡선을 사용한 알고리즘 디버깅 # - 학습 곡선으로 편향과 분산 문제 분석 # - 검증 곡선으로 과대적합과 과소적합 조사 # - 그리드 서치를 사용한 머신 러닝 모델 세부 튜닝 # - 그리드 서치를 사용한 하이퍼파라미터 튜닝 # - 중첩 교차 검증을 사용한 알고리즘 선택 # - 여러 가지 성능 평가 지표 # - 오차 행렬 # - 분류 모델의 정밀도와 재현율 최적화 # - ROC 곡선 그리기 # - 다중 분류의 성능 지표 # - 불균형한 클래스 다루기 # - 요약 # + [markdown] id="dXVZNdlrXex4" # <br> # <br> # + colab={"base_uri": "https://localhost:8080/"} id="gZ91bR4lXex4" outputId="8229509e-a8f2-4761-843b-bc70afff06ef" # 코랩에서 실행할 경우 최신 버전의 사이킷런을 설치합니다. # !pip install --upgrade scikit-learn # + id="6GyXEH3GXex4" from IPython.display import Image # + [markdown] id="dTTyZ3SuXex4" # # 파이프라인을 사용한 효율적인 워크플로 # + [markdown] id="QYIcZSKcXex5" # ... 
# + [markdown] id="Mq-PXFWeXex5" # ## 위스콘신 유방암 데이터셋 # + colab={"base_uri": "https://localhost:8080/", "height": 218} id="7Xi9I0nJXex5" outputId="e7fb456e-0a1c-4a2e-f2c9-2dca45fed714" import pandas as pd df = pd.read_csv('https://archive.ics.uci.edu/ml/' 'machine-learning-databases' '/breast-cancer-wisconsin/wdbc.data', header=None) # UCI 머신 러닝 저장소에서 유방암 데이터셋을 다운로드할 수 없을 때 # 다음 주석을 해제하고 로컬 경로에서 데이터셋을 적재하세요: # df = pd.read_csv('wdbc.data', header=None) df.head() # + colab={"base_uri": "https://localhost:8080/"} id="3_0ehQ7FXex6" outputId="b9da4be2-aa64-473c-ae8c-b69bb14f53a3" df.shape # + [markdown] id="SLpsQoCvXex6" # <hr> # + colab={"base_uri": "https://localhost:8080/"} id="ynS23_cCXex6" outputId="746839b2-20c4-468e-ba3d-b158839c95c6" from sklearn.preprocessing import LabelEncoder X = df.loc[:, 2:].values y = df.loc[:, 1].values le = LabelEncoder() y = le.fit_transform(y) le.classes_ # + colab={"base_uri": "https://localhost:8080/"} id="2-aH5O6AXex7" outputId="d587a9d3-4c70-4ca7-e264-436bcc8b4e70" le.transform(['M', 'B']) # + id="FriWTHB2Xex7" from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.20, stratify=y, random_state=1) # + [markdown] id="1PjFC59vXex7" # <br> # <br> # + [markdown] id="CEPNZtaKXex7" # ## 파이프라인으로 변환기와 추정기 연결 # + colab={"base_uri": "https://localhost:8080/"} id="rQ2QJb4HXex7" outputId="6a1d9127-8928-4bbb-a927-0f13983ce7f6" from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline pipe_lr = make_pipeline(StandardScaler(), PCA(n_components=2), LogisticRegression(random_state=1)) pipe_lr.fit(X_train, y_train) y_pred = pipe_lr.predict(X_test) print('테스트 정확도: %.3f' % pipe_lr.score(X_test, y_test)) # + colab={"base_uri": "https://localhost:8080/", "height": 144} id="CoeVaXRdXex8" outputId="7b9a9e3d-b715-4205-ebd6-2ae921980d9b" import matplotlib.pyplot as 
plt from sklearn import set_config set_config(display='diagram') pipe_lr # + colab={"base_uri": "https://localhost:8080/", "height": 399} id="HlvW4t-nXex8" outputId="16b9d14c-776a-419e-8ca6-ff09eba43da4" Image(url='https://git.io/JtsTr', width=500) # + [markdown] id="MQsfoWA4Xex8" # <br> # <br> # + [markdown] id="JAF93G-MXex8" # # k-겹 교차 검증을 사용한 모델 성능 평가 # + [markdown] id="Jn5TEjHUXex8" # ... # + [markdown] id="nlkqYxr_Xex9" # ## 홀드아웃 방법 # + colab={"base_uri": "https://localhost:8080/", "height": 391} id="WiEErC_RXex9" outputId="2b9e173c-83b4-43f9-9ad1-316a23022d06" Image(url='https://git.io/JtsTo', width=500) # + [markdown] id="NNRz86PZXex9" # <br> # <br> # + [markdown] id="KP4qCAdmXex9" # ## K-겹 교차 검증 # + colab={"base_uri": "https://localhost:8080/", "height": 299} id="X2nepTeiXex9" outputId="c4a5664e-03d1-498a-eb9c-5e62075bd16b" Image(url='https://git.io/JtsT6', width=500) # + colab={"base_uri": "https://localhost:8080/"} id="N-pc5EjLXex9" outputId="35763a31-8b5a-4a75-ef91-965df3ca8add" import numpy as np from sklearn.model_selection import StratifiedKFold kfold = StratifiedKFold(n_splits=10).split(X_train, y_train) scores = [] for k, (train, test) in enumerate(kfold): pipe_lr.fit(X_train[train], y_train[train]) score = pipe_lr.score(X_train[test], y_train[test]) scores.append(score) print('폴드: %2d, 클래스 분포: %s, 정확도: %.3f' % (k+1, np.bincount(y_train[train]), score)) print('\nCV 정확도: %.3f +/- %.3f' % (np.mean(scores), np.std(scores))) # + colab={"base_uri": "https://localhost:8080/"} id="oJ98nuHyXex-" outputId="8503d01a-a43c-4e24-8824-9b8110d739d5" from sklearn.model_selection import cross_val_score scores = cross_val_score(estimator=pipe_lr, X=X_train, y=y_train, cv=10, n_jobs=1) print('CV 정확도 점수: %s' % scores) print('CV 정확도: %.3f +/- %.3f' % (np.mean(scores), np.std(scores))) # + colab={"base_uri": "https://localhost:8080/"} id="b3xbOJPkXex-" outputId="4fadb4bd-057f-4683-9f53-e69562f07f64" from sklearn.model_selection import cross_validate scores = 
cross_validate(estimator=pipe_lr, X=X_train, y=y_train, scoring=['accuracy'], cv=10, n_jobs=-1) print('CV 정확도 점수: %s' % scores['test_accuracy']) print('CV 정확도: %.3f +/- %.3f' % (np.mean(scores['test_accuracy']), np.std(scores['test_accuracy']))) # + [markdown] id="GHTLzmDRXex-" # `cross_val_predict` 함수는 `cross_val_score`와 비슷한 인터페이스를 제공하지만 훈련 데이터셋의 각 샘플이 테스트 폴드가 되었을 때 만들어진 예측을 반환합니다. 따라서 `cross_val_predict` 함수의 결과를 사용해 모델의 성능(예를 들어, 정확도)을 계산하면 `cross_val_score` 함수의 결과와 다르며 바람직한 일반화 성능 추정이 아닙니다. `cross_val_predict` 함수의 사용 용도는 훈련 세트에 대한 예측 결과를 시각화하거나 7장에서 소개하는 스태킹(Stacking) 앙상블(Ensemble) 방법처럼 다른 모델에 주입할 훈련 데이터를 만들기 위해 사용합니다. # + colab={"base_uri": "https://localhost:8080/"} id="HiitSmD5Xex-" outputId="fa4f60d0-9839-4a27-f548-cd131122f8ee" from sklearn.model_selection import cross_val_predict preds = cross_val_predict(estimator=pipe_lr, X=X_train, y=y_train, cv=10, n_jobs=-1) preds[:10] # + [markdown] id="gaI43iGHXex_" # `method` 매개변수에 반환될 값을 계산하기 위한 모델의 메서드를 지정할 수 있습니다. 예를 들어 `method='predict_proba'`로 지정하면 예측 확률을 반환합니다. `‘predict’`, `‘predict_proba’`, `‘predict_log_proba’`, `‘decision_function’` 등이 가능하며 기본값은 `'predict'`입니다. 
# + colab={"base_uri": "https://localhost:8080/"} id="4FRSLbhbXex_" outputId="bae200c4-901d-4773-a646-2734c20d00c9" from sklearn.model_selection import cross_val_predict preds = cross_val_predict(estimator=pipe_lr, X=X_train, y=y_train, cv=10, method='predict_proba', n_jobs=-1) preds[:10] # + [markdown] id="70pHlbHmXex_" # <br> # <br> # + [markdown] id="L0HTBIHNXex_" # # 학습 곡선과 검증 곡선을 사용한 알고리즘 디버깅 # + [markdown] id="QW_23nd0Xex_" # <br> # <br> # + [markdown] id="PW1UmZR2Xex_" # ## 학습 곡선으로 편향과 분산 문제 분석 # + colab={"base_uri": "https://localhost:8080/", "height": 539} id="OOXnlujfXex_" outputId="a5e4090e-c4cb-49c8-b47b-6a1dd9ce6e53" Image(url='https://git.io/JtsTi', width=600) # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Vs0IzgY7XeyA" outputId="6f95d39c-cdb0-4558-bc62-5bdeaef7e2df" import matplotlib.pyplot as plt from sklearn.model_selection import learning_curve pipe_lr = make_pipeline(StandardScaler(), LogisticRegression(penalty='l2', random_state=1, max_iter=10000)) train_sizes, train_scores, test_scores =\ learning_curve(estimator=pipe_lr, X=X_train, y=y_train, train_sizes=np.linspace(0.1, 1.0, 10), cv=10, n_jobs=1) train_mean = np.mean(train_scores, axis=1) train_std = np.std(train_scores, axis=1) test_mean = np.mean(test_scores, axis=1) test_std = np.std(test_scores, axis=1) plt.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5, label='Training accuracy') plt.fill_between(train_sizes, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue') plt.plot(train_sizes, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='Validation accuracy') plt.fill_between(train_sizes, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green') plt.grid() plt.xlabel('Number of training examples') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.ylim([0.8, 1.03]) plt.tight_layout() # plt.savefig('images/06_05.png', dpi=300) plt.show() # + [markdown] id="c5_aBJ5OXeyA" # <br> # <br> # + 
[markdown] id="7ktQMnb5XeyB" # ## 검증 곡선으로 과대적합과 과소적합 조사 # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="Ruln2oWfXeyB" outputId="f0614a0f-20ac-4f4c-b0ec-ad40cbc7a3b9" from sklearn.model_selection import validation_curve param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0] train_scores, test_scores = validation_curve( estimator=pipe_lr, X=X_train, y=y_train, param_name='logisticregression__C', param_range=param_range, cv=10) train_mean = np.mean(train_scores, axis=1) train_std = np.std(train_scores, axis=1) test_mean = np.mean(test_scores, axis=1) test_std = np.std(test_scores, axis=1) plt.plot(param_range, train_mean, color='blue', marker='o', markersize=5, label='Training accuracy') plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue') plt.plot(param_range, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='Validation accuracy') plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green') plt.grid() plt.xscale('log') plt.legend(loc='lower right') plt.xlabel('Parameter C') plt.ylabel('Accuracy') plt.ylim([0.8, 1.0]) plt.tight_layout() # plt.savefig('images/06_06.png', dpi=300) plt.show() # + [markdown] id="zjw7glssXeyB" # <br> # <br> # + [markdown] id="kP91PEYGXeyB" # # 그리드 서치를 사용한 머신 러닝 모델 세부 튜닝 # + [markdown] id="2vqzpeSQXeyB" # <br> # <br> # + [markdown] id="SR4wopwbXeyB" # ## 그리드 서치를 사용한 하이퍼파라미터 튜닝 # + colab={"base_uri": "https://localhost:8080/"} id="gwVJIOhMXeyB" outputId="52d29104-9ed2-4324-f7f4-d891062bd4b9" from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC pipe_svc = make_pipeline(StandardScaler(), SVC(random_state=1)) param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0] param_grid = [{'svc__C': param_range, 'svc__kernel': ['linear']}, {'svc__C': param_range, 'svc__gamma': param_range, 'svc__kernel': ['rbf']}] gs = GridSearchCV(estimator=pipe_svc, param_grid=param_grid, 
scoring='accuracy', refit=True, cv=10, n_jobs=-1) gs = gs.fit(X_train, y_train) print(gs.best_score_) print(gs.best_params_) # + colab={"base_uri": "https://localhost:8080/"} id="kzgc-Z3UXeyC" outputId="65466051-126e-48d6-9144-af30d98d13df" clf = gs.best_estimator_ # refit=True로 지정했기 때문에 다시 fit() 메서드를 호출할 필요가 없습니다. # clf.fit(X_train, y_train) print('테스트 정확도: %.3f' % clf.score(X_test, y_test)) # + [markdown] id="CWqRUnKxXeyC" # `GridSearchCV` 클래스와 `cross_valiate` 함수에서 `return_train_score` 매개변수를 `True`로 지정하면 훈련 폴드에 대한 점수를 계산하여 반환합니다. 훈련 세트에 대한 점수를 보고 과대적합과 과소적합에 대한 정보를 얻을 수 있지만 실행 시간이 오래 걸릴 수 있습니다. `param_range`에 8개의 값이 지정되어 있기 때문에 `SVC` 모델은 `'linear'` 커널에 대해 8번, `'rbf'` 커널에 대해 64번 교차 검증이 수행됩니다. 따라서 훈련 폴드마다 반환되는 점수는 총 72개입니다. 이 값은 `GridSearchCV` 클래스의 `cv_results_` 딕셔너리 속성에 split{폴드번호}_train_score 와 같은 키에 저장되어 있습니다. 예를 들어 첫 번째 폴드의 점수는 `'split0_train_score'` 키로 저장되어 있습니다. # + id="eikpmc6IXeyD" gs = GridSearchCV(estimator=pipe_svc, param_grid=param_grid, scoring='accuracy', cv=10, return_train_score=True, n_jobs=-1) gs = gs.fit(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="Pl6MWXgJXeyD" outputId="ddf57e3d-b088-4b1d-eb48-7e421caf7a2b" gs.cv_results_['split0_train_score'] # + [markdown] id="BlPgz112XeyD" # 전체 훈련 점수의 평균 값은 `'mean_train_score'` 키에 저장되어 있습니다. # + colab={"base_uri": "https://localhost:8080/"} id="wyhcP07sXeyD" outputId="326f0398-5798-41e9-9013-61b3fccc4f75" gs.cv_results_['mean_train_score'] # + [markdown] id="kWZ4WJ0aXeyD" # 비슷하게 첫 번째 폴드에 대한 테스트 점수는 `'split0_test_score'` 키에 저장되어 있습니다. # + colab={"base_uri": "https://localhost:8080/"} id="6ffxAFTEXeyD" outputId="cc6f7e4f-40ae-4c6d-83f2-ed95e5003b96" gs.cv_results_['split0_test_score'] # + [markdown] id="DpViR9TDXeyE" # `GridSearchCV` 클래스의 객체에서도 최종 모델의 `score`, `predict`, `transform` 메서드를 바로 호출할 수 있습니다. 
# + colab={"base_uri": "https://localhost:8080/"} id="6XykF-FtXeyE" outputId="2bb67732-3833-4fb2-ce99-8481229d3c7b"
print('테스트 정확도: %.3f' % gs.score(X_test, y_test))

# + [markdown] id="5RIW-6Q2XeyE"
# When the search space is wide, or a continuous-valued parameter such as the
# regularization parameter `C` must be explored, `RandomizedSearchCV` is more
# efficient. Its `n_iter` parameter caps the number of sampled candidates, so
# the search can be sized to the available compute. Whereas `GridSearchCV`
# takes lists of candidate values, `RandomizedSearchCV` takes distributions
# that can be sampled, such as `scipy.stats.uniform` or `scipy.stats.randint`.
# SciPy provides `loguniform`, a log-uniform distribution built on
# `scipy.stats.reciprocal`.
#
# The following example searches the same range for `C` as the grid search
# above, but with the number of candidates reduced to 30.

# + colab={"base_uri": "https://localhost:8080/"} id="QFbACppOXeyE" outputId="f6c1ca3c-6391-4085-fab4-3c8d37326d91"
from sklearn.model_selection import RandomizedSearchCV
# FIX: `sklearn.utils.fixes.loguniform` was removed in scikit-learn 1.1+
# (this notebook installs the latest scikit-learn in its first cell), so
# import the distribution from its canonical home in SciPy instead.
from scipy.stats import loguniform

# Log-uniform sampling covers 0.0001..1000 evenly across orders of magnitude.
distribution = loguniform(0.0001, 1000.0)

param_dist = [{'svc__C': distribution,
               'svc__kernel': ['linear']},
              {'svc__C': distribution,
               'svc__gamma': distribution,
               'svc__kernel': ['rbf']}]

rs = RandomizedSearchCV(estimator=pipe_svc,
                        param_distributions=param_dist,
                        n_iter=30,          # number of sampled candidates
                        cv=10,
                        random_state=1,     # reproducible sampling
                        n_jobs=-1)
rs = rs.fit(X_train, y_train)
print(rs.best_score_)
print(rs.best_params_)

# + [markdown] id="ox0WF0DYXeyE"
# As the result shows, `RandomizedSearchCV` evaluated fewer than half as many
# candidates yet found a parameter combination with nearly the same performance.
# + [markdown] id="2WYH5DSRXeyE" # <br> # <br> # + [markdown] id="-32xp1JGXeyE" # ## 중첩 교차 검증을 사용한 알고리즘 선택 # + colab={"base_uri": "https://localhost:8080/", "height": 454} id="H7HK1myoXeyE" outputId="d66c3384-580a-426a-8278-e0342e857291" Image(url='https://git.io/JtsTP', width=500) # + colab={"base_uri": "https://localhost:8080/"} id="MHJRx5G6XeyF" outputId="d6ef1050-d964-4a56-91e1-ce6af4ac3c16" gs = GridSearchCV(estimator=pipe_svc, param_grid=param_grid, scoring='accuracy', cv=2) scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5) print('CV 정확도: %.3f +/- %.3f' % (np.mean(scores), np.std(scores))) # + colab={"base_uri": "https://localhost:8080/"} id="CcSQo0evXeyF" outputId="fa05102a-4ca9-4338-88fd-44290a14f720" from sklearn.tree import DecisionTreeClassifier gs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0), param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}], scoring='accuracy', cv=2) scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5) print('CV 정확도: %.3f +/- %.3f' % (np.mean(scores), np.std(scores))) # + [markdown] id="ycotzjbxXeyF" # <br> # <br> # + [markdown] id="kLR76gLSXeyF" # # 여러 가지 성능 평가 지표 # + [markdown] id="vdQoiaMAXeyF" # ... 
# + [markdown] id="L59OVK5DXeyF" # ## 오차 행렬 # + colab={"base_uri": "https://localhost:8080/", "height": 320} id="8noOk3aeXeyF" outputId="b80456f8-0ccf-4d7f-c66a-5241ca3ad4c3" Image(url='https://git.io/JtsT1', width=300) # + colab={"base_uri": "https://localhost:8080/"} id="wsL1yO1UXeyF" outputId="c2906e39-b2d6-44fe-9128-5ab44413405f" from sklearn.metrics import confusion_matrix pipe_svc.fit(X_train, y_train) y_pred = pipe_svc.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) print(confmat) # + colab={"base_uri": "https://localhost:8080/", "height": 189} id="9FNJQbfvXeyG" outputId="554a9e17-713c-4c47-d880-0cc2a1df6be9" fig, ax = plt.subplots(figsize=(2.5, 2.5)) ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3) for i in range(confmat.shape[0]): for j in range(confmat.shape[1]): ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center') plt.xlabel('Predicted label') plt.ylabel('True label') plt.tight_layout() # plt.savefig('images/06_09.png', dpi=300) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 280} id="AD4POEzOXeyG" outputId="3a33a0df-99f8-4a5a-9fe9-7ead25b196fe" from sklearn.metrics import plot_confusion_matrix plot_confusion_matrix(pipe_svc, X_test, y_test) # plt.savefig('images/06_plot_confusion_matrix.png', dpi=300) plt.show() # + colab={"base_uri": "https://localhost:8080/", "height": 279} id="0Pct5OUkXeyG" outputId="656b77a7-6285-454b-86c2-e327f99841fd" plot_confusion_matrix(pipe_svc, X_test, y_test, normalize='all') # plt.savefig('images/06_plot_confusion_matrix_normalize.png', dpi=300) plt.show() # + [markdown] id="2Psy3VrhXeyG" # ### 추가 노트 # + [markdown] id="BkhNZ-DlXeyG" # 앞서 클래스 레이블을 인코딩했기 때문에 *악성(malignant)* 종양이 "양성" 클래스(1), *양성(benign)* 종양이 "음성" 클래스(0)입니다: # + colab={"base_uri": "https://localhost:8080/"} id="bNnk8PRZXeyG" outputId="e816f804-9484-480e-8350-a318b2585cab" le.transform(['M', 'B']) # + [markdown] id="305wBa5EXeyH" # 그리고 다음처럼 오차 행렬을 출력했습니다: # + colab={"base_uri": "https://localhost:8080/"} 
id="OWCaiesTXeyH" outputId="67af2837-86e2-4c95-e9bb-c2f7af1e5ddf" confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) print(confmat) # + [markdown] id="rFzvm_O0XeyH" # 클래스 0으로 올바르게 예측된 (진짜) 클래스 0 샘플(진짜 음성)이 행렬의 왼쪽 위에 있는 값입니다(인덱스 0,0). 진짜 음성이 오른쪽 아래(인덱스 1,1)에 있고 진짜 양성이 왼쪽 위에 있도록 순서를 바꾸려면 다음처럼 `labels` 매개변수를 사용합니다: # + colab={"base_uri": "https://localhost:8080/"} id="E64NcOUfXeyH" outputId="bca83440-d99e-4ecd-d99c-074409618948" confmat = confusion_matrix(y_true=y_test, y_pred=y_pred, labels=[1, 0]) print(confmat) # + [markdown] id="xnBgt6VAXeyH" # 결론: # # 이 예에서 클래스 1(악성 종양)이 양성 클래스입니다. 모델은 71개의 샘플을 정확하게 클래스 0(진짜 음성)으로 분류했습니다. 40개의 샘플은 클래스 1(진짜 양성)로 올바르게 분류했습니다. 클래스 1에 해당하는 두 개의 샘플을 클래스 0(가짜 음성)으로 잘못 분류했고 양성 종양인 하나의 샘플을 악성 종양(가짜 양성)으로 잘못 분류했습니다. # + [markdown] id="wZIOnICrXeyH" # <br> # <br> # + [markdown] id="73IYKbGNXeyH" # ## 분류 모델의 정밀도와 재현율 최적화 # + colab={"base_uri": "https://localhost:8080/"} id="hEYG_6vhXeyH" outputId="9c9be998-16ba-4f4a-f8ab-a622af234459" from sklearn.metrics import precision_score, recall_score, f1_score print('정밀도: %.3f' % precision_score(y_true=y_test, y_pred=y_pred)) print('재현율: %.3f' % recall_score(y_true=y_test, y_pred=y_pred)) print('F1: %.3f' % f1_score(y_true=y_test, y_pred=y_pred)) # + colab={"base_uri": "https://localhost:8080/"} id="PmSyi_c1XeyI" outputId="f25f198d-fc83-45ef-b277-4693076fc1f4" from sklearn.metrics import make_scorer scorer = make_scorer(f1_score, pos_label=0) c_gamma_range = [0.01, 0.1, 1.0, 10.0] param_grid = [{'svc__C': c_gamma_range, 'svc__kernel': ['linear']}, {'svc__C': c_gamma_range, 'svc__gamma': c_gamma_range, 'svc__kernel': ['rbf']}] gs = GridSearchCV(estimator=pipe_svc, param_grid=param_grid, scoring=scorer, cv=10, n_jobs=-1) gs = gs.fit(X_train, y_train) print(gs.best_score_) print(gs.best_params_) # + [markdown] id="9z05dC0tXeyI" # <br> # <br> # + [markdown] id="QsBmG0qbXeyI" # ## ROC 곡선 그리기 # + colab={"base_uri": "https://localhost:8080/", "height": 369} id="FehL-4KWXeyI" 
outputId="c7db4b15-9ff3-476b-d4b0-ea1668d41c17" from sklearn.metrics import roc_curve, auc from distutils.version import LooseVersion as Version from scipy import __version__ as scipy_version if scipy_version >= Version('1.4.1'): from numpy import interp else: from scipy import interp pipe_lr = make_pipeline(StandardScaler(), PCA(n_components=2), LogisticRegression(penalty='l2', random_state=1, C=100.0)) X_train2 = X_train[:, [4, 14]] cv = list(StratifiedKFold(n_splits=3).split(X_train, y_train)) fig = plt.figure(figsize=(7, 5)) mean_tpr = 0.0 mean_fpr = np.linspace(0, 1, 100) for i, (train, test) in enumerate(cv): probas = pipe_lr.fit(X_train2[train], y_train[train]).predict_proba(X_train2[test]) fpr, tpr, thresholds = roc_curve(y_train[test], probas[:, 1], pos_label=1) mean_tpr += interp(mean_fpr, fpr, tpr) mean_tpr[0] = 0.0 roc_auc = auc(fpr, tpr) plt.plot(fpr, tpr, label='ROC fold %d (area = %0.2f)' % (i+1, roc_auc)) plt.plot([0, 1], [0, 1], linestyle='--', color=(0.6, 0.6, 0.6), label='Random guessing') mean_tpr /= len(cv) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) plt.plot(mean_fpr, mean_tpr, 'k--', label='Mean ROC (area = %0.2f)' % mean_auc, lw=2) plt.plot([0, 0, 1], [0, 1, 1], linestyle=':', color='black', label='Perfect performance') plt.xlim([-0.05, 1.05]) plt.ylim([-0.05, 1.05]) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.legend(loc="lower right") plt.tight_layout() # plt.savefig('images/06_10.png', dpi=300) plt.show() # + [markdown] id="Rgqcz6hhXeyI" # 사이킷런 0.22 버전에서 `plot_roc_curve()` 함수와 `plot_precision_recall_curve()` 함수를 사용하면 ROC 곡선과 정밀도-재현율 곡선을 쉽게 그릴 수 있습니다. 
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="nZ1WFNoRXeyI" outputId="39bb68ab-6c81-49df-d0f1-ec23750a8e39"
# Same per-fold ROC analysis as above, but drawn with scikit-learn's built-in
# plotting helper instead of calling roc_curve()/plt.plot() manually.
# NOTE(review): plot_roc_curve was deprecated in scikit-learn 1.0 and removed
# in 1.2; the modern equivalent is RocCurveDisplay.from_estimator().
from sklearn.metrics import plot_roc_curve

fig, ax = plt.subplots(figsize=(7, 5))

mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)   # common FPR grid for vertical averaging

for i, (train, test) in enumerate(cv):
    pipe_lr.fit(X_train2[train], y_train[train])
    # The returned display object exposes this fold's fpr/tpr arrays.
    roc_disp = plot_roc_curve(pipe_lr,
                              X_train2[test],
                              y_train[test],
                              name=f'Fold {i}',
                              ax=ax)
    mean_tpr += interp(mean_fpr, roc_disp.fpr, roc_disp.tpr)
    mean_tpr[0] = 0.0

plt.plot([0, 1],
         [0, 1],
         linestyle='--',
         color=(0.6, 0.6, 0.6),
         label='Random guessing')

mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.plot([0, 0, 1],
         [0, 1, 1],
         linestyle=':',
         color='black',
         label='Perfect performance')

plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend(loc="lower right")
# plt.savefig('images/06_plot_roc_curve.png', dpi=300)
plt.show()

# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="nCCmu8NoXeyI" outputId="614719bc-b07c-494e-bed6-4d512a8c1a00"
# Per-fold precision-recall curves plus their average, via the built-in helper.
# NOTE(review): plot_precision_recall_curve was deprecated in scikit-learn 1.0
# and removed in 1.2; use PrecisionRecallDisplay.from_estimator() instead.
from sklearn.metrics import plot_precision_recall_curve

fig, ax = plt.subplots(figsize=(7, 5))

mean_precision = 0.0
mean_recall = np.linspace(0, 1, 100)   # common recall grid for averaging

for i, (train, test) in enumerate(cv):
    pipe_lr.fit(X_train2[train], y_train[train])
    pr_disp = plot_precision_recall_curve(
        pipe_lr, X_train2[test], y_train[test],
        name=f'Fold {i}', ax=ax)
    # recall comes back in decreasing order, so reverse both arrays to give
    # interp() ascending x-values.
    mean_precision += interp(mean_recall, pr_disp.recall[::-1],
                             pr_disp.precision[::-1])

plt.plot([0, 1],
         [1, 0],
         linestyle='--',
         color=(0.6, 0.6, 0.6),
         label='Random guessing')

mean_precision /= len(cv)
mean_auc = auc(mean_recall, mean_precision)
# NOTE(review): the label text says "Mean ROC" but this is the mean PR curve
# (area under the averaged precision-recall curve).
plt.plot(mean_recall, mean_precision, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.plot([0, 1, 1],
         [1, 1, 0],
         linestyle=':',
         color='black',
         label='Perfect performance')

plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.legend(loc="lower left")
# plt.savefig('images/06_plot_precision_recall_curve.png', dpi=300)
plt.show()

# + [markdown] id="svIhSMa9XeyI"
# <br>
# <br>

# + [markdown] id="0LYOJgmcXeyJ"
# ## 다중 분류의 성능 지표

# + id="-SJEmIidXeyJ"
# Micro-averaged precision scorer, usable with GridSearchCV etc. on
# multiclass problems.
pre_scorer = make_scorer(score_func=precision_score,
                         pos_label=1,
                         greater_is_better=True,
                         average='micro')

# + [markdown] id="6lCmOCMYXeyJ"
# # 불균형한 클래스 다루기

# + id="p5fjpTa6XeyJ"
# Build an artificially imbalanced dataset: all class-0 samples, only the
# first 40 samples of class 1.
X_imb = np.vstack((X[y == 0], X[y == 1][:40]))
y_imb = np.hstack((y[y == 0], y[y == 1][:40]))

# + colab={"base_uri": "https://localhost:8080/"} id="PhNk9XoMXeyJ" outputId="7dec950a-7f32-426e-ce28-2837324855c3"
# Accuracy of always predicting the majority class (class 0) — the baseline a
# classifier must beat on imbalanced data.
y_pred = np.zeros(y_imb.shape[0])
np.mean(y_pred == y_imb) * 100

# + colab={"base_uri": "https://localhost:8080/"} id="C6AAyAOvXeyJ" outputId="55b98bee-fb0e-4327-eec1-c408907a9078"
from sklearn.utils import resample

print('샘플링하기 전 클래스 1의 샘플 개수:', X_imb[y_imb == 1].shape[0])

# Upsample the minority class (sampling with replacement) until it matches
# the majority class size.
X_upsampled, y_upsampled = resample(X_imb[y_imb == 1],
                                    y_imb[y_imb == 1],
                                    replace=True,
                                    n_samples=X_imb[y_imb == 0].shape[0],
                                    random_state=123)

print('샘플링하기 후 클래스 1의 샘플 개수:', X_upsampled.shape[0])

# + id="ctN4217bXeyJ"
# Balanced dataset: original class 0 plus the upsampled class 1.
X_bal = np.vstack((X[y == 0], X_upsampled))
y_bal = np.hstack((y[y == 0], y_upsampled))

# + colab={"base_uri": "https://localhost:8080/"} id="2A7NXfGuXeyJ" outputId="b49f8cab-a9f2-4c22-eb92-0dda95d6e2bd"
# After balancing, the all-zero baseline drops to 50% accuracy.
y_pred = np.zeros(y_bal.shape[0])
np.mean(y_pred == y_bal) * 100

# + [markdown] id="_dlfjgmBXeyK"
# <br>
# <br>
ch06/ch06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- dbutils.widgets.text("text_box1", "", "Text") dbutils.widgets.text("text_box2", "", "Text") dbutils.widgets.text("text_box3", "", "Text") dbutils.widgets.text("text_box4", "", "Text") dbutils.widgets.text("text_box5", "", "Text") dbutils.widgets.removeAll() dbutils.widgets.remove("text_box")
ch4 - Manage Databricks with Databricks Utilities/Remove Widgets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Erasmus+ ICCT project (2018-1-SI01-KA203-047081) # Toggle cell visibility from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''') display(tag) # Hide the code completely # from IPython.display import HTML # tag = HTML('''<style> # div.input { # display:none; # } # </style>''') # display(tag) # - # ## Neskončno ravnotežnih točk - primer 1 # # Ta interaktivni primer obravnava $2\times2$ matriko, ki ima neskončno ravnotežnih točk, ki ležijo na osi $x_1$ (teoretične osnove so zajete v primeru [Ravnotežne točke](SS-13-Ravnotezne_tocke.ipynb)). # # Če želimo doseči, da $x_2=0$ predstavlja prostor, ki ga zasedajo ravnotežne točke, mora veljati: # # $$ # A\bar{x}=0 \quad \forall \, \bar{x}\in\begin{bmatrix} \alpha \\ 0\end{bmatrix} \, \text{, kjer} \, \alpha\in\mathbb{R}. # $$ # # Iz tega sledi, da mora vektor $\begin{bmatrix} \alpha \\ 0\end{bmatrix}$ pripadati jedru (ničelnemu prostoru, ang. null space) matrike $A$. # # ### Kako upravljati s tem interaktivnim primerom? # - Spreminjaj elemente matrike $A$ in opazuj, kako se spreminjajo ravnotežne točke. # - Poizkusi matriko $A$ prilagoditi na način, da bodo ravnotežne točke ležale na osi $x_1$. # - Podrobno si poglej lastne vrednosti izbrane matrike $A$. Ali lahko dosežeš neskončno ravnotežnih točk brez lastne vrednosti enake 0? 
# +
#Preparatory Cell

import control
import numpy
from IPython.display import display, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
import sympy as sym


#print a matrix latex-like
def bmatrix(a):
    """Returns a LaTeX bmatrix - by <NAME> (ICCT project)

    :a: numpy array
    :returns: LaTeX bmatrix as a string
    """
    if len(a.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    # Strip numpy's bracket formatting, then rebuild rows as LaTeX "a & b \\".
    lines = str(a).replace('[', '').replace(']', '').splitlines()
    rv = [r'\begin{bmatrix}']
    rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
    rv += [r'\end{bmatrix}']
    return '\n'.join(rv)

# Display formatted matrix:
def vmatrix(a):
    """Return a LaTeX vmatrix (determinant-style bars) for a numpy array.

    Same row-by-row conversion as bmatrix(), only the environment differs.
    """
    if len(a.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    lines = str(a).replace('[', '').replace(']', '').splitlines()
    rv = [r'\begin{vmatrix}']
    rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
    rv += [r'\end{vmatrix}']
    return '\n'.join(rv)

#create a NxM matrix widget
def createMatrixWidget(n,m):
    """Build a GridBox of n*m FloatText inputs laid out as an n-by-m grid.

    NOTE(review): `label=i` is not a standard FloatText trait and `track_size`
    is not a standard Layout property — presumably ignored by ipywidgets;
    confirm against the ipywidgets version in use.
    """
    M = widgets.GridBox(children=[widgets.FloatText(layout=widgets.Layout(width='100px', height='40px'),
        value=0.0, disabled=False, label=i) for i in range(n*m)],
        layout=widgets.Layout(
            #width='50%',
            grid_template_columns= ''.join(['100px ' for i in range(m)]),
            #grid_template_rows='80px 80px 80px',
            grid_row_gap='0px',
            track_size='0px')
        )
    return M

#extract matrix from widgets and convert to numpy matrix
def getNumpyMatFromWidget(M,n,m):
    """Read the GridBox children back into an n-by-m numpy matrix.

    NOTE(review): the column stride is hard-coded to 3 (`irow*3+icol`), so the
    indexing is only correct when m == 3, and the function never returns M_
    (it returns None). Appears unused in this notebook.
    """
    #get W gridbox dims
    M_ = numpy.matrix(numpy.zeros((n,m)))
    for irow in range(0,n):
        for icol in range(0,m):
            M_[irow,icol] = M.children[irow*3+icol].value


#this is a simple derived class from FloatText used to experience with interact
class floatWidget(widgets.FloatText):
    def __init__(self,**kwargs):
        #self.n = n
        # Preset value before delegating to the FloatText constructor.
        self.value = 30.0
        #self.M =
        widgets.FloatText.__init__(self, **kwargs)

#    def value(self):
#        return 0 #self.FloatText.value

from traitlets import Unicode
from ipywidgets import register


#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !
class matrixWidget(widgets.VBox):
    def updateM(self,change):
        """Mirror every child FloatText into self.M_ and publish it as self.value."""
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.M_[irow,icol] = self.children[irow].children[icol].value
                #print(self.M_[irow,icol])
        self.value = self.M_

    def dummychangecallback(self,change):
        # Placeholder observer; intentionally does nothing.
        pass

    def __init__(self,n,m):
        # n rows of m FloatText cells; self.value always holds the full matrix.
        self.n = n
        self.m = m
        self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
        self.value = self.M_

        widgets.VBox.__init__(self,
                             children = [
                                 widgets.HBox(children =
                                              [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
                                             )
                                 for j in range(n)
                             ])

        #fill in widgets and tell interact to call updateM each time a children changes value
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
                self.children[irow].children[icol].observe(self.updateM, names='value')
        #value = Unicode('<EMAIL>', help="The email value.").tag(sync=True)
        self.observe(self.updateM, names='value', type= 'All')

    def setM(self, newM):
        """Replace the matrix contents without firing updateM mid-write.

        Callbacks are detached, the widget cells are rewritten from newM, and
        the observers are then re-attached.
        """
        #disable callbacks, change values, and reenable
        self.unobserve(self.updateM, names='value', type= 'All')
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].unobserve(self.updateM, names='value')
        self.M_ = newM
        self.value = self.M_
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type= 'All')

        #self.children[irow].children[icol].observe(self.updateM, names='value')

#overlaod class for state space systems that DO NOT remove "useless" states (what "professor" of automatic control would do this?)
class sss(control.StateSpace):
    """control.StateSpace subclass that keeps 'useless' states.

    The base class silently prunes states it considers redundant; this
    override disables that pruning so the model stays exactly as entered.
    """
    def __init__(self,*args):
        #call base class init constructor
        control.StateSpace.__init__(self,*args)
    #disable function below in base class
    def _remove_useless_states(self):
        pass


# +
#define the matrixes
A=matrixWidget(2,2)
A.setM(numpy.matrix('1. 0.; 0. 1.'))


def main_callback(matA,DW):
    """Plot the equilibrium set {x : A x = 0} for the matrix in the widget.

    The null-space dimension decides the picture: 0 -> only the origin,
    1 -> a line through the origin, 2 -> the whole (visible) plane.
    DW is the dummy trigger value passed by interactive_output; it is not
    used inside the callback.
    """
    As = sym.Matrix(matA)
    NAs = As.nullspace()
    t = numpy.linspace(-10,10,1000)
    if len(NAs) == 1:
        # Parametrize the line spanned by the single null-space basis vector.
        eq1 = [t[i]*numpy.matrix(NAs[0]) for i in range(0,len(t))]
        x1 = [eq1[i][0,0] for i in range(0,len(t))]
        x2 = [eq1[i][1,0] for i in range(0,len(t))]
    fig = plt.figure(figsize=(6,6))
    if len(NAs) == 0:
        # Trivial kernel: the origin is the only equilibrium point.
        plt.plot(0,0,'bo')
    if len(NAs) == 1:
        plt.plot(x1,x2)
    if len(NAs) == 2:
        # A == 0: every point is an equilibrium; shade the visible region.
        plt.fill((-5,-5,5,5),(-5,5,5,-5),alpha=0.5)
    plt.xlim(left=-5,right=5)
    plt.ylim(top=5,bottom=-5)
    plt.grid()
    plt.xlabel('$x_1$')
    plt.ylabel('$x_2$')
    print('Baza jedra matrike A (po vrstici) je %s. \nLastni vrednosti sta %s' %(str(numpy.array(NAs)), str(numpy.linalg.eig(matA)[0])))

#create dummy widget
# Zero-sized FloatText used purely as a change trigger for interactive_output.
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))

#create button widget
START = widgets.Button(
    description='Test',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Test',
    icon='check'
)

def on_start_button_clicked(b):
    #This is a workaround to have interactive_output call the callback:
    # force the value of the dummy widget to change
    if DW.value> 0 :
        DW.value = -1
    else:
        DW.value = 1
    pass
START.on_click(on_start_button_clicked)

out = widgets.interactive_output(main_callback,{'matA':A,'DW':DW})
# Layout: plot on the left, "A =" label plus the editable matrix and the
# trigger button on the right.
out1 = widgets.HBox([out,
                     widgets.VBox([widgets.Label(''),widgets.Label(''),widgets.Label(''),widgets.Label('$\qquad \qquad A=$')]),
                     widgets.VBox([widgets.Label(''),widgets.Label(''),widgets.Label(''),A,START])])
out.layout.height = '450px'
display(out1)

# +
#create dummy widget 2
# Second zero-sized trigger widget, dedicated to the "show answers" button.
DW2 = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
DW2.value = -1

#create button widget
START2 = widgets.Button(
    description='Prikaži pravilne odgovore',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Pritisni za prikaz pravilnih odgovorov',
    icon='check',
    layout=widgets.Layout(width='200px', height='auto')
)

def on_start_button_clicked2(b):
    #This is a workaround to have interactive_output call the callback:
    # force the value of the dummy widget to change
    if DW2.value> 0 :
        DW2.value = -1
    else:
        DW2.value = 1
    pass
START2.on_click(on_start_button_clicked2)


def main_callback2(DW2):
    """Toggle the model answer: shown on positive trigger values, hidden otherwise."""
    if DW2 > 0:
        display(Markdown(r'''Odgovor: Matriko lahko definiramo tako, da izberemo vrstične vektorje, ki so ortogonalni glede na jedro matrike. Možna matrika je tako npr.:
$$
A=\begin{bmatrix} 0 & 1 \\ 0 & 1 \end{bmatrix}.
$$'''))
    else:
        display(Markdown(''))


#create a graphic structure to hold all widgets
alltogether2 = widgets.VBox([START2])
out2 = widgets.interactive_output(main_callback2,{'DW2':DW2})
#out.layout.height = '300px'
display(out2,alltogether2)
ICCT_si/examples/04/SS-14-Ravnotezne_tocke_Primer_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # $$ # \textbf{ # \begin{center} # { # \large{School of Engineering and Applied Science (SEAS), Ahmedabad University}\vspace{4mm} # } # \end{center} # % # \begin{center} # \large{B.Tech(ICT) Semester IV: Probability and Random Processes (MAT 202) }\\ \vspace{3mm} # \end{center} # } # \begin{itemize} # \item Group No : SM1 # % \item Name (Roll No) : Group Members # \item Roll no: <NAME> (AU1841040) # \item Roll no: <NAME> (AU1841034) # \item Project Title: Modelling probabilistic fatigue crack propogation data and predicting model parameters # # \end{itemize} # $$ # # # Introduction # # ## Background # # Mechanical modelling tends to be a very complex mathematical task.One of the most important problems realted to modelling probability in mechanics is to find the reliabity of materials that are used to build objects. A lot of research has taken place to compute fatigue effects on materials and structures.There have been a lot of developments in this area after the proposal Paris' Law. Most of the models proposed realted to fatigue crack propogation are deterministic and have some limitations such as that they arise from arbitary empirical assumptions.A proper estimation of fatigue crack porpogation rates with respect to residual stress and stress ratio helps us to estimate optimal materials for mechanical tasks. For e.g. : To determine the type of steel to be used when we are building a bridge. Proper estimation of fatigue crack proporgation and finding its probabilisitic curves also gives great insights to the propertiees of material and can be useful when the material is being improved, i.e. at the time of development on newer types of alloys. 
# # The First breakthrough in the field of fatigue propogation estimation was the Paris' Law which was followed by various approaches[8].Initially Local strain-based approches were proposed to model fatigue crack propogation on notch based components further a link was established between local strain-based approached to fatigue and Frature mechanics based fatigue crack propogation models.[6,9-14] As research continued models were proposed that using residual stress concepts ecplained stress ratio effects as well as interaction effects on crack growth rates.Several approches were proposed which were analytical and or numerical.[15-18]The underlying concept behind the proposed local approaches for fatigue crack propagation modelling consists of assuming fatigue crack propagation as a process of continuous failure of consecutive representative material elements (continuous re-initializations). Such a kind of approaches has been demonstrated to correlate fatigue crack propagation data from several sources, including the stress ratio effects[14-18]. The crack tip stress-strain fields are computed using elastoplastic analysis, which are applied together a fatigue damage law to predict the failure of the representative material elements. The simplified method of Neuber[19] or Moftakhar[20] et al. may be used to compute the elastoplastic stress field at the crack tip vicinity using the elastic stress distribution given by the Fracture Mechanics.[6,20-21] One Such model (given by Noorzi)[6,14-15] the unigrow model, modelled the fatigue crack growth based on elastic–plastic crack tip stress–strain history regarded the process of fatigue crack propogation as a process of successive crack re-initiation in the crack tip region. [1,6] # # # We take the unigrow model and extend it to relate it with a probabilistic contruct to find probabilistic fatigue crack propagation rates various materials. 
The Unigrow model to derive probabilistic fatigue crack propogation $da/dN-\bigtriangleup K$ fields for a particular selected material ((S355) structural mild steel)[1], for distinct stress ratios.The Deterministic model uses Morrows equation, Strain life relation along with the SWT relation to model parameters deterministically.For Probabilistic approach the strain-life field proposed by Castillo and Fernandez-Canteli [7] and Shane-Watson-Topper-life field [1] which are based on Weibull Distribution are used to generalize the results to account for mean stress effects using percentile curves. The simulation was modelled using the data acquired[9,1] and was further extended where we tried to make a prediction model for Threshold value of life time[N0], Endurance limit of strain [E0], Fatigue Limit of SWT parameter[SWT] and the Weibull parameters(lambda(Position),delta(Scale),Beta(Shape)) by Running Batch Gradient Descent[3,23] on Loss function from [23] and further using probability-weighted moments from [4] to predict N0,E0,SWT and Weibull parameters. The Probabilistic Life time fields are combined with Unigrow model [1-6] to finally compute the probabilistic fatigue crack proppogation field for distinct stress ratios. # # # ## Motivation # # The current deterministic works take into account a lot of parameters and some of these parameters cannot be determined easily or at all and hence these works take arbitary assumptions in the process of modelling. To solve this (Our Base Article) proposes a probabilistic model that not only arises from sound statistical and physical assumtions, it also manages to provide a probabilistic definiton of the whole strain-life field. 
Further, since mechanical modelling is a complex task and takes into account a lot of parameters, we have tried to simplify the process of finding probabilistic fatigue crack propogation fields.Not only this we have tried to use Gradient Descent Regression and Probability-weighted moments method to estimate certain parameters required for the task. # # ## Problem Statement/ Case Study # # # - To determine probabilistic fields of fatigue crack propogation rates wrt residual stress and stress ratio. # - Perfomance metric : how closely the computed probabilistic field data (Strain Life) and plots matches the computed data and plots given in the base article # - To propose a probabilistic Shane-Watson-Topper Field using the mathematical modelling of the Strain-Life Field proposed by Cantelli[7]. # - Performance metric : how closely the shape of p-SWT-n curve match the experimental/deteriminstic data at a given parameter value (probability) # # # # Data Acusition # # - Yes, this Special Assignment is Data Dependent. # - We have gathered data from Two Types of sources: # - Data Mentioned in reference papers [1,5]. # - Data synthesised from using simple linear equations to model plots of datasets given in paper using theortical formulaes supplied. 
# - Postulated experimental data has been synthesised in such a way # # --- # ## Theoratical Generation of Data # --- # # - Strain Life (Morrow equation): # # $$ # \frac{\Delta \epsilon}{2} = \frac{\sigma'_f}{E}(2N_f)^b + \epsilon'_f(2N_f)^c # $$ # - Reversal Stress Ratio: # $$ # \frac{\Delta \sigma}{2} = \sigma_{max} = \sigma_f'(2N_f)^b # $$ # - SWT(Smith,Watson and Topper fatigue damage parameter): # $$ # SWT = \sigma_{max}\frac{\Delta\epsilon}{2} = \frac{(\sigma_f'^2)(2N_f)^{2b}}{E} + \sigma_f'\epsilon_f'(2N_f)^{b+c} # $$ # where,<br> # $\sigma^*_f$: fatigue strenght coefficient,<br> # $b$: fatigue strength exponent,<br> # $\epsilon^*_f$: fatigue ductility coefficient,<br> # $b$: fatigue ductility exponent and<br> # $E$: Young modulus<br> # # # The values of the constants for S355 Mild Steel are: # # |$E GPa$|$\sigma_f' MPa$|$b$|$\epsilon_f'$|c| # |----|----|----|----|----| # |211.60|952.20|-0.0890|0.7371|-0.6640| # # # Probabilistic Model Used/ PRP Concept Used # # We extend the Unigrow Model for Fatigue crack propogation using probabilistic fields and work on the following model represented by: # # ![Block Diagram](BlockDiagram.png) # # The fatigue crack propagation modelling based on local approaches requires a fatigue damage relation to compute the # number of cycles to fail the elementary material blocks. # # Here Our main focus(Highlighted in red bounding box) is to model the generation of probabilistic Strain-Life and SWT-Life fields. The model assumes that the fatigue life, $N_f$, and the total strain amplitude, $Ea$, are random variables. # # # --- # ## General information on Weibul Distribution # --- # If $X$ is a random variable denoting the _time to failure_, the __Weibull distribution__ gives a distribution for which the _failure rate_ is proportional to a power of time. 
#
# $$
# f_X(x) =
# \frac{\beta}{\delta}(\frac{x-\lambda}{\delta})^{\beta-1}e^{-(\frac{x-\lambda}{\delta})^\beta}
# $$
# $$
# F_X(x;\lambda,\delta,\beta) = 1-e^{-({\frac{x-\lambda}{\delta}})^\beta}
# $$
#
# where $\beta > 0$ is the __shape parameter__,
#
# $\delta > 0$ is the __scale parameter__,
#
# $\lambda \leq x$ is the __location parameter__ (the minimum value of $X$).
#
# Percentile points,
#
# $$
# x_p = \lambda + \delta(-log(1-p))^{\frac{1}{\beta}}
# $$
# where $0\leq p\leq 1$
#
# __Important Properties of Weibull Distribution__
#
# - Stable with respect to location and scale
# $$
# X \sim W(\lambda,\delta,\beta) \iff \frac{X-a}{b} \sim W(\frac{\lambda-a}{b},\frac{\delta}{b},\beta)
# $$
#
# - It is stable with respect to minimum operations, i.e., if $X_1,X_2,X_3,\ldots,X_m$ are independent and identically distributed, then
# $$
# X_i\sim W(\lambda,\delta,\beta) \iff min(X_1,X_2,\ldots,X_m) \sim W(\lambda,\delta m^{-\frac{1}{\beta}},\beta)
# $$
# i.e., if a set of independent and identically distributed random variables is Weibull, then its minimum is also a Weibull random variable.
#
# ---
# ### Relevant Variables involved for modelling:
# ---
#
# $P$: Probability of fatigue failure<br>
# $N$: Number of stress cycles to failure<br>
# $N_0$: Threshold value of N (min lifetime)<br>
# $Ea$: Strain <br>
# $Ea_0$: Endurance limit of Ea;<br>
# $SWT$: Smith, Watson and Topper fatigue damage parameter<br>
# $SWT_0$: Fatigue limit<br>
#
# Putting related variables together we have three variables (based on the Buckingham $\Pi$ Theorem)<br>
#
# #### For Strain Life Field
#
# $$
# \frac{N}{N_0},\frac{Ea}{Ea_0},P \\
# P = q(\frac{N}{N_0},\frac{Ea}{Ea_0})
# $$
# where $q()$ is a function we are to determine<br>
# so $P$ can be any monotone function of $\frac{N}{N_0},\frac{Ea}{Ea_0}$, as $h(\frac{N}{N_0})$ $\&$ $g(\frac{Ea}{Ea_0})$
#
# We denote them as
# $$
# N^* = h(\frac{N}{N_0}) \\
# Ea^* = g(\frac{Ea}{Ea_0})
# $$
#
# #### For SWT Life Field
#
# $$
# \frac{N}{N_0},\frac{SWT}{SWT_0},P \\
# P = q(\frac{N}{N_0},\frac{SWT}{SWT_0})
#
$$ # where $q()$ is a function we are to determine<br> # so P can be any monotone function of $\frac{N}{N_0},\frac{SWT}{SWT_0}$ , as $h(\frac{N}{N_0})$ $\&$ $g(\frac{SWT}{SWT_0})$ # # We denote them as # $$ # N^* = h(\frac{N}{N_0}) \\ # SWT^* = g(\frac{SWT}{SWT_0}) # $$ # # --- # ### Proper Modelled Construct # --- # # #### Strain-Life Field # # $$ # p = F(N^*_f;E^*_a) = 1 - exp(-(\frac{log\frac{N_f}{N_0}log\frac{Ea}{Ea_0}-\lambda}{\delta})^\beta) # $$ # # here $log(\frac{N_f}{N_0})loglog\frac{Ea}{Ea_0} \geq \lambda$ # # p is the probability of failure, N0 and εa0 are normalizing values, and $\lambda,\delta,\sigma$ are the non-dimensional Weibull # model parameters. # # $$ # N^*Ea^* \sim W(\lambda,\delta,\beta) \\ # N^*_f\sim W(\frac{\lambda}{Ea^* },\frac{\delta}{Ea^* },\beta) \\ # $$ # # #### Proposed SWT-N or S-N Field # # We have proposed SWT field as it has advantages over the normal strain life field as it uses the SWT fatigue damage parameter.Using this Damage Parameter we can account for mean stress effects on fatigue life # prediction. # # $$ # p = F(N^*_f;E^*_a) = 1 - exp(-(\frac{log\frac{N_f}{N_0}log\frac{SWT}{SWT_0}-\lambda}{\delta})^\beta) # $$ # # here $log(\frac{N_f}{N_0})loglog\frac{SWT}{SWT_0} \geq \lambda$ # # p is the probability of failure, N0 and $SWT_0$ are normalizing values, and $\lambda,\delta,\sigma$ are the non-dimensional Weibull # model parameters. # # $$ # N^*Ea^* \sim W(\lambda,\delta,\beta) \\ # N^*_f\sim W(\frac{\lambda}{SWT^* },\frac{\delta}{SWT^* },\beta) \\ # $$ # # --- # ### Justification of Weibull for S-N fields # --- # # Considerations: # # - __Weakest Link__: Fatigue lifetime of a longitudinal element is the minimum of its constituting particles.Thus we need minimum model for a longitudinal element $L = ml$ # # - __Stability__: The distribution function must hold for different lengths. 
# # - __Limit Behaviour__: Need Asymptotic family of Distribution # # - __Limited Range__: $N^*$ & $SWT^*$ has finite lower bound,coincide with theoretical end of CDF # $$ # N\geq N_0 \\ # SWT \geq SWT_0 # $$ # - __Compatibility__: $$E(N^*;SWT^*) = F(SWT^*;N^*)$$ i.e., Distribution of $N^*$ can be determined based on given $SWT^*$ and similarly $SWT^*$ from $N^*$. # # ___All these are Statisfied by Weibull Distribution___ # # # $p$-curves # $$ # log(\frac{SWT}{SWT^*}) = \frac{\lambda + \delta[-log(1-p)]^{\frac{1}{\beta}}}{log(\frac{N}{N_0})} # $$ # # Final Distribution # # $$ # N^*SWT^* \sim W(\lambda,\delta,\beta) \\ # log(\frac{N}{N_0)})log(\frac{SWT}{SWT_0}) \sim W(\lambda,\delta,\beta) \\ # log(\frac{N}{N_0)})\sim W(\frac{\lambda}{log(\frac{SWT}{SWT_0}) },\frac{\delta}{log(\frac{SWT}{SWT_0}) },\beta) # $$ # ---- # # The values for this model are: # # |$logN_0$|$logSWT_0$|$\lambda$|$\delta$|$\beta$| # |----|----|----|----|----| # |-4.1079|-4.4317|53.8423|7.2698|3.6226| # # |$logN_0$|$log\epsilon_{a0}$|$\lambda$|$\delta$|$\beta$| # |----|----|----|----|----| # |-3.2593|-9.1053|36.6676|5.8941|4.6952| # # # # # Pseudo Code/ Algorithm # # Algorithm for: # # ## PDF of Weibull distribution # # $$ # f_X(x) = # \frac{\beta}{\delta}(\frac{x-\lambda}{\delta})^{\beta-1}e^{-(\frac{x-\lambda}{\delta})^\beta} # $$ # # where $\beta > 0$ is the __shape parameter__, # # $\delta > 0$ is the __scale parameter__, # # $\lambda > x$ is the __location parameter__ (the minimum value of X). # # # # ## CDF of Weibull distribution # # $$ # F_X(x;\lambda,\delta,\sigma) = 1-e^{-({\frac{x-\lambda}{\delta}})^\beta} # $$ # # where $\beta > 0$ is the __shape parameter__, # # $\delta > 0$ is the __scale parameter__, # # $\lambda > x$ is the __location parameter__ (the minimum value of X). 
# # # # ## Percentile Curve of Weibull distribution # # $$ # x_p = \lambda + \delta(-log(1-p))^{\frac{1}{\beta}} # $$ # where $0\leq p\leq 1$ # # ## Simulation of Model # # - Gathering Data from Base Article and Associated Article, Generating Unspecified Data using simple Linear Equation and Matrix Operations. # - Data Gathering information is already given. # - Matrix and Mathematical operations applied using numpy. # - Estimating/Predciting values for weibull distribution using any estimation/prediction method on the gathered data. # - Estimated values taken from paper # - Estimation using one selected method shown in 'New Analysis'. # - Initialising the coded Weibull Distribution class on required data. # - Weibull distribution class contains methods the calculate pdf,cdf and percentile curves based on the Mathematical Formulaes stated above. # - The initialisation of random variable is done using inbuilt weibull distribution function in numpy. # - Plotting the p-SWT-N curves for various values of p along with the plot of postulated data (assumed to be experimental). # - Matplotlib is used to generate all the plots. # - Applying probabilistic field data to deterministic Unigrow Mode. 
# # # # ![algo1](images/algo1.png) # # # # Coding and Simulation # # ## Simulation Framework # # The values for this model are: # # - p-SWT-N field # # |$logN_0$|$logSWT_0$|$\lambda$|$\delta$|$\beta$| # |----|----|----|----|----| # |-4.1079|-4.4317|53.8423|7.2698|3.6226| # # - p-Ea-N field # # |$logN_0$|$log\epsilon_{a0}$|$\lambda$|$\delta$|$\beta$| # |----|----|----|----|----| # |-3.2593|-9.1053|36.6676|5.8941|4.6952| # # ## Reproduced Figures # # - Used tools: # - Python # - Matplotlib # - Numpy # # # # Comparison of results Probabilistic Strain Life Field: # # ![penpaper](images/p_e_n_paper.PNG) <br> # ![penmodel](images/p_e_n_model.PNG) <br> # # # # __The Above plot (for Strain vs Number of Cycles to Failure) shows percent of probability curves along with postulated data (Experimental for 0.5 Percent).__ # # Comparison of results Probabilistic Shane Watson Damage Life Field: # # ![psnpaper](images/p_s_n_paper.PNG) <br> # ![psnmodel](images/p_s_n_model.PNG) # # # # __The Above plot (for SWT Damage Parameter vs Number of Cycles to Failure) shows percent of probability curves along with postulated data (Experimental for 0.5 Percent).__ # # ## New Work Done # # Estimating and supplying analysis of parameters assumed/supplied externally and creating a closed form model with no external dependance that computes probabilistic fatigue crack propogation data using only the natural required input paramters. # # ### New Analysis # # - To Estimate and Check if the SWT-N parameters can be predicted using gradient descent based regression. # - Performance metric : To see if the Loss is reduced by Gradient descent or not and if the gradient descent works, if the predicted values match the supplied data. # # --- # #### Estimation of Threshold Value $(N_0,\Delta\sigma_0)$ # --- # # Using the analysis done by <NAME>. and <NAME>. 
[20] for lifetime regression models we formulate the following to predict the $SWT_0 and N_0$ # # # $$ # E[log(\frac{N}{N_0)}) | log(\frac{\Delta SWT}{\Delta SWT_0)})] = \frac{E[N^*\Delta SWT^*]}{log(\frac{\Delta SWT}{\Delta SWT_0)})} # $$ # $$ # E[log(N) | log(\frac{\Delta SWT}{\Delta SWT_0)})] = log(N_0) + \frac{K}{log(\frac{\Delta SWT}{\Delta SWT_0)})} \\ # where, K = \lambda + \delta\Gamma(1+\frac{1}{\beta}) # $$ # Minimize Error Function $Q$ # $$ # Q = \sum_{i=0}^m\sum_{j=1}^{n_i}(logN_{ij} - logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0})^2 # $$ # to get $logN_0$ and $log\Delta SWT_0$ # # We further minimise this cost function using a Gradient Descent algorithm to find optimal values of $N_0$ and $SWT_0$ # # __The intial Estimation of Threshold__ # - The starting parameters provided to gradient descent are calculated mathematically using, # # $$ # \mu_i = \frac{1}{n_i}\sum_{j=1}^{n_i}(logN_{ij} = logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0})^2 # $$ # # --- # #### Gradient Descent # --- # # Gradient Descent is used to find the minimun of a function known as cost function, here in our case is the error function.Gradient Descent is a iterative learning algorithm and with each step it moves closer to the minimum based on the gradient of the cost function and learning rate. 
[3-23] # # $$ # Cost function = J(\theta) \\ # Gradient = \frac{\partial J(\theta)}{\partial \theta_i} \\ # New parameter = \theta_i = \theta_i - \alpha \frac{\partial J(\theta)}{\partial \theta_i} \\ # $$ # # where $\theta$ are the parameters and $\alpha$ is the learning rate.Here, # # $$ # Cost function = Q = \sum_{i=0}^m\sum_{j=1}^{n_i}(logN_{ij} - logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0})^2 \\ # $$ # $$ # \frac{\partial Q}{\partial logN_0} = \frac{1}{M}\sum -2(logN - logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0}) \\ # $$ # $$ # \frac{\partial Q}{\partial K} = \frac{1}{M}\sum \frac{-2}{log\Delta SWT_i - log\Delta SWT_0}(logN - logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0}) \\ # $$ # $$ # \frac{\partial Q}{\partial logSWT_0} = \frac{1}{M}\sum \frac{-2b}{(log\Delta SWT_i - log\Delta SWT_0)^2}(logN - logN_0 - \frac{K}{log\Delta SWT_i - log\Delta SWT_0}) # $$ # # # # # - Estimate the parameters of Weibull Distribution using PWM estimation on real and predicted data. # - Performance metric : To compare and see if the computed Weibull parameters using PWM match those supplied with the paper and if the predicted vs deterministic parameters match. 
#
# ---
#
# #### Probability Weighted Moments (PWM) method to determine Weibull parameters
#
# ---
#
# The final equations based on paper [2] are,
# $$
# \frac{3M_2 - M_0}{2M_1 - M_0} = \frac{2 - 3\cdot2^{\frac{-1}{\beta}} + 3^{\frac{-1}{\beta}}}{1 - 2^{\frac{-1}{\beta}}} \\
# \delta = \frac{2M_1 - M_0}{(1 - 2^{\frac{-1}{\beta}})\Gamma_{\beta}} \\
# \lambda = M_0 - \delta \Gamma_{\beta}
# $$
# where $M_r = M_{1,r,0},r = 0,1,2$ and $\Gamma_{\beta} = \Gamma(1 + \frac{1}{\beta})$ and
#
# $$
# \hat{M_0} = \frac{1}{n}\sum_{i=1}^n x_i \\
# \hat{M_1} = \frac{1}{n(n-1)}\sum_{i=1}^n (i - 1)x_i \\
# \hat{M_2} = \frac{1}{n(n-1)(n-2)}\sum_{i=1}^n (i - 1)(i - 2)x_i \\
# $$
#
# Since $\beta$ cannot be determined mathematically (in closed form) from the equation, a numerical method is used:
#
# - Newton's Method for Mathematical Approximation
# $$
# x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)}
# $$
# where,
# $$
# f(x_n) = 2^{x}(C-3) + 3^{x} - (C-2) \\
# f'(x_n) = (log2)(2^x)(C-3) + log3(3^x) \\
# $$
# where $x = \frac{-1}{\beta}$ and $C = \frac{3M_2 - M_0}{2M_1 - M_0}$
#
# - Determining the p-Nf Weibull distribution given a fixed Value Of SWT (Shane Watson Damage Parameter)
#     - Performance metric : To compare and observe distributions of similar shape with differences in probability of failure for different damage parameter values
#
# ---
# #### p-Nf given SWT
# ---
#
# $$
# N^*SWT^* \sim W(\lambda,\delta,\beta) \\
# log(\frac{N}{N_0})log(\frac{SWT}{SWT_0}) \sim W(\lambda,\delta,\beta) \\
# log(\frac{N}{N_0})\sim W(\frac{\lambda}{log(\frac{SWT}{SWT_0})},\frac{\delta}{log(\frac{SWT}{SWT_0})},\beta) \\
# $$
#
#
# ### New Coding / Algorithm
#
# #### Estimation of Threshold Value $(N_0,\Delta\sigma_0)$
#
# ![algo2](images/algo2.png)
#
# #### Probability Weighted Moments (PWM) method to determine Weibull parameters
#
# ![algo3](images/algo3.png)
#
# #### p-Nf given SWT
#
# ![algo4](images/algo4.png)
#
# ### New Results
#
# - To Estimate and Check if the Ea-N and SWT-N parameters can be predicted using gradient
descent based regression. # # --- # #### Estimating Strain Life Field Parameters $Nf_0$ and $Ea_0$ Using Gradient Descent # --- # # ![StrainLifeCost](images/costviterEa.png) # # This is plot of Cost vs Iteration, it shows how cost function changes with iterations.It shows that the cost function taken from [2] converges using gradient descent algorithm on data generated Strain-Life Field. # # ![Strain Life Proposed V Predicted](images/StrainLifePropvPred.png) # # This is a plot of the proposed and predicted values and can be used to compare them. # # |Proposed $logN_0$|Proposed $log\epsilon_{a0}$|Predicted $logN_0$|Predicted $log\epsilon_{a0}$| # |----|----|----|----| # |-3.2593|-9.1053|-3.2169|-9.1285| # # --- # #### Estimating SWT Life Field Parameters $Nf_0$ and $SWT_0$ Using Gradient Descent # --- # # ![Strain Life Cost](images/costviterswt.png) # # This is plot of Cost vs Iteration, it shows how cost function changes with iterations.It shows that the cost function taken from [2] converges using gradient descent algorithm on data generated for SWT-Life Field. # # ![Strain Life Proposed V Predicted](images/SWTLifePropvPred.png) # # This is a plot of the proposed and predicted values and can be used to compare them. # # |Proposed $logN_0$|Proposed $logSWT_0$|Predicted $logN_0$|Predicted $logSWT_0$| # |----|----|----|----| # |-4.1079|-4.4317|-3.8413|-4.383809| # # # - Estimate the parameters of Weibull Distribution using PWM estimation on real and predicted data. 
# # --- # #### Estimating Strain Life Field Weibull Parameters Location, Scale and Shape Using Probability Weighted Moments # --- # # ![Strain Life Weibull](images/PWMPlotstrain.png) # # This is a plot of the proposed and predicted values for the Location, Scale and Shape parameters of the Weibull Distribution of the Strain Life Field # # |Proposed Location|Proposed Scale|Proposed Shape|Predicted Location|Predicted Scale|Predicted Shape| # |----|----|----|----|----|----| # |62.8423|7.2698|3.6226|58.31011| 2.80965| 2.30816| # # --- # #### Estimating SWT Life Field Weibull Parameters Location, Scale and Shape Using Probability Weighted Moments # --- # # ![SWT Life Weibull](images/PWMPlotswt.png) # # This is a plot of the proposed and predicted values for the Location, Scale and Shape parameters of the Weibull Distribution of the SWT Life Field # # |Proposed Location|Proposed Scale|Proposed Shape|Predicted Location|Predicted Scale|Predicted Shape| # |----|----|----|----|----|----| # |43.6676|5.8941|4.6952|38.11514|3.8694|4.6747| # # # # # - Determining the p-Nf Weibull distribution given a fixed value of SWT (Smith–Watson–Topper damage parameter) # # --- # #### Estimating PDF of Nf Given a constant Value of SWT # --- # # ![P-Nf for SWT](images/nfswtpdf.png) # # # These plots show the PDF of the number of cycles to failure given one particular value of the Smith–Watson–Topper damage parameter. # # # # # # ### New Inferences # # - To Estimate and Check if the SWT-N parameters can be predicted using gradient descent based regression. # - By observing the cost vs iterations plot for both the fields we can confidently say that the cost function converges to a minimum and that the values predicted by gradient descent are approximately the same as those proposed, under the assumption that the data postulated here can differ from that given in the paper as it was generated synthetically.
# - We can infer that creating a closed form model on experimental data is possible given we combine the model with our proposed estimation model for SWT-N or Ea-N parameters # # - Estimate the parameters of Weibull Distribution using PWM estimation on real and predicted data. # - The values predicted by PWM method match the proposed values in the paper for both the probabilistic models under the error assumption that the postulated data here can differ from that given in paper as it was generated synthetically. # - We can infer that creating a closed form model on experimental data is possible given we combine the model with our proposed estimation model for SWT-N or Ea-N Weibull parameters # # - Determining the p-Nf Weibull distribution given a fixed Value Of SWT (Shane Watson Damage Parameter) # # - The Plots show the PDF of Number of Cycles to failure given one particular value of Shane Watson Damage parameter, observing and comparing these we see that number of loading cycles to failure reduces as SWT increases showing that the distribution is practical in nature. # # # # Inference Analysis/ Comparison # # Add Stuff Here From Sir Arpitsinhs's Notebook. # # # Contribution of team members # # ## Technical contribution of all team members # # ## Non-Technical contribution of all team members # # # Submission checklist for uploading on Google Drive # # This section provides the submission checklist for smooth and efficient submission process. (This is for your # reference and please remove this while writing your report). # # - Soft copy of this project Report # - Soft copy of Abstract # - Soft copy of Concept Map 1 and 2 # - Soft copy of base article # - Soft copy of analysis (hand written)(jupyter notebooks) # - Folder of matlab(python) codes (with proper naming) # - Folder of reproduced results in .fig and .jpg format # - latex (.tex) file of the project report. 
# # # References # # - [1] <NAME>, <NAME>, <NAME>, <NAME>, Modelling probabilistic fatigue crack propagation rates for a mild structural steel, Frattura ed Integrità Strutturale, 31(2015) 80-96. # - [2] <NAME>., <NAME>., Lifetime Regression Models Based on a Functional Equation of Physical Nature”, Journal of Applied Probability, 24 (1987) 160-169. # - [3] “Gradient Descent,” Gradient Descent - ML Glossary documentation. [Online]. Available: https://ml-cheatsheet.readthedocs.io/en/latest/gradient_descent.html. Accessed: 01-Apr-2020. # - [4]<NAME>, <NAME>, General probability weighted moments for the three-parameter Weibull Distribution and their application in S–N curves modelling, International Journal of Fatigue 33(2011) 1533-1538. # - [5] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., A comparison of the fatigue behavior between S355 and S690 steel grades, Journal of Constructional Steel Research, 79 (2012) 140–150. # - [6] <NAME>., <NAME>., <NAME>., A two parameter driving force for fatigue crack growth analysis, International Journal of Fatigue, 27 (2005)1277-1296. # - [7] <NAME>., <NAME>., A Unified Statistical Methodology for Modeling Fatigue Damage, Springer, (2009). # - [8] <NAME>., <NAME>., <NAME>., A rational analytic theory of fatigue, Trend Engineering, 13 (1961) 9-14. # - [9] <NAME>., A study of the effects of the cyclic thermal stresses on a ductile metal, Translations of the ASME, 76 (1954) 931-950. # - [10] <NAME>., Behaviour of materials under conditions of thermal stress, NACA TN-2933, National Advisory Committee for Aeronautics, (1954). # - [11] <NAME>., Cyclic plastic strain energy and fatigue of metals, Int. Friction, Damping and Cyclic Plasticity, ASTM STP 378, (1965) 45-87. # - [12] <NAME>., <NAME>., <NAME>.H., A Stress-Strain Function for the Fatigue of Metals, Journal of Materials, 5(4) (1970) 767-778. 
# - [13] <NAME>., <NAME>., <NAME>., <NAME>., Local stress–strain field intensity approach to fatigue life prediction under random cyclic loading, International Journal of Fatigue, 23 (2001) 903–910. # - [14] <NAME>., <NAME>., <NAME>., A study of the stress ratio effects on fatigue crack growth using the unified two-parameter fatigue crack growth driving force, International Journal of Fatigue, 29 (2007) 1616-1633. # - [15] <NAME>., <NAME>., <NAME>., Prediction of fatigue crack growth under constant amplitude loading and a single overload based on elasto-plastic crack tip stresses and strains, Engineering Fracture Mechanics, 75 (2008) 188-206. # - [16] <NAME>., <NAME>., Fatigue crack propagation model based on a local strain approach, Journal of Constructional Steel Research, 49 (1999) 139–155. # - [17] <NAME>., A notch stress-strain analysis approach to fatigue crack growth, Engineering Fracture Mechanics, 21 (1985) 245-261. # - [18] <NAME>., <NAME>., A methodology for predicting fatigue crack propagation rates in titanium based on damage accumulation, Scripta Materialia, 56 (2007) 681–684. # - [19] <NAME>., Theory of stress concentration for shear-strained prismatic bodies with arbitrary nonlinear stress–strain law, Trans. ASME Journal of Applied Mechanics, 28 (1961) 544–551. # - [20] <NAME>., <NAME>., <NAME>., Calculation of elasto-plastic strains and stresses in notches under multiaxial loading, International Journal of Fracture, 70 (1995) 357-373. # - [21] <NAME>., <NAME>., <NAME>., An Efficient Method for Calculating Multiaxial Elasto-Plastic Notch Tip Strains and Stresses under Proportional Loading, Fatigue and Fracture Mechanics, ASTM STP 1296, <NAME>, <NAME>, <NAME>, Eds., American Society for Testing and Materials, 27 (1997) 613-629. # - [22] <NAME>., <NAME>., Elastic–plastic fatigue crack growth analysis under variable amplitude loading spectra,” International Journal of Fatigue, 31 (2009) 1828–1836. 
# - [23] <NAME>, An overview of gradient descent optimization algorithms, arXiv:1609.04747v2 [cs.LG] 15 Jun 2017
.ipynb_checkpoints/Report-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Airbnb listings munging.
#
# Reads two listings snapshots (A and B), aggregates per-neighbourhood
# statistics (average/min/max price, room-type counts, mean reviews per
# month) and writes the CSV files consumed by the visualization front end.
# The A and B pipelines were previously duplicated line-for-line; they are
# now a single helper applied to each snapshot. Output files and their
# column headers are unchanged.

import pandas as pd
import requests
import io

# https://raw.githubusercontent.com/your-username/name-of-the-repository/master/name-of-the-file.csv
DATA_DIR = '/Users/annaskarpalezou/Desktop/InfoViz-Final-Project/iv-airbnb/public/data'


def summarize_neighbourhoods(listings, reviews_label):
    """Aggregate one listings frame per neighbourhood.

    Parameters
    ----------
    listings : pd.DataFrame
        Raw listings with at least 'neighbourhood', 'room_type', 'price',
        'host_id', 'id' and 'reviews_per_month' columns.
    reviews_label : str
        Header to use for the mean reviews-per-month column (datasets A
        and B historically used different labels; preserved so downstream
        consumers keep working).

    Returns
    -------
    (avg_prices, tooltip_stats) : tuple of pd.DataFrame
        avg_prices keeps the raw 'price' header (written to av_prices*.csv);
        tooltip_stats is the concatenated table written to tooltip_stats*.csv.
    """
    grouped = listings.groupby('neighbourhood')

    # Average price keeps its original 'price' header for av_prices*.csv.
    avg_prices = grouped[['price']].mean()

    max_price = grouped[['price']].max()
    min_price = grouped[['price']].min()
    max_price.columns = ['maximum']
    min_price.columns = ['minimum']

    # Listings count per neighbourhood (computed in the original notebook
    # for inspection; not written out).
    n_listings = grouped[['host_id']].count()

    # Room-type counts per neighbourhood.
    # NOTE(review): the positional ::3 slicing assumes every neighbourhood
    # has exactly the three room types, in the same sorted order — confirm
    # against new snapshots before reuse.
    room_counts = listings.groupby(['neighbourhood', 'room_type'])[['id']].count()
    neighbourhoods = list(grouped.groups.keys())
    room_types = pd.DataFrame(
        {
            'Entire': room_counts['id'].iloc[::3].values,
            'Private': room_counts['id'].iloc[1::3].values,
            'Shared': room_counts['id'].iloc[2::3].values,
        },
        index=neighbourhoods,
    )

    reviews = grouped[['reviews_per_month']].mean()
    reviews.columns = [reviews_label]

    tooltip_stats = pd.concat(
        [
            avg_prices.rename(columns={'price': 'Average'}),
            max_price,
            min_price,
            room_types,
            reviews,
        ],
        axis=1,
    )
    return avg_prices, tooltip_stats


df = pd.read_csv(f'{DATA_DIR}/listings.csv')
dfB = pd.read_csv(f'{DATA_DIR}/listingsB.csv')

# +
df.head(15)
# -

dfB.head(5)

avg_prices, tooltip_stats = summarize_neighbourhoods(df, 'Mean n of reviews per month')
avg_pricesB, tooltip_statsB = summarize_neighbourhoods(dfB, 'reviews')

avg_prices.to_csv(f'{DATA_DIR}/av_prices.csv', index=True)
avg_pricesB.to_csv(f'{DATA_DIR}/av_pricesB.csv', index=True)

tooltip_stats.to_csv(f'{DATA_DIR}/tooltip_stats.csv', index=True)
tooltip_statsB.to_csv(f'{DATA_DIR}/tooltip_statsB.csv', index=True)
iv-airbnb/src/Data_munging.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/MoisesAcostaNava/daa_2021_1/blob/master/28Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -

# Six independent exercise cells, each counting the "palindrome times" on a
# 24-hour clock (HH:MM strings that read the same forwards and backwards).
# The trailing arithmetic comments are the students' hand-counted operation
# tallies, kept verbatim.
# NOTE(review): the notebook lost its indentation when flattened to one
# line; the block structure below is a reconstruction — confirm against the
# upstream notebook (especially cell 1) before relying on exact output.

# + id="HpYYqET0vGrX"
# Cell 1: digit-by-digit simulation. The clock is h1 h2 : m1 m2 and ticks
# one minute per iteration until 23:59; a palindrome is when the hour
# digits mirror the minute digits.
h1 = 0
h2 = 0
m1 = 0
m2 = 0
# 1440 + 24 *6
contador = 0  # palindrome counter
# 5 + (1440 + ?) * 2 + 144 + 24 + 2= 3057
while [h1, h2, m1, m2] != [2,3,5,9]:
    if [h1, h2] == [m2, m1]:
        print(h1, h2,":", m1, m2)
        m2 = m2 + 1
        if m2 == 10:
            m2 = 0
            m1 = m1 + 1
            if m1 == 6:
                h2 = h2 + 1
                m2 = 0
        contador = contador + 1
    m2 = m2 + 1
    if m2 == 10:
        m2 = 0
        m1 = m1 + 1
        if m1 == 6:
            m1 = 0
            h2 = h2 +1
            if h2 == 10:
                h2 = 0
                h1 = h1 +1
print("Numero de palindromos: ",contador)

# + id="hpJOvBgKwwAj"
# Cell 2: string approach — walk "0000".."2359" as zero-padded strings and
# compare each to its reverse.
horario="0000"
contador=0
while horario!="2359":
    inv=horario[::-1]
    if horario==inv:
        contador+=1
        print(horario[0:2],":",horario[2:4])
    new=int(horario)
    new+=1
    horario=str(new).zfill(4)
print("son ",contador,"palindromos")
# 2 + (2360 * 4 ) + 24

# + id="VVwCVbgo0r5J"
# Cell 3: build the full list of zero-padded "HH:MM" strings, then collect
# the ones equal to their reverse.
lista=[]
for i in range(0,24,1): # 24
    for j in range(0,60,1): # 60 1440
        if i<10:
            if j<10:
                lista.append("0"+str(i)+":"+"0"+str(j))
            elif j>=10:
                lista.append("0"+str(i)+":"+str(j))
        else:
            if i>=10:
                if j<10:
                    lista.append(str(i)+":"+"0"+str(j))
                elif j>=10:
                    lista.append(str(i)+":"+str(j))
# 1440 + 2 + 1440 + 16 * 2 = 2900
lista2=[]
contador=0
for i in range(len(lista)): # 1440
    x=lista[i][::-1]
    if x==lista[i]:
        lista2.append(x)
        contador=contador+1
print(contador)
for j in (lista2):
    print(j)

# + id="h6f_tZHy4oHa"
# Cell 4: f-string variant.
# NOTE(review): the y<10 branch pads AFTER the hour instead of before the
# minute, so e.g. 10:05 becomes "100:5" — looks buggy; kept as-is.
for x in range (0,24,1):
    for y in range(0,60,1): #1440 * 3 +13 = 4333
        hora=str(x)+":"+str(y)
        if x<10:
            hora="0"+str(x)+":"+str(y)
        if y<10:
            hora=str(x)+"0"+":"+str(y)
        p=hora[::-1]
        if p==hora:
            print(f"{hora} es palindromo")

# + id="T6muYtDy5Wdg"
# Cell 5: compare the reversed zero-padded hour with the zero-padded minute.
total = int(0)  # counter of the number of palindromes
for hor in range(0,24):  # nested loops advance hours and minutes together
    for min in range(0,60):
        hor_n = str(hor)  # string forms of the current hour/minute
        min_n = str(min)
        if (hor<10):  # zero-pad so hours and minutes are always two digits
            hor_n = ("0"+hor_n)
        if (min<10):
            min_n = ("0"+ min_n)
        if (hor_n[::-1] == min_n):  # slicing reverses the hour string
            print("{}:{}".format(hor_n,min_n))
            total += 1
#1 + 1440 * 5 =7201

# + id="RnmleDAo6FVQ"
# Cell 6: same algorithm as cell 5 with different names; also prints the
# final count.
palindronum= int(0)
for hor in range(0,24):
    for min in range(0,60): # 1440
        principio= str(hor)
        final= str(min)
        if (hor<10):
            principio=("0"+principio)
        if (min<10):
            final=("0"+final)
        if (principio[::-1]==final):
            print(principio +":"+final)
            palindronum= palindronum+1
print(palindronum)
# 1 + 1440 * 5 = 7201
28Octubre.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### 1) Explain the importance of data visualization using Anscombe's quartet.

# !pip install seaborn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display

quartet = sns.load_dataset("anscombe")
quartet

# One sub-frame per quartet member, in order I..IV.
labels = ("I", "II", "III", "IV")
subsets = [quartet[quartet["dataset"] == label] for label in labels]

# Summary statistics of each dataset (check mean and variance).
for subset in subsets:
    display(subset.describe())

# Correlation within each dataset.
for subset in subsets:
    display(subset.corr())

# ### Although the four datasets contain different values, their means, standard deviations, and correlation coefficients are very similar.

# All four datasets scattered on one figure, one marker color per dataset.
marker_styles = ('bo', 'yo', 'go', 'ro')
for subset, style in zip(subsets, marker_styles):
    plt.plot(subset['x'], subset['y'], style)

# One titled subplot per dataset (each created on its own figure, matching
# the original run).
for position, (subset, style, label) in enumerate(zip(subsets, marker_styles, labels), start=1):
    axis = plt.figure().add_subplot(2, 2, position)
    axis.plot(subset['x'], subset['y'], style)
    axis.set_title('DataSet ' + label)

# ### The four datasets' summary statistics are similar enough to suggest the data are the same, but visualization reveals very different distributions.

sns.lmplot(x='x', y='y', data=quartet, hue='dataset', col='dataset', ci=None)

# ### The scatter plots differ because the values differ, yet because the means, standard deviations, and correlations nearly coincide, the four fitted regression lines have the same shape.
# ### This shows that datasets with different contents can share identical descriptive statistics, so visualization — which reveals the data quickly and accurately at a glance — is essential.
visualization.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
from pathlib import Path
# %matplotlib inline

import warnings
# Silence pandas/statsmodels deprecation chatter for the notebook run.
warnings.simplefilter(action='ignore', category=Warning)
# -

# # Return Forecasting: Time Series Analysis & Modelling with CAD-JPY Exchange rate data.

# In this notebook, you will load historical Canadian Dollar-Yen exchange rate futures data and apply time series analysis and modeling to determine whether there is any predictable behavior.

# Currency pair exchange rates for CAD/JPY
cad_jpy_df = pd.read_csv(
    Path("cad_jpy.csv"), index_col="Date", infer_datetime_format=True, parse_dates=True
)
cad_jpy_df.head()

# Trim the dataset to begin on January 1st, 1990
cad_jpy_df = cad_jpy_df.loc["1990-01-01":, :]
cad_jpy_df.head()

# # Initial Time-Series Plotting

# Start by plotting the "Settle" price. Do you see any patterns, long-term and/or short?

# +
# Plot just the "Price" column from the dataframe:
cad_jpy_df["Price"].plot()
# -

# **Question:** Do you see any patterns, long-term and/or short?

# **Answer:** No patterns or trends observed from the plot.

# ---

# # Decomposition Using a Hodrick-Prescott Filter

# NOTE(review): a bare "pip install" only runs through notebook shell magic;
# as a plain Python script this line is a SyntaxError. Prefer
# "%pip install statsmodels" inside the notebook.
pip install statsmodels

# Using a Hodrick-Prescott Filter, decompose the exchange rate price into trend and noise.

# +
import statsmodels.api as sm

# Apply the Hodrick-Prescott Filter by decomposing the exchange rate price
# into two separate series: short-run noise (cycle) and long-run trend.
noise, trend = sm.tsa.filters.hpfilter(cad_jpy_df["Price"])

# +
# Create a dataframe of just the exchange rate price, and add columns for the
# "noise" and "trend" series from above:
decomposition = cad_jpy_df[["Price"]].copy()
decomposition["noise"] = noise
decomposition["trend"] = trend
decomposition.head()

# +
# Plot the Exchange Rate Price vs. the Trend for 2015 to the present
decomposition[["Price","trend"]]["2015-01-01":].plot()
# -

# **Question:** Do you see any patterns, long-term and/or short?

# **Answer:** The price seems to be reducing in the long-term trend; however, the data is very volatile.

# +
# Plot the Settle Noise (the cyclical component from the HP filter)
noise.plot()
# -

# ---

# # Forecasting Returns using an ARMA Model

# Using exchange rate *Returns*, estimate an ARMA model
#
# 1. ARMA: Create an ARMA model and fit it to the returns data. Note: Set the AR and MA ("p" and "q") parameters to p=2 and q=1: order=(2, 1).
# 2. Output the ARMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?
# 3. Plot the 5-day forecast of the forecasted returns (the results forecast from ARMA model)

# Create a series using "Price" percentage returns, drop any NaNs, and check the results:
# (Make sure to multiply the pct_change() results by 100)
# In this case, you may have to replace inf, -inf values with np.nan
returns = (cad_jpy_df[["Price"]].pct_change() * 100)
returns = returns.replace(-np.inf, np.nan).dropna()
returns.tail()

# +
import statsmodels.api as sm

# Estimate an ARMA model using statsmodels (use order=(2, 1)).
# NOTE(review): sm.tsa.ARMA was removed in statsmodels 0.13 — migrate to
# statsmodels.tsa.arima.model.ARIMA(returns, order=(2, 0, 1)).
arma_model = sm.tsa.ARMA(returns,order=(2,1))

# Fit the model and assign it to a variable called results
results = arma_model.fit()

# +
# Output model summary results:
results.summary()

# +
# Plot the 5 Day Returns Forecast
pd.DataFrame(results.forecast(steps=5)[0]).plot()
# -

# **Question:** Based on the p-value, is the model a good fit?
#
# **Answer:** It does not look like a good fit.

# ---

# # Forecasting the Exchange Rate Price using an ARIMA Model

# 1. Using the *raw* CAD/JPY exchange rate price, estimate an ARIMA model.
#    1. Set P=5, D=1, and Q=1 in the model (e.g., ARIMA(df, order=(5,1,1))
#    2. P= # of Auto-Regressive Lags, D= # of Differences (this is usually =1), Q= # of Moving Average Lags
# 2. Output the ARIMA summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?
# 3. Plot a 5 day forecast for the Exchange Rate Price. What does the model forecast predict will happen to the Japanese Yen in the near term?

# +
from statsmodels.tsa.arima_model import ARIMA

# Estimate an ARIMA Model:
# Hint: ARIMA(df, order=(p, d, q))
# NOTE(review): statsmodels.tsa.arima_model.ARIMA was also removed in 0.13;
# use statsmodels.tsa.arima.model.ARIMA instead.
arima_model = ARIMA(cad_jpy_df["Price"],order = (5,1,1))

# Fit the model (note: `results` is rebound here; the ARMA results above are
# no longer reachable after this line)
results = arima_model.fit()

# +
# Output model summary results:
results.summary()

# +
# Plot the 5 Day Price Forecast
pd.DataFrame(results.forecast(steps=5)[0]).plot()
# -

# **Question:** What does the model forecast will happen to the Japanese Yen in the near term?
#
# **Answer:** It is a clear trend that the Yen is losing its value in a consistent way.

# ---

# # Volatility Forecasting with GARCH
#
# Rather than predicting returns, let's forecast near-term **volatility** of Japanese Yen exchange rate returns. Being able to accurately predict volatility will be extremely useful if we want to trade in derivatives or quantify our maximum loss.
#
# Using exchange rate *Returns*, estimate a GARCH model. **Hint:** You can reuse the `returns` variable from the ARMA model section.
#
# 1. GARCH: Create a GARCH model and fit it to the returns data. Note: Set the parameters to p=2 and q=1: order=(2, 1).
# 2. Output the GARCH summary table and take note of the p-values of the lags. Based on the p-values, is the model a good fit (p < 0.05)?
# 3. Plot the 5-day forecast of the volatility.

import arch as arch

# +
# Estimate a GARCH model (zero mean, GARCH(2, 1) volatility process).
# NOTE(review): the variable name shadows arch.arch_model — rename if this
# cell may be re-run.
arch_model = arch.arch_model(returns,mean = "Zero", vol="GARCH", p=2, q=1)

# Fit the model (`results` is rebound once more, to the GARCH fit)
results = arch_model.fit()

# +
# Summarize the model results
results.summary()
# -

# **Note:** Our p-values for GARCH and volatility forecasts tend to be much lower than our ARMA/ARIMA return and price forecasts. In particular, here we have all p-values of less than 0.05, except for alpha(2), indicating overall a much better model performance. In practice, in financial markets, it's easier to forecast volatility than it is to forecast returns or prices. (After all, if we could very easily predict returns, we'd all be rich!)

# Find the last day of the dataset
last_day = returns.index.max().strftime('%Y-%m-%d')
last_day

# +
# Create a 5 day forecast of volatility
forecast_horizon = 5

# Start the forecast using the last_day calculated above
forecasts = results.forecast(start = last_day, horizon = forecast_horizon)
# -

# Annualize the forecast (sqrt of variance, scaled by 252 trading days)
intermediate = np.sqrt(forecasts.variance.dropna() * 252)
intermediate.head()

# Transpose the forecast so that it is easier to plot
final = intermediate.dropna().T
final.head()

# +
# Plot the final forecast
final.plot()
# -

# **Question:** What does the model forecast will happen to volatility in the near term?
#
# **Answer:** Volatility tends to increase in a consistent manner.

# ---

# # Conclusions

# 1. Based on your time series analysis, would you buy the yen now?
#
#    * As the yen is going to lose its value in a consistent way, we should not buy yen.

# 2. Is the risk of the yen expected to increase or decrease?
#    * As volatility is predicted to increase, the risk is going to increase.

# 3. Based on the model evaluation, would you feel confident in using these models for trading?
#    * Based on the p-values and given fluctuations I would not be comfortable using these models.
time_series_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Pandas plotting walkthrough: Series/DataFrame .plot() on random walks.

import pandas as pd
import numpy as np

# +
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline

# Keep the minus sign rendering correctly when a CJK (e.g. Korean) font is in use.
mpl.rcParams['axes.unicode_minus'] = False
# -

df = pd.DataFrame()

# +
# df.plot?
# -

# ## kind : str
# ### The kind of plot to produce:
#
# - 'line' : line plot (default)
# - 'bar' : vertical bar plot
# - 'barh' : horizontal bar plot
# - 'hist' : histogram
# - 'box' : boxplot
# - 'kde' : Kernel Density Estimation plot
# - 'density' : same as 'kde'
# - 'area' : area plot
# - 'pie' : pie plot
# - 'scatter' : scatter plot
# - 'hexbin' : hexbin plot.

# A random daily series starting 2000-01-01.
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts

ts.plot()

ts = ts.cumsum()  # cumulative sum -> random walk
ts.plot()

df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index, columns=list('ABCD'))
df

df.shape

df.plot()

df = df.cumsum()
df.plot()

df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
df3.head()

# Fix: size the new column by df3's own length. The original used len(df),
# which only worked because both frames happened to have 1000 rows.
df3['A'] = pd.Series(range(len(df3)))
df3.head()

df3.plot(x='A', y='B')
chapter_1/.ipynb_checkpoints/Pandas Plot-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# CNN-LSTM human activity recognition on the UCI HAR dataset.
#
# The 9 raw inertial-signal files (total/body acceleration and gyroscope,
# x/y/z axes) are stacked into a [samples, timesteps, features] array; a
# TimeDistributed CNN extracts features from 4 sub-sequences of 32 steps
# each, and an LSTM interprets the extracted features to classify 6
# activities.

# +
import numpy as np
import tensorflow
from sklearn import metrics
from numpy import mean, std, dstack
from pandas import read_csv
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, TimeDistributed, Conv1D, MaxPooling1D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import os, csv
# -

# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '/device:GPU:0'

# Dataset root on disk.
DATASET_DIR = 'D:/GraduationCode/01 Datasets/UCI HAR Dataset/'
# Prediction dumps written by test_model() and read back below. Using
# constants fixes an earlier mismatch where predictions were saved as
# 'cnn_lstm_predictions_trans.txt' but read back as 'predictions_trans.txt'.
PREDICTIONS_FILE = 'cnn_lstm_predictions.txt'
PREDICTIONS_TRANS_FILE = 'cnn_lstm_predictions_trans.txt'


# Read one whitespace-delimited signal file into a 2-D numpy array.
def load_file(filepath):
    dataframe = read_csv(filepath, header=None, delim_whitespace=True)
    return dataframe.values


# Load a list of signal files and stack them along a third axis, producing
# an array shaped [samples, timesteps, features].
def load_group(filenames, prefix=''):
    loaded = list()
    for name in filenames:
        data = load_file(prefix + name)
        loaded.append(data)
    # stack group so that features are the 3rd dimension
    loaded = dstack(loaded)
    return loaded


# Load all 9 inertial-signal files plus labels for one split ('train'/'test').
def load_dataset_group(group, prefix=''):
    filepath = prefix + group + '/Inertial Signals/'
    filenames = list()
    # Total (gravity-included) acceleration.
    filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
    # Body acceleration.
    filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
    # Body angular velocity from the gyroscope.
    filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
    # Load the input data and the class labels.
    X = load_group(filenames, filepath)
    y = load_file(prefix + group + '/y_' + group + '.txt')
    return X, y


# Load train and test splits and one-hot encode the labels.
def load_dataset(prefix=''):
    trainX, trainy = load_dataset_group('train', prefix + DATASET_DIR)
    print(trainX.shape, trainy.shape)
    testX, testy = load_dataset_group('test', prefix + DATASET_DIR)
    print(testX.shape, testy.shape)
    # Labels are 1..6 on disk; shift to 0..5 so one-hot classes start at 0.
    trainy = trainy - 1
    testy = testy - 1
    trainy = to_categorical(trainy)
    testy = to_categorical(testy)
    print(trainX.shape, trainy.shape, testX.shape, testy.shape)
    return trainX, trainy, testX, testy


# Build, train, save and evaluate one CNN-LSTM model.
# Returns (test accuracy, keras History).
def evaluate_model(trainX, trainy, testX, testy):
    verbose, epochs, batch_size = 1, 100, 64
    # verbose, epochs, batch_size = 1, 5, 64
    n_timesteps, n_features, n_outputs = trainX.shape[1], trainX.shape[2], trainy.shape[1]
    # The CNN-LSTM reads the main sequence as blocks: each 128-step window
    # is split into 4 sub-sequences of 32 steps; the CNN extracts features
    # per block and the LSTM interprets the sequence of block features.
    n_steps, n_length = 4, 32
    trainX = trainX.reshape((trainX.shape[0], n_steps, n_length, n_features))
    testX = testX.reshape((testX.shape[0], n_steps, n_length, n_features))
    # Sequential model (tensorflow.keras namespace so TensorBoard works on TF 2.1).
    model = tensorflow.keras.Sequential()
    # TimeDistributed lets the same CNN read each of the 4 sub-sequences.
    model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu'),
                              input_shape=(None, n_length, n_features)))
    model.add(TimeDistributed(Conv1D(filters=64, kernel_size=3, activation='relu')))
    model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
    # Flatten each sub-sequence's feature maps before handing them to the LSTM.
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(100))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    # Softmax over the 6 activity classes.
    model.add(Dense(n_outputs, activation='softmax'))
    # Categorical cross-entropy for one-hot multi-class targets.
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # Print the network structure to the console.
    print(model.summary())
    # Save a picture of the network architecture.
    from tensorflow.keras.utils import plot_model
    plot_model(model, to_file='CNN-LSTM-HAR.png', show_shapes=True, dpi=300)
    # TensorBoard logging callback.
    logdir = os.path.join('cnn_lstm_logs')
    summary = tensorflow.keras.callbacks.TensorBoard(log_dir=logdir, histogram_freq=1)
    history = model.fit(trainX, trainy, epochs=epochs, batch_size=batch_size, verbose=verbose,
                        validation_data=(testX, testy), callbacks=[summary])
    # Persist the trained model.
    model.save('uci_har_cnn-lstm.h5')
    # Dump test-set predictions for the plotting/export cells below.
    test_model(model, testX, verbose, batch_size, n_outputs)
    # Evaluate on the test set.
    _, accuracy = model.evaluate(testX, testy, batch_size=batch_size, verbose=verbose)
    model.evaluate(testX, testy, batch_size=batch_size)
    print("model.metrics_names:{}".format(model.metrics_names))
    return accuracy, history


# Write raw class probabilities and argmax class indices (0..5) to disk.
def test_model(model, testX, verbose, batch_size, n_outputs):
    prediction_list = model.predict(testX, batch_size=batch_size, steps=None, verbose=verbose)
    predictions_transformed = np.eye(n_outputs, dtype=int)[np.argmax(prediction_list, axis=1)]
    np.savetxt(PREDICTIONS_FILE, prediction_list)
    np.savetxt(PREDICTIONS_TRANS_FILE, (np.argmax(predictions_transformed, axis=1)))


# Plot training vs validation accuracy per epoch.
def plot_accuracy(history):
    acc = history.history['accuracy']
    # Fix: the validation metric key is 'val_accuracy'; the previous
    # 'val_accurary' raised KeyError at plot time.
    val_acc = history.history['val_accuracy']
    epochs = range(1, len(acc) + 1)
    plt.figure(dpi=200)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'orange', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()


# Plot training vs validation loss per epoch.
def plot_loss(history):
    acc = history.history['accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    plt.figure(dpi=200)
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'orange', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()


# Print mean and standard deviation of accuracy scores across repeats.
def summarize_results(scores):
    print(scores)
    m, s = mean(scores), std(scores)
    print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))


# Plot predicted vs actual labels for the first 100 test samples, then the
# normalized confusion matrix over the full test set.
def plot_predictions():
    x = list(range(1, 101))
    actual = read_csv(DATASET_DIR + "test/y_test.txt", nrows=100, header=None, delim_whitespace=True)
    # Fix: read the file test_model() actually wrote (the original read
    # "predictions_trans.txt", which is never produced).
    predicted = read_csv(PREDICTIONS_TRANS_FILE, nrows=100, header=None, delim_whitespace=True)
    predicted = ([x + 1 for x in np.array(predicted)])  # back to 1..6 labels
    plt.figure(dpi=200)
    plt.plot(x, actual)
    plt.plot(x, predicted, color='r')
    plt.show()
    with open(DATASET_DIR + 'test/y_test.txt', newline='') as csvfile:
        actualok = list(csv.reader(csvfile))
    with open(PREDICTIONS_TRANS_FILE, newline='') as csvfile:
        predictedok = list(csv.reader(csvfile))
    print("Confusion Matrix:")
    # NOTE(review): actual labels on disk are 1..6 while the saved argmax
    # predictions are 0..5 — confirm the label offset before trusting the
    # absolute confusion-matrix axes.
    confusion_matrix = metrics.confusion_matrix(actualok, predictedok)
    print(confusion_matrix)
    normalised_confusion_matrix = np.array(confusion_matrix, dtype=np.float32)/np.sum(confusion_matrix)*100
    # Plot Results:
    width = 8
    height = 8
    plt.figure(figsize=(width, height), dpi=200)
    plt.imshow(
        normalised_confusion_matrix,
        interpolation='nearest',
        cmap=plt.cm.rainbow
    )
    plt.title("Confusion matrix \n(normalised to % of total test data)")
    print(normalised_confusion_matrix)
    LABELS = [
        "WALKING",
        "WALKING_UPSTAIRS",
        "WALKING_DOWNSTAIRS",
        "SITTING",
        "STANDING",
        "LAYING"
    ]
    plt.colorbar()
    tick_marks = np.arange(6)
    plt.xticks(tick_marks, LABELS, rotation=90)
    plt.yticks(tick_marks, LABELS)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()


# Train/evaluate the model `repeats` times, summarize the scores, and plot
# diagnostics for the last run.
def run_experiment(repeats=5):
    # def run_experiment(repeats=1):
    trainX, trainy, testX, testy = load_dataset()
    # repeat experiment
    scores = list()
    history = None
    for r in range(repeats):
        score, history = evaluate_model(trainX, trainy, testX, testy)
        score = score * 100.0
        print("Accuracy : ", score)
        # Fix: scores were collected in a list that was never appended to,
        # and summarize_results() was defined but never called.
        scores.append(score)
    summarize_results(scores)
    plot_predictions()
    plot_accuracy(history)
    plot_loss(history)


run_experiment()

# +
import pandas as pd

# Fix: read the transformed-predictions file that test_model() wrote.
df = pd.read_csv(PREDICTIONS_TRANS_FILE, sep=" ", header=None, names=["Prediction"])
# Map class indices 0..5 back to human-readable activity names.
temp_dict = {0: 'Walking', 1: 'Walking-upstairs', 2: 'Walking-downstairs',
             3: 'Sitting', 4: 'Standing', 5: 'Laying'}
df['Prediction'] = df.Prediction.map(temp_dict)
df
# -

# Append the readable predictions to a tab-separated export file.
df.to_csv(r'cnn_lstm_activity_predictions.txt', header=None, index=None, sep='\t', mode='a')
06 Time Series Classification/01 UCI-HAR CNN-LSTM Keras 01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Lists myList = ["banana", "cherry", "apple"] print(myList) myList2 = [5, True, "apple", "apple"] print(myList2) # print element with index 0 item1 = myList[0] print(item1) # print second element from the end item2 = myList[-2] print(item2) # go through the list with for loop for i in myList: print(i) # insert a new element to the list with index myList.insert(2, "bluberry") print(myList) # pop element (remove it) and print it item3 = myList.pop() print(item3) print(myList) # sort a list new_list = sorted(myList) print(myList) print(new_list) # list of 5 zeros myList3 = [0]*5 print(myList3) # concatenate lists myList4 = [1, 2, 3, 4, 5] new_list2 = myList3 + myList4 print(new_list2) # reverse a list with slicing and -1 myList5 = [1, 2, 3, 4, 5, 6, 7, 8, 9] a = myList5[::-1] print(a) # # copy a list 1 list_original = ["banan", "cherry", "apple"] list_copy = list_original.copy() list_copy.append("lemon") print(list_copy) print(list_original) # # copy a list 2 list_original = ["banan", "cherry", "apple"] list_copy = list(list_original) list_copy.append("lemon") print(list_copy) print(list_original) # # copy a list 3 list_original = ["banan", "cherry", "apple"] list_copy = list_original[:] list_copy.append("lemon") print(list_copy) print(list_original) # list comprehension myList6 = [1, 2, 3, 4, 5, 6] c = [number*number for number in myList6] print(myList6) print(c)
01_Lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="B3OfNRQA3Gt8" # # Import Character Vocab # # + id="WOlG2SQfrlOw" import numpy as np import json import tensorflow as tf import pandas as pd # - gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_visible_devices(gpus[0], 'GPU') tf.config.experimental.set_memory_growth(gpus[0], True) # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Yq7h6WJTs89M" outputId="1030b00a-bffd-4b49-d302-be9db7b202c8" with open('files/vocab.json', 'r') as f: CHAR_INDICES = json.load(f) # + colab={"base_uri": "https://localhost:8080/", "height": 54} id="iKdOILCutjVM" outputId="b5739165-d224-43fd-a4e3-a313f95f8fa6" print(CHAR_INDICES) # + [markdown] id="iGUQS25OVJIJ" # # Preprocessing text data # + [markdown] id="CsUEAfwPiMj1" # ## look_back # + id="IN2uPnKJdr0h" look_back = 10 # + id="wYP03g3TrMXb" def create_dataset(text, look_back = look_back): """ take text with label (text that being defined where to cut ('|')) and encode text and make label return preprocessed text & preprocessed label """ X, y = [], [] text = '|' + text data = [CHAR_INDICES['<pad>']] * look_back for i in range(1, len(text)): current_char = text[i] before_char = text[i-1] if current_char == '|': continue data = data[1:] + [CHAR_INDICES[current_char]] # X data target = 1 if before_char == '|' else 0 # y data X.append(data) y.append(target) return np.array(X), tf.one_hot(y, 2) # + id="LhL81QrDazGn" def text_pred_preprocessing(text, sequence_len=look_back): """ take unseen (testing) text and encode it with CHAR_DICT //It's like create_dataset() but not return label return preprocessed text """ X = [] data = [CHAR_INDICES['<pad>']] * sequence_len for char in text: char = char if char in CHAR_INDICES else '<unk>' # check char in 
dictionary data = data[1:] + [CHAR_INDICES[char]] # X data X.append(data) return np.array(X) # + id="98YuegoQyZvQ" def word_tokenize(text, class_): cut_indexs = [] words = [] # boolean index of each word 1 if cut before class_ = np.append(class_, 1) # if y_label at i is 1 so add i (index) to cut_indexs for i, value in enumerate(class_): if value == 1: cut_indexs.append(i) # add word after cutting till before ext cutting for i in range(len(cut_indexs)-1): words.append(text[cut_indexs[i]:cut_indexs[i+1]]) return words # + id="uaKNlRBTNtwy" def decode_label(y): return tf.argmax(y, axis=-1).numpy() # + [markdown] id="SqK1KQFO2SgX" # ## Import Dataset # - df_train = pd.read_csv('../Making_Datasets/dataframe/df_train.csv') df_train.head() df_val = pd.read_csv('../Making_Datasets/dataframe/df_val.csv') df_val.head() df_test= pd.read_csv('../Making_Datasets/dataframe/df_test.csv') df_test.head() def prepare_text_dataset(arr_iupac, arr_label): return ' '.join(arr_iupac), '|'.join(arr_label) # + text_train, text_cut_train = prepare_text_dataset(df_train['iupacname'].values, df_train['label'].values) text_val, text_cut_val = prepare_text_dataset(df_val['iupacname'].values, df_val['label'].values) text_test, text_cut_test = prepare_text_dataset(df_test['iupacname'].values, df_test['label'].values) print(text_train[:10], '++', text_cut_train[:20]) print(text_val[:10], '++', text_cut_val[:20]) print(text_test[:10], '++', text_cut_test[:20]) # + colab={"base_uri": "https://localhost:8080/", "height": 50} id="u2afRpduVvog" outputId="8172ad0e-56d0-4238-80a0-f9bce2847802" X_train ,y_train = create_dataset(text_cut_train) X_val, y_val = create_dataset(text_cut_val) X_test, y_test = create_dataset(text_cut_test) print(X_train.shape, y_train.shape) print(X_val.shape, y_val.shape) print(X_test.shape, y_test.shape) # + id="ZLZO-FPGlzz_" training_data = tf.data.Dataset.from_tensor_slices((tf.cast(X_train, tf.float32),y_train)) training_data = 
training_data.shuffle(222623).batch(128).cache().prefetch(tf.data.experimental.AUTOTUNE) validation_data = tf.data.Dataset.from_tensor_slices((tf.cast(X_val, tf.float32), y_val)) validation_data = validation_data.shuffle(47381).batch(128).cache().prefetch(tf.data.experimental.AUTOTUNE) testing_data = tf.data.Dataset.from_tensor_slices((tf.cast(X_test, tf.float32), y_test)) testing_data = testing_data.shuffle(48552).batch(128).cache().prefetch(tf.data.experimental.AUTOTUNE) # + [markdown] id="4SxmThNeVVg4" # # Create Model # + [markdown] id="1GK3s02l336w" # ## Model Architecture # + id="yecuD8XIVXVn" from tensorflow.keras.layers import Bidirectional, LSTM, Dense, Embedding, GRU, Dropout, Conv1D, MaxPooling1D from tensorflow.keras.losses import BinaryCrossentropy from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Sequential # + colab={"base_uri": "https://localhost:8080/", "height": 34} id="ZLVuON-cdnSQ" outputId="b951da14-6d76-4c79-e349-13aa023e4b6f" _input_shape = (look_back, len(CHAR_INDICES)) print(_input_shape[1]) # + id="A0jAOIBzdDYJ" Model = Sequential( [ tf.keras.layers.Input((look_back,), dtype=tf.float16), Embedding(len(CHAR_INDICES), 64, input_length= look_back), #Conv1D(filters=64, kernel_size=5, padding='same', activation='relu'), #MaxPooling1D(pool_size=2), Bidirectional(GRU(32, return_sequences=False), merge_mode='sum'), Dropout(0.5), Dense(2, activation='softmax'), tf.keras.layers.Activation('softmax', dtype=tf.float32) ], name='model' ) # + colab={"base_uri": "https://localhost:8080/", "height": 415} id="WifMOz_Kextr" outputId="d039b7a6-3db3-467e-cc2a-63cf6c79df72" Model.summary() # + # tf.keras.utils.plot_model(Model, to_file='model.png', show_shapes=True, dpi=60) # + [markdown] id="QNzpxjEb38LI" # ## Training Model (with callback) # # + # Stacking Bi-GRU ## (9s) loss: 0.3269 - accuracy: 0.9988 - val_loss: 0.3287 - val_accuracy: 0.9974 # Bi-GRU ## (7s) loss: 0.3277 - accuracy: 0.9982 - val_loss: 0.3292 - val_accuracy: 
0.9970 # Stacking Bi-LSTM ## (9s) loss: 0.3269 - accuracy: 0.9988 - val_loss: 0.3290 - val_accuracy: 0.9974 # Bi-LSTM ## (6s) loss: 0.3280 - accuracy: 0.9981 - val_loss: 0.3297 - val_accuracy: 0.9968 # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="uXJ10S4MfBgN" outputId="57e4935d-f7a7-4829-e67f-bdd183ddae97" Model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001), loss= tf.keras.losses.CategoricalCrossentropy(label_smoothing = 0.2), metrics=['accuracy'] ) checkpoint_path = 'save_models/best_model.hdf5' checkpoint_callback = tf.keras.callbacks.ModelCheckpoint( filepath=checkpoint_path, include_optimizer=False, monitor='val_accuracy', mode='max', verbose=0, save_best_only=True ) earlystop_callback = tf.keras.callbacks.EarlyStopping( monitor='val_loss', patience=5, verbose=1, restore_best_weights=True ) reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1) callback_list = [earlystop_callback, reduce_lr] # - history = Model.fit(training_data, validation_data=validation_data, epochs=100, callbacks=callback_list) # + X_test, y_test = create_dataset(text_cut_test) print(y_test.shape) Model.evaluate(X_test, y_test) # + [markdown] id="L8DssGfR2LKG" # ## Plot loss function and accuracy # + colab={"base_uri": "https://localhost:8080/", "height": 354} id="W5wd1CJWscCl" outputId="66842ba9-2cf2-4734-f849-0cb70a1d55a7" print(type(history.history)) print(history.history.keys()) import matplotlib.pyplot as plt plt.figure(figsize=(8, 5)) plt.plot(history.history['loss'], label='training_loss') plt.plot(history.history['val_loss'], label='validation_loss') plt.legend() plt.show() # - plt.plot(history.history['accuracy'], label='training_acc') plt.plot(history.history['val_accuracy'], label='validation_acc') plt.legend() plt.show() # + [markdown] id="KPG4YEdh4AiH" # # Import model and Test on unseen data # - # + id="yDyk5ksCeDOK" best_model = 
tf.keras.models.load_model('save_models/best_model.hdf5') #best_model.compile(optimizer = tf.keras.optimizers.Nadam(learning_rate=0.0007), loss='categorical_crossentropy', metrics=['accuracy']) # + id="6KsMjtRetqLL" test_data_text = '3-[3-(bromomethyl)-4-hydroxyphenyl]propanoic acid' test_data_text_cut = '3|-|[|3|-|(|bromo|meth|yl|)|-|4|-|hydroxy|phen|yl|]|prop|an|oic acid' # + colab={"base_uri": "https://localhost:8080/", "height": 151} id="WWj1_5XS30p_" outputId="3c112298-265f-46c5-9c61-83e978083095" _, my_y = create_dataset(test_data_text_cut) myText_test = text_pred_preprocessing(test_data_text) pred_test_proba = best_model.predict(myText_test) pred_test = decode_label(pred_test_proba) pred_test[0] = 1 my_y_decode = decode_label(my_y) print(pred_test) #print(my_y_decode) # Count same item between y (label) and pred (prediction) elem_same = (pred_test == my_y_decode).sum() print("\nSame =",elem_same,", Not Same =",pred_test.shape[0]-elem_same) # + colab={"base_uri": "https://localhost:8080/", "height": 70} id="jiGlram741Ne" outputId="c2c6be58-b055-4218-d889-5888a2742779" words = word_tokenize(test_data_text, pred_test) print(words) print('|'.join(words)) # + [markdown] id="e6CR383emWEt" # # Function Confusion Matrix visualization # + [markdown] id="PijmyACBmf0e" # # Plot Confusion Matrix # + colab={"base_uri": "https://localhost:8080/", "height": 337} id="WenTnLTpmp_4" outputId="9f8d8576-ee8f-46f2-cbe9-63e47d92e79a" from nami.visualize import plot_confusion_matrix labels = ["True Neg","False Pos","False Neg","True Pos"] categories = ["Zero", "One"] plot_confusion_matrix(tf.math.confusion_matrix(my_y_decode, pred_test, num_classes=2).numpy(), group_names=labels, categories=categories, cmap='YlGnBu') # + [markdown] id="AIFvoxuOZE7J" # ## BLEU Score # + colab={"base_uri": "https://localhost:8080/", "height": 54} id="k42mTNkqZyA2" outputId="7186287c-3356-4eb6-85b8-11c56171b62f" reference = test_data_text_cut.split('|') print(reference) # + colab={"base_uri": 
"https://localhost:8080/", "height": 34} id="NTRwxpmRZZKh" outputId="c3665a2b-ea77-4c45-b0e5-0c33805ddf5e" import nltk score = nltk.translate.bleu_score.sentence_bleu([reference],words) print(score)
notebooks/ORG_Chem_Word_Segmentation.ipynb
def images_to_array(path):
    """
    This method takes a path containing images in different folders
    representing the classes.

    Each immediate subdirectory of *path* is treated as one class; the
    class index is assigned by the (sorted) position of the directory.

    Parameters:
        path (str): path to image folder

    Returns:
        Two lists containing the images (as RGB numpy arrays) and the
        corresponding one-hot labels.
    """
    images = []
    labels = []
    curr_label = 0
    # BUG FIX: os.listdir returns entries in arbitrary, platform-dependent
    # order.  Sorting makes the class-index assignment deterministic so it
    # matches the alphabetical INDEX_TO_LABEL mapping on every machine.
    for dir_gesture in sorted(os.listdir(path)):
        curr_dir = os.path.join(path, dir_gesture)
        curr_files = sorted(os.listdir(curr_dir))
        print(dir_gesture)
        for file in curr_files:
            img_path = os.path.join(curr_dir, file)
            image_as_array = cv2.imread(img_path)
            # OpenCV loads BGR; convert to RGB for the models/matplotlib.
            image_as_array = cv2.cvtColor(image_as_array, cv2.COLOR_BGR2RGB)
            images.append(image_as_array)
            labels.append(tf.keras.utils.to_categorical(curr_label, NUMBER_OF_CLASSES))
        curr_label = curr_label + 1
    return images, labels
Only bounding boxes that have been detected above the threshold will be returned. Returns: boxes: Bounding boxes of detected hands hand_scores: Probability that the detected object is a hand """ boxes_raw = hand_detections['detection_boxes'].numpy()[0] scores = hand_detections['detection_scores'].numpy()[0] boxes = [] hand_scores = [] for i, score in enumerate(scores): if score > threshold: box = boxes_raw[i] ymin = int(float(box[0])*IMAGE_HEIGHT) xmin = int(float(box[1])*IMAGE_WIDTH) ymax = int(float(box[2])*IMAGE_HEIGHT) xmax = int(float(box[3])*IMAGE_WIDTH) score = round(score,2) hand_scores.append(score) boxes.append((xmin, ymin, xmax, ymax)) return boxes, hand_scores def add_boxes_to_img(img, boxes): """ Draw bounding boxes on an image The calculated probabilities for a hand detection or a gesture classification will be added to the image. The gesture names will be drawn on the image. Input: img: Image that contains the bounding boxes boxes: The bounding boxes to draw hand_scores: The probability of a detected hand gesture_names: The names of classified gestures gesture_scores: The probability of a classified gesture Returns: img: The input image with drawn bounding boxes and scores """ for box in boxes: print(box) (xmin, ymin, xmax, ymax) = box img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0,205,205), 1) return img def crop_rect(img, xmin, xmax, ymin, ymax): """ Crop the the rectangular image to a square Input: img: Image that contains the bounding box xmin, xmax, ymin, ymax: Coordinates of a hand bounding box Returns: img: Input image cropped to a square """ x, y = xmin, ymin w = xmax - xmin # width h = ymax - ymin # height # crop a square form if w > h: y = y - int((w-h)/2) h = w #make sure y is within picture y = max(y,0) y = min(y, IMAGE_HEIGHT-h) elif h > w: x = x - int((h-w)/2) w = h #make sure x is within picture x = max(x,0) x = min(x,IMAGE_WIDTH-w) img = img[y:y+h, x:x+w] img = cv2.resize(img, (64,64)) return img # + [markdown] 
pycharm={"name": "#%% md\n"} # ### Test how many hands per image are detected # # All input images contain exactly one hand. # When running the detection model with the defined threshold of 0.8 one hand is not detected. In all other cases # exactly one hand is detected per image. # + count_no_box_detected = count_exactly_one_box_detected = count_2_and_more_boxes_detected = 0 for img in test_images300x300: boxes, hand_scores = get_detected_hand_boxes(img) if len(boxes) == 0: count_no_box_detected += 1 print("No hand in this image detected!") plt.imshow(img) plt.show() elif len(boxes) == 1: count_exactly_one_box_detected += 1 else: count_2_and_more_boxes_detected += 1 print("Total statistics for detected hands") print("Count images with no boxes detected: ", count_no_box_detected) print("Count images with exactly one box detected:", count_exactly_one_box_detected) print("Count images with two or more box detected:", count_2_and_more_boxes_detected) # - # Print an arbitrary image along with the detected hand. index = 200 img = test_images300x300[index] boxes, hand_scores = get_detected_hand_boxes(img) img = add_boxes_to_img(img, boxes) plt.imshow(img) # ### Test detection and classification pipeline combined def classify_gesture(img): # preprocess image img = img/255 img = img.astype('float32') input_tensor = tf.convert_to_tensor(img) input_tensor = input_tensor[tf.newaxis,...] # run classification model prediction = model_classify(input_tensor) # format prediction predicted_index = np.argmax(prediction, axis=1)[0] return predicted_index # + [markdown] pycharm={"name": "#%% md\n"} # Classify all detected hands. # # The code below runs the detection model on each images cropping the hand and passing it on to the classification model. # When a hand is not detected a label -1 is assigned to the image instead of trying to classify it. # The new label -1 "no gesture" is also used to generate the confusion matrix. 
# # This metrics represent the accuracy of the whole detection and classification pipeline. # + pycharm={"name": "#%%\n"} labels = test_labels300x300 images_detected_hands = [] # the cropped images by the detection model are being saved to show detection errors later y_pred = [] for img in test_images300x300: # detect hand boxes, _ = get_detected_hand_boxes(img) # only run classification model, when exactly one hand is detected if len(boxes) == 1: xmin, ymin, xmax, ymax = boxes[0] # crop detected hand img = crop_rect(img, xmin, xmax, ymin, ymax) # classify gesture y_pred.append(classify_gesture(img)) # if not exactly one hand is detection, the classification is considered wrong else: print("skipping image since no hand detected, label is set to -1") plt.imshow(img) plt.show() y_pred.append(-1) images_detected_hands.append(img) y_true = np.argmax(labels, axis=1) print('Confusion Matrix') print(confusion_matrix(y_true, y_pred)) print("\n Accuracy", accuracy_score(y_true, y_pred)) print('\n Classification Report') target_names = ['no gesture', 'crawl','fist', 'five', 'palm', 'peace'] print(classification_report(y_true, y_pred, target_names=target_names, digits=4)) # + [markdown] pycharm={"name": "#%% md\n"} # ### Show misclassified images # # When looking at misclassified images its interesting to find out what the reason of the error is. # Is it the wrongly cropped image or something else? For this reason the the classification test set is # loaded here for comparison reasons. It contains the manually cropped images with the help of the mask. # This allows to show the cropped images by the detection model and the manually cropped images side by side. 
# + pycharm={"name": "#%%\n"} # Todo: change path to the test directory which contains manually cropped images, cropped with the help of mask files # These images are being used only for comparison reasons TEST_DIR_64x64 = "C:/datasets/lared/cropped_gesture64x64/test" test_images64x64, test_labels64x64 = images_to_array(TEST_DIR_64x64) print("image_data_format()", tf.keras.backend.image_data_format()) print("Test images ",len(test_images64x64), np.array(test_images64x64[0]).shape, " Test labels ", len(test_labels64x64))
howto/4_evaluation/evaluation_end_to_end.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ms-python.python added import os try: os.chdir(os.path.join(os.getcwd(), 'hw3')) print(os.getcwd()) except: pass # # Homework 3 - <NAME> - A53266114 import random import numpy as np import scipy import scipy.optimize # + import gzip from collections import defaultdict def readGz(path): for l in gzip.open(path, 'rt'): yield eval(l) def readCSV(path): f = gzip.open(path, 'rt') f.readline() for l in f: yield l.strip().split(',') def MSE(predictions, labels): differences = [(x-y)**2 for x,y in zip(predictions,labels)] return sum(differences) / len(differences) # + bookCount = defaultdict(int) bookSet = set() totalRead = 0 user_read = defaultdict(set) data = [] for user,book,_ in readCSV("train_Interactions.csv.gz"): bookCount[book] += 1 bookSet.add(book) totalRead += 1 user_read[user].add(book) data.append((user, book)) # - # ## 1. # # training = data[:190000] validation = data[190000:] validation_new = [] for user, book in validation: sample = random.sample(bookSet.difference(user_read[user]), 1)[0] validation_new.append([user, book, 1]) validation_new.append([user, sample, 0]) validation = np.array(validation_new) # + book_user = defaultdict(set) user_read = defaultdict(set) for user, book in training: book_user[book].add(user) user_read[user].add(book) # + # baseline model mostPopular = [(bookCount[x], x) for x in bookCount] mostPopular.sort() mostPopular.reverse() popular = set() count = 0 for ic, i in mostPopular: count += ic popular.add(i) if count > totalRead * 0.58: break # - pred = [] for _, book, _ in validation: if book in popular: pred.append(1) else: pred.append(0) valid_gt = validation[:,2].astype(int) print(f"Baseline accuracy: {sum(pred==valid_gt)/len(pred)}.") # ## 2. 
# # Better threshold # better threshold percentile = list(range(40, 60, 1)) popularSet = [] for p in percentile: popular = set() count = 0 for ic, i in mostPopular: count += ic popular.add(i) if count > totalRead * (p/100): break popularSet.append(popular) pred = [] for _, book, _ in validation: if book in popular: pred.append(1) else: pred.append(0) print(f"Baseline accuracy: {sum(pred==valid_gt)/len(pred)}; threshold: {p} th") # The better threshold is 57th percentage from the above test. # ## 3. # # Jaccard similarity # # + def Jaccard(s1, s2): numer = len(s1.intersection(s2)) denom = len(s1.union(s2)) return numer / denom def Max_Jaccard(user, book): similarities = [] b = book_user[book] for read in user_read[user]: similarities.append(Jaccard(b, book_user[read])) return max(similarities) # - ps = np.linspace(0.005, 0.015, 11) for p in ps: pred = [] for user, book, _ in validation: sim = Max_Jaccard(user, book) if sim > p: pred.append(1) else: pred.append(0) print(f"Jaccard accuracy: {sum(pred==valid_gt)/len(pred)}; threshold: {p}") # With above test, the best the Jaccard accuracy can do is about a 62.645% accuracy with threshold of 0.011. # ## 4. 
# # Classifier with population and Jaccard accuracy result = [] for p in range(58,65,1): for t in np.linspace(0.004,0.010, 7): popular = set() count = 0 for ic, i in mostPopular: count += ic popular.add(i) if count > totalRead * (p/100): break pred = [] for user, book, _ in validation: sim = Max_Jaccard(user, book) if (sim > t) and (book in popular): pred.append(1) else: pred.append(0) print(f"Combined accuracy: {sum(pred==valid_gt)/len(pred)} pop ({p}), thr ({t}).") result.append((sum(pred==valid_gt)/len(pred),p,t)) result.sort() print(result[-1]) # + f = open("predictions_Read.txt", 'w') popular = set() count = 0 for ic, i in mostPopular: count += ic popular.add(i) if count > totalRead * (64/100): break for l in open("pairs_Read.txt"): if l.startswith("userID"): #header f.write(l) continue user,book = l.strip().split('-') sim = Max_Jaccard(user, book) if (sim > 0.008) and (book in popular): f.write(user + '-' + book + ",1\n") else: f.write(user + '-' + book + ",0\n") f.close() # - # The Kaggle username is "Renjie Zhu". # ## 9. 
# # Rating prediction # + user_rating = [] # users = [] # items = [] for user,book,rating in readCSV("train_Interactions.csv.gz"): user_rating.append((user, book, int(rating))) # users.append(user) # items.append(book) training = user_rating[:190000] validation = user_rating[190000:] # + alpha = sum([int(r) for _,_,r in training]) / len(training) userBiases = defaultdict(float) itemBiases = defaultdict(float) book_user = defaultdict(list) user_book = defaultdict(list) for u,b,r in training: book_user[u].append((u,b)) user_book[b].append((u,b)) nUsers = len(book_user) nItems = len(user_book) users = list(book_user.keys()) items = list(user_book.keys()) # - def prediction(user, item): try: return alpha + userBiases[user] + itemBiases[item] except: return alpha def unpack(theta): global alpha global userBiases global itemBiases alpha = theta[0] userBiases = dict(zip(users, theta[1:nUsers+1])) itemBiases = dict(zip(items, theta[1+nUsers:])) def cost(theta, labels, lamb): unpack(theta) predictions = [prediction(u, b) for u,b,_ in training] cost = MSE(predictions, labels) # print("MSE = " + str(cost)) for u in userBiases: cost += lamb*userBiases[u]**2 for i in itemBiases: cost += lamb*itemBiases[i]**2 return cost def derivative(theta, labels, lamb): unpack(theta) N = len(training) dalpha = 0 dUserBiases = defaultdict(float) dItemBiases = defaultdict(float) for u,b,r in training: pred = prediction(u, b) diff = pred - r dalpha += 2/N*diff dUserBiases[u] += 2/N*diff dItemBiases[b] += 2/N*diff for u in userBiases: dUserBiases[u] += 2*lamb*userBiases[u] for i in itemBiases: dItemBiases[i] += 2*lamb*itemBiases[i] dtheta = [dalpha] + [dUserBiases[u] for u in users] + [dItemBiases[i] for i in items] return np.array(dtheta) labels = [r for _,_,r in training] scipy.optimize.fmin_l_bfgs_b(cost, [alpha] + [0.0]*(nUsers+nItems), derivative, args = (labels, 1)) # + # prediction on validation set pred=[] real=[] for u,b,r in validation: pred.append(prediction(u,b)) real.append(r) 
print(f"The MSE on validation set is {MSE(pred,real)}.") # - # ## 10. print(f"User: largest {max(userBiases, key=userBiases.get)} ; smallest {min(userBiases, key=userBiases.get)}.") print(f"Book: largest {max(itemBiases, key=itemBiases.get)} ; smallest {min(itemBiases, key=itemBiases.get)}.") # ## 11. lamb = [1e-6,1e-5,1e-4,1e-3,1e-2] for l in lamb: scipy.optimize.fmin_l_bfgs_b(cost, [alpha] + [0.0]*(nUsers+nItems), derivative, args = (labels, l)) pred=[] real=[] for u,b,r in validation: pred.append(prediction(u,b)) real.append(r) print(f"The MSE on validation set is {MSE(pred,real)} with lambda = {l}.") scipy.optimize.fmin_l_bfgs_b(cost, [alpha] + [0.0]*(nUsers+nItems), derivative, args = (labels, 1e-5)) # + f = open("predictions_Rating.txt", 'w') for l in open("pairs_Rating.txt"): if l.startswith("userID"): #header f.write(l) continue u,b = l.strip().split('-') f.write(u + '-' + b + ',' + str(prediction(u,b)) + '\n') f.close() # - # The Kaggle username is "Renjie Zhu".
hw3/hw3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from pathlib import Path import shutil from bids import BIDSLayout import pandas as pd import numpy as np from io import StringIO import re import nibabel as nb import json import copy pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.max_colwidth', 500) pd.set_option('display.width', 1000) from IPython.core.display import display, HTML display(HTML("<style>" + "#notebook { padding-top:0px !important; } " + ".container { width:100% !important; } " + ".end_space { min-height:0px !important; } " + "</style>")) # + project_root = Path('/data/MBDU/midla') bids_dir = project_root / 'data/bids' derivatives_dir = project_root / 'data/derivatives' swarm_cmd_dir = project_root / 'swarm/fmriprep/swarm_cmds' sing_img_dir = Path('/data/MBDU/singularity_images/') image_path = (sing_img_dir/'fmriprep_20.1.0.simg').as_posix() fs_licence_path = sing_img_dir/'license.txt' assert fs_licence_path.exists() fs_licence_path = fs_licence_path.as_posix() # change this for other fmriprep runs #run_name = 'rn_aroma' #fmriprep_out = derivatives_dir / 'fmriprep' / run_name #cmd_file = swarm_cmd_dir / run_name #swarm_log_dir = project_root / ('swarm/fmriprep/swarm_logs' + run_name) jobids = {} # - database_path='/data/MBDU/midla/notebooks/pybids10_20200325' # %time layout = BIDSLayout(bids_dir, database_path=database_path, ignore=[], force_index=[]) subjects = layout.get(return_type='id', target='subject', suffix='T1w') # + def jobhist(jid): # jh = !jobhist {jid} jt = [] nblanks=0 for ll in jh: if nblanks >=2 and ll != '': jt.append(ll) if ll == '': nblanks+=1 jhdf = pd.read_csv(StringIO('\n'.join(jt)), delim_whitespace=True) states = ["RUNNING", "COMPLETED", "FAILED", "CANCELLED"] print("Job statuses:") for st in 
states: stdf = jhdf.query('State == @st') print(f"{st}: {len(stdf)}") return jhdf def subj_status(jhdf, subjects, statuses): if len(jhdf) != len(subjects): raise ValueError('jhdf and subjects should have the same length') status_runs = np.array(jhdf.loc[jhdf.State.isin(statuses), 'Jobid'].str.split('_').str[1]).astype(int) status_subjs = np.array(subjects)[status_runs] return status_subjs def put_back(orig_run_name, run_name, project_root, worked): bids_dir = project_root / 'data/bids' derivatives_dir = project_root / 'data/derivatives' orig_fmriprep_out = derivatives_dir / 'fmriprep' / orig_run_name rr_fmriprep_out = derivatives_dir / 'fmriprep' / run_name orig_fails = derivatives_dir / 'fmriprep' / (orig_run_name + '_failure_archive') assert orig_fails != rr_fmriprep_out orig_fails.mkdir(exist_ok=True) sing_cmds = [] for sid in worked: rr_out_dir = rr_fmriprep_out / f'sub-{sid}' orig_out_dir = orig_fmriprep_out / f'sub-{sid}' orig_fail_dir = orig_fails / f'sub-{sid}' if orig_out_dir.exists(): shutil.move(orig_out_dir, orig_fail_dir) assert ~orig_out_dir.exists() shutil.move(rr_out_dir, orig_out_dir) def make_sing_cmd(subj, image_path, bids_dir, fs_license_path, subj_out_dir, longitudinal=True, aroma=True, t2scoreg=True, dummy_scans=4, save_work_directory=False, verbose=False, melodic_dims=None, echo_idx=None, task=None, anat_only=False, bids_filter_file=None, output_spaces=None, fs_subjects_dir=None, mnitobold=False): if output_spaces is None: output_spaces = ['MNI152NLin2009cAsym:res-2', 'fsaverage'] output_spaces = ' '.join(output_spaces) sing_cmd = f'export TMPDIR=/lscratch/$SLURM_JOB_ID && \ export SINGULARITY_BINDPATH="/gs4,/gs5,/gs6,/gs7,/gs8,/gs9,/gs10,/gs11,/spin1,/scratch,/fdb,/data,/lscratch" &&\ mkdir -p $TMPDIR/out && \ mkdir -p $TMPDIR/wrk && \ singularity run --cleanenv --bind /data/MBDU/nielsond/fmriprep/fmriprep/:/usr/local/miniconda/lib/python3.7/site-packages/fmriprep {image_path} {bids_dir} \ $TMPDIR/out participant \ --participant_label {subj} 
\ -w $TMPDIR/wrk \ --nprocs $SLURM_CPUS_PER_TASK \ --mem_mb $SLURM_MEM_PER_NODE \ --fs-license-file {fs_licence_path} \ --output-spaces {output_spaces} \ --dummy-scans {dummy_scans}' if anat_only: sing_cmd += " --anat-only" if bids_filter_file: sing_cmd += f" --bids-filter-file {bids_filter_file.as_posix()}" if longitudinal: sing_cmd += " --longitudinal" if aroma: sing_cmd += " --use-aroma --error-on-aroma-warnings" if melodic_dims is not None: sing_cmd += f" --aroma-melodic-dimensionality {melodic_dims}" if echo_idx is not None: sing_cmd += f" --echo-idx {echo_idx}" if task is not None: sing_cmd += f" --task-id {task}" if t2scoreg: sing_cmd += " --t2s-coreg" if verbose: sing_cmd += " -vvv" sing_cmd += '; ' if not mnitobold: sing_cmd += " STATUS=$? ; " # Save the exit status of fmri prep, but allow the rsync to run so we can see the log sing_cmd += f" rsync -ach $TMPDIR/out {subj_out_dir} ;" if save_work_directory: subj_work_dir = subj_out_dir / "wrk" sing_cmd += f" rsync -ach $TMPDIR/wrk {subj_work_dir} ; " sing_cmd += f" chown -R :MBDU {subj_out_dir} ; " else: sing_cmd += 'mkdir $TMPDIR/mnitobold; ' sing_cmd += f' singularity exec --cleanenv --bind /data/MBDU/nielsond/fmriprep-fix-cli-parser/fmriprep/:/usr/local/miniconda/lib/python3.7/site-packages/fmriprep \ {image_path} /data/MBDU/midla/notebooks/code/run_mnitobold.sh ' sing_cmd += ' $TMPDIR $TMPDIR/mnitobold ' sing_cmd += ' /data/MBDU/midla/data/templates/tpl-MNI152NLin2009cAsym_res-02_T1w.nii.gz /data/MBDU/midla/data/templates/tpl-MNI152NLin2009cAsym_res-02_desc-carpet_dseg.nii.gz' sing_cmd += ' --omp-nthreads=$SLURM_CPUS_PER_TASK --mem-gb=$SLURM_MEM_PER_NODE ;' sing_cmd += " STATUS=$? 
; " # Save the exit status of fmri prep, but allow the rsync to run so we can see the log sing_cmd += f' rsync -ach $TMPDIR/mnitobold/ /data/MBDU/midla/data/derivatives/mnitobold/run1/sub-{subj} ;' sing_cmd += f' chown -R :MBDU /data/MBDU/midla/data/derivatives/mnitobold/run1/sub-{subj} ;' sing_cmd +=" (exit $STATUS)" # exit a subshell with the saved status so that hpc correctly identifies it as a success or failure return sing_cmd def write_swarm_file(run_name, project_root, image_path, fs_license_path, subjects, layout=None, sing_sess=False, defaced=False, update=False, bids_filter=None, multiecho_masking_test=False, fs_subjects_run=None, **kwargs): if defaced: bids_dir = project_root / 'data/bids_defaced' elif multiecho_masking_test: bids_dir = project_root / 'data/multiecho_masking_test' else: bids_dir = project_root / 'data/bids' derivatives_dir = project_root / 'data/derivatives' swarm_cmd_dir = project_root / 'swarm/fmriprep/swarm_cmds' fmriprep_out = derivatives_dir / 'fmriprep' / run_name if fs_subjects_run is not None: fs_base_dir = derivatives_dir / fs_subjects_run cmd_file = swarm_cmd_dir / run_name swarm_log_dir = project_root / 'swarm/fmriprep/swarm_logs' / run_name if (bids_filter is not None) and not sing_sess: fmriprep_out.mkdir(parents=True, exist_ok=True) bids_filter_file = fmriprep_out / 'bids_filter' bids_filter_file.write_text(json.dumps(bids_filter, indent=2)) kwargs['bids_filter_file'] = bids_filter_file sing_cmds = [] old_subjs = [] for sid in subjects: if fs_subjects_run is not None: fs_subjects_dir = fs_base_dir / f'sub-{sid}/out/freesurfer' else: fs_subjects_dir = None if sing_sess: if layout is None: raise ValueError("Must pass a layout if you're running single session.") if bids_filter is None: bids_filter = { 't1w': {'datatype': 'anat', 'suffix': 'T1w'}, } sessions = layout.get(return_type='id', target='session', subject=sid, **bids_filter['t1w']) for sesid in sessions: ses_out_dir = fmriprep_out / f'sub-{sid}/ses-{sesid}' 
ses_out_dir.mkdir(parents=True, exist_ok=True) ses_filter = copy.deepcopy(bids_filter) for modality in ses_filter.keys(): ses_filter[modality]['session'] = sesid ses_filter_file = ses_out_dir / f'sub-{sid}_ses-{sesid}.filter' ses_filter_file.write_text(json.dumps(ses_filter, indent=2)) kwargs['bids_filter_file'] = ses_filter_file sing_cmd = make_sing_cmd(sid, image_path, bids_dir, fs_licence_path, ses_out_dir,fs_subjects_dir=fs_subjects_dir, **kwargs) sing_cmds.append(sing_cmd) else: subj_out_dir = fmriprep_out / f'sub-{sid}' if update and (subj_out_dir / f'out/fmriprep/sub-{sid}.html').exists(): print(f"Skipping {sid} because results for the subject exist.") old_subjs.append(sid) else: subj_out_dir.mkdir(parents=True, exist_ok=True) # TODO: make sure the ones we think failed, actually failed sing_cmd = make_sing_cmd(sid, image_path, bids_dir, fs_licence_path, subj_out_dir, fs_subjects_dir=fs_subjects_dir, **kwargs) sing_cmds.append(sing_cmd) cmd_file = Path(cmd_file.as_posix()) cmd_file.write_text('\n'.join(sing_cmds)) tmp = cmd_file.read_text().split('\n') for i, tt in enumerate(tmp): print(tt) if i > 5: break if not sing_sess: assert len(tmp) == (len(subjects) - len(old_subjs)) return cmd_file, swarm_log_dir, tmp # - # some subjects fail the longitudinal pipeline, for right now, Ignore them # at some point, should update code to handle this maybe known_hard_subjects = ['24028', '22477'] easy_subjects = [ss for ss in subjects if ss not in known_hard_subjects] # + run_name = 'fmriprepv20.1.0_20200528_2mm_clifix_noaroma_bold_output' bids_filter = { "t1w": { "datatype": "anat", "reconstruction": "prenorm", "suffix": "T1w" }} cmd_file, swarm_log_dir, _cmds = write_swarm_file(run_name, project_root, image_path, fs_licence_path, easy_subjects, layout=layout, sing_sess=False, save_work_directory=True, verbose=True, output_spaces = ['MNI152NLin2009cAsym:res-2', 'func'], anat_only=False, bids_filter=bids_filter, longitudinal=True, aroma=False, t2scoreg=False, 
mnitobold=True, fs_subjects_run='fmriprepv20.1.0_20200528_2mm_clifix_noaroma') run_name, cmd_file, swarm_log_dir # - jobids[run_name] = ! swarm -f {cmd_file} --gres=lscratch:200 -g 64 -t 24 --module singularity,webproxy --time 48:00:00 --logdir {swarm_log_dir} --job-name {run_name} jobids[run_name] = jobids[run_name][0] jobids[run_name]
notebook/example_of_running_mnitobold_on_swarm.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # climMAPcore application analysis
#
# Using the Python programming language, we cross-check the information
# produced by the climMAPcore application for the state of Aguascalientes.

# ## Linear regression

# libraries
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression

# read the csv
data = pd.read_csv('../../data/db_join_wrf_tpro_10_25_tmid_10_20.csv')

# structure of the DataFrame
data.head()

# DataFrame columns
data.columns

# DataFrame info
data.info()

# keep only the columns that contain data
data = data[['incidencia', 'prec', 'tmax', 'tmin', 'tpro', 'humr', 'dpoint','tmidnight','indicePresencia','porcentajePresencia']]

# check the DataFrame structure
data.head()

rl = LinearRegression()

# NOTE(review): the feature matrix passed to fit() still contains the target
# column 'incidencia', so the model can reproduce the target trivially
# (data leakage) — confirm whether this is intended.
rl.fit(data, data['incidencia'])

# list of B coefficients for each X
list(zip(data.columns, rl.coef_))

# generate the predictions
predicciones = rl.predict(data)
predicciones_df = pd.DataFrame(predicciones, columns=['Pred'])

# predictions for the first 5 rows
predicciones_df.head()

# compute the mean deviation
np.mean(data['incidencia'] - predicciones)

# ## Logistic regression

# create an example dataset
from sklearn.datasets import make_classification
X, y = make_classification(n_samples=1000, n_features=4)

# +
# Import the model
from sklearn.linear_model import LogisticRegression

rlog = LogisticRegression() # Create the model
# -

# Split the dataset into training and evaluation sets
X_entrenamiento = X[:-200]
X_evaluacion = X[-200:]

y_entrenamiento = y[:-200]
y_evaluacion = y[-200:]

rlog.fit(X_entrenamiento, y_entrenamiento) # fit the model

# Make the predictions
y_predic_entrenamiento = rlog.predict(X_entrenamiento)
y_predic_evaluacion = rlog.predict(X_evaluacion)

# Check the model's accuracy (fraction of correct predictions)
entrenamiento = (y_predic_entrenamiento == y_entrenamiento).sum().astype(float) / y_entrenamiento.shape[0]
print("sobre datos de entrenamiento: {0:.2f}".format(entrenamiento))
evaluacion = (y_predic_evaluacion == y_evaluacion).sum().astype(float) / y_evaluacion.shape[0]
print("sobre datos de evaluación: {0:.2f}".format(evaluacion))

# ## Decision trees

# Create an example dataset
X, y = make_classification(1000, 20, n_informative=3)

# Import the decision tree
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree

ad = DecisionTreeClassifier(criterion='entropy', max_depth=3) # Create the model

ad.fit(X, y) # Fit the model

# generate the file used to plot the tree
with open("mi_arbol.dot", 'w') as archivo_dot:
    tree.export_graphviz(ad, out_file = archivo_dot)

# use the dot language to draw the tree.
# !dot -Tjpeg mi_arbol.dot -o arbol_decision.jpeg

# check the accuracy
# NOTE(review): accuracy is measured on the same data used for training,
# so it overstates real performance.
print("precisión del modelo: {0: .2f}".format((y == ad.predict(X)).mean()))

# ## Random Forest

# Create an example dataset
X, y = make_classification(1000)

# Import the random forest
from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier() # Create the model

rf.fit(X, y) # Fit the model

# check the accuracy (again measured on the training data)
print("precisión del modelo: {0: .2f}".format((y == rf.predict(X)).mean()))

# ### climMAPcore validation

# select the values: all columns except 'incidencia' as features
X = data.iloc[:, 1:]
y = data['incidencia']

# Import the random forest
from sklearn.ensemble import RandomForestClassifier

rf = RandomForestClassifier() # Create the model

rf.fit(X,y)

# check the accuracy (measured on the training data)
print("precisión del modelo: {0: .2f}".format((y == rf.predict(X)).mean()))

# ## SVM (support vector machines)

# import SVM
from sklearn import svm
from sklearn.svm import SVC

# list the columns available
data.columns

# select the data
X = data.iloc[:, 1:]
y = data['incidencia']

# mesh step size for the plot
h = 0.02

# create SVMs with different kernels
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
ejercicios/6_Machine_Learning/MachineLearning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + import cadquery as cq # Points we will use to create spline and polyline paths to sweep over pts = [ (0, 1), (1, 2), (2, 4) ] # Spline path generated from our list of points (tuples) path = cq.Workplane("XZ").spline(pts) # Sweep a circle with a diameter of 1.0 units along the spline path we just created defaultSweep = cq.Workplane("XY").circle(1.0).sweep(path) # Sweep defaults to making a solid and not generating a Frenet solid. Setting Frenet to True helps prevent creep in # the orientation of the profile as it is being swept frenetShell = cq.Workplane("XY").circle(1.0).sweep(path, makeSolid=True, isFrenet=True) # We can sweep shapes other than circles defaultRect = cq.Workplane("XY").rect(1.0, 1.0).sweep(path) # Switch to a polyline path, but have it use the same points as the spline path = cq.Workplane("XZ").polyline(pts) # Using a polyline path leads to the resulting solid having segments rather than a single swept outer face plineSweep = cq.Workplane("XY").circle(1.0).sweep(path) # Switch to an arc for the path path = cq.Workplane("XZ").threePointArc((1.0, 1.5), (0.0, 1.0)) # Use a smaller circle section so that the resulting solid looks a little nicer arcSweep = cq.Workplane("XY").circle(0.5).sweep(path) # Translate the resulting solids so that they do not overlap and display them left to right # show_object(defaultSweep) # show_object(frenetShell.translate((5, 0, 0))) # show_object(defaultRect.translate((10, 0, 0))) # show_object(plineSweep.translate((15, 0, 0))) show_object(arcSweep.translate((20, 0, 0)))
examples/Ex031_Sweep.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6 - AzureML
#     language: python
#     name: python3-azureml
# ---

# # Safari Challenge
#
# In this challenge, you must use what you've learned to train a convolutional neural network model that classifies images of animals you might find on a safari adventure.
#
# ## Explore the data
#
# The training images you must use are in the **/safari/training** folder. Run the cell below to see an example of each image class, and note the shape of the images (which indicates the dimensions of the image and its color channels).

# +
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline

# The images are in the data/safari/training folder, one subfolder per class
data_path = 'data/safari/training'

# Get the class names (subfolder names), in sorted order
classes = os.listdir(data_path)
classes.sort()
print(len(classes), 'classes:')
print(classes)

# Show one sample image per class, titled with its filename and array shape
fig = plt.figure(figsize=(12, 12))
i = 0
for sub_dir in os.listdir(data_path):
    i += 1
    img_file = os.listdir(os.path.join(data_path, sub_dir))[2]
    img_path = os.path.join(data_path, sub_dir, img_file)
    img = mpimg.imread(img_path)
    img_shape = np.array(img).shape
    a = fig.add_subplot(1, len(classes), i)
    a.axis('off')
    imgplot = plt.imshow(img)
    a.set_title(img_file + ' : ' + str(img_shape))
plt.show()
# -

# Now that you've seen the images, use your preferred framework (PyTorch or TensorFlow) to train a CNN classifier for them. Your goal is to train a classifier with a validation accuracy of 95% or higher.
#
# Add cells as needed to create your solution.
#
# > **Note**: There is no single "correct" solution. Sample solutions are provided in [05 - Safari CNN Solution (PyTorch).ipynb](05%20-%20Safari%20CNN%20Solution%20(PyTorch).ipynb) and [05 - Safari CNN Solution (TensorFlow).ipynb](05%20-%20Safari%20CNN%20Solution%20(TensorFlow).ipynb).

# +
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Creating the dataset: rescale pixels to [0, 1] and hold out 30% for validation
image_size = (200, 200)
batch_size = 30

print('Generating Data....')
datagen = ImageDataGenerator(rescale=1/255, validation_split=0.3)

print('Preparing training Data...')
train_generator = datagen.flow_from_directory(data_path,
                                              target_size=image_size,
                                              batch_size=batch_size,
                                              class_mode='categorical',
                                              subset='training')

print('Preparing the validation dataset')
validation_generator = datagen.flow_from_directory(data_path,
                                                   target_size=image_size,
                                                   batch_size=batch_size,
                                                   class_mode='categorical',
                                                   subset='validation')
# -

# Your Code to train a CNN model...
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

# +
# Three conv/pool stages, light dropout, then a dense softmax classifier.
model = Sequential()
model.add(Conv2D(32, (6, 6), input_shape=train_generator.image_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (6, 6), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (6, 6), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.1))
model.add(Flatten())
# FIX: Keras activation identifiers are lowercase ('softmax', not 'Softmax').
model.add(Dense(train_generator.num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -

model.summary()

# FIX: dropped batch_size from fit() — the batch size is already set by the
# generator, and passing it alongside a generator input is rejected/ignored.
epochs = 6
history = model.fit(train_generator, epochs=epochs,
                    validation_data=validation_generator)

# +
# Viewing the loss and accuracy curves across epochs
import matplotlib.pyplot as plt

training_loss = history.history['loss']
validation_loss = history.history['val_loss']
training_acc = history.history['accuracy']
validation_acc = history.history['val_accuracy']
plt.plot(range(1, epochs+1), training_loss)
plt.plot(range(1, epochs+1), validation_loss)
plt.plot(range(1, epochs+1), training_acc)
plt.plot(range(1, epochs+1), validation_acc)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('loss/acc Graph')

# +
from sklearn.metrics import confusion_matrix, classification_report

# Evaluate on the first validation batch only
print('Generating predictions from Validation data ')
x_test = validation_generator[0][0]  # image data
y_test = validation_generator[0][1]  # one-hot labels
# -

x_test[0].shape

class_proba = model.predict(x_test)
preds = np.argmax(class_proba, axis=1)
true_labels = np.argmax(y_test, axis=1)

preds.shape, true_labels.shape

cm = confusion_matrix(true_labels, preds)
print('Confusion Matrix\n', cm)
print('Classification Report\n', classification_report(true_labels, preds))

# ## Save your model
#
# Add code below to save your model's trained weights.

# Code to save your model
# NOTE(review): the filename says "shape_classifier" but this is the safari
# model; kept as-is because the cell below loads this exact path.
modelFileName = 'models/shape_classifier.h5'
model.save(modelFileName)
del model
print('model is saved as', modelFileName)

# ## Use the trained model
#
# Now that we've trained your model, modify the following code as necessary to use it to predict the classes of the provided test images.

# + tags=[]
from tensorflow.keras import models
import numpy as np
import os
# %matplotlib inline


# Function to predict the class of an image
def predict_image(classifier, image):
    """Return the predicted class index for a single image array."""
    # The model expects a batch of images, so make a batch of one.
    # FIX: the original reshaped the *global* `img` and ignored the `image`
    # parameter — it only worked because the calling loop used that name.
    imgfeatures = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])
    # Match the training preprocessing: float32 pixels scaled to [0, 1].
    imgfeatures = imgfeatures.astype('float32')
    imgfeatures /= 255
    # Use the model to predict the image class
    class_probabilities = classifier.predict(imgfeatures)
    # Find the class prediction with the highest predicted probability
    index = int(np.argmax(class_probabilities, axis=1)[0])
    return index


# Load your model
model = models.load_model(modelFileName)  # loads the saved model

# The test images are in the data/safari/test folder
test_data_path = 'data/safari/test'

# Show the test images with predictions
fig = plt.figure(figsize=(8, 12))
i = 0
for img_file in os.listdir(test_data_path):
    i += 1
    img_path = os.path.join(test_data_path, img_file)
    img = mpimg.imread(img_path)
    # Get the image class prediction
    index = predict_image(model, np.array(img))
    a = fig.add_subplot(1, len(classes), i)
    a.axis('off')
    imgplot = plt.imshow(img)
    a.set_title(classes[index])
plt.show()
# -

# Hopefully, your model predicted all four of the image classes correctly!
challenges/05 - Safari CNN Challenge_solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="5wFF5JFyD2Ki" # #### Copyright 2019 The TensorFlow Hub Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # + id="Uf6NouXxDqGk" # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # + [markdown] id="ORy-KvWXGXBo" # # Exploring the TF-Hub CORD-19 Swivel Embeddings # # + [markdown] id="MfBg1C5NB3X0" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/cord_19_embeddings_keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a> # </td> # <td> # <a 
href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # <td> # <a href="https://tfhub.dev/tensorflow/cord-19/swivel-128d/3"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> # </td> # </table> # + [markdown] id="yI6Mh3-P0_Pk" # The CORD-19 Swivel text embedding module from TF-Hub (https://tfhub.dev/tensorflow/cord-19/swivel-128d/3) # was built to support researchers analyzing natural languages text related to COVID-19. # These embeddings were trained on the titles, authors, abstracts, body texts, and # reference titles of articles in the [CORD-19 dataset](https://pages.semanticscholar.org/coronavirus-research). # # In this colab we will: # - Analyze semantically similar words in the embedding space # - Train a classifier on the SciCite dataset using the CORD-19 embeddings # # + [markdown] id="gVWOrccw0_Pl" # ## Setup # # + id="Ym2nXOPuPV__" import functools import itertools import matplotlib.pyplot as plt import numpy as np import seaborn as sns import pandas as pd import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_hub as hub from tqdm import trange # + [markdown] id="_VgRRf2I7tER" # # Analyze the embeddings # # Let's start off by analyzing the embedding by calculating and plotting a correlation matrix between different terms. If the embedding learned to successfully capture the meaning of different words, the embedding vectors of semantically similar words should be close together. Let's take a look at some COVID-19 related terms. 
# + id="HNN_9bBKSLHU" # Use the inner product between two embedding vectors as the similarity measure def plot_correlation(labels, features): corr = np.inner(features, features) corr /= np.max(corr) sns.heatmap(corr, xticklabels=labels, yticklabels=labels) # Generate embeddings for some terms queries = [ # Related viruses 'coronavirus', 'SARS', 'MERS', # Regions 'Italy', 'Spain', 'Europe', # Symptoms 'cough', 'fever', 'throat' ] module = hub.load('https://tfhub.dev/tensorflow/cord-19/swivel-128d/3') embeddings = module(queries) plot_correlation(queries, embeddings) # + [markdown] id="Bg-PGqtm8B7K" # We can see that the embedding successfully captured the meaning of the different terms. Each word is similar to the other words of its cluster (i.e. "coronavirus" highly correlates with "SARS" and "MERS"), while they are different from terms of other clusters (i.e. the similarity between "SARS" and "Spain" is close to 0). # # Now let's see how we can use these embeddings to solve a specific task. # + [markdown] id="idJ1jFmH7xMa" # ## SciCite: Citation Intent Classification # # This section shows how one can use the embedding for downstream tasks such as text classification. We'll use the [SciCite dataset](https://www.tensorflow.org/datasets/catalog/scicite) from TensorFlow Datasets to classify citation intents in academic papers. Given a sentence with a citation from an academic paper, classify whether the main intent of the citation is as background information, use of methods, or comparing results. 
# + id="Ghc-CzT8DDaZ" builder = tfds.builder(name='scicite') builder.download_and_prepare() train_data, validation_data, test_data = builder.as_dataset( split=('train', 'validation', 'test'), as_supervised=True) # + id="CVjyBD0ZPh4Z" #@title Let's take a look at a few labeled examples from the training set NUM_EXAMPLES = 10#@param {type:"integer"} TEXT_FEATURE_NAME = builder.info.supervised_keys[0] LABEL_NAME = builder.info.supervised_keys[1] def label2str(numeric_label): m = builder.info.features[LABEL_NAME].names return m[numeric_label] data = next(iter(train_data.batch(NUM_EXAMPLES))) pd.DataFrame({ TEXT_FEATURE_NAME: [ex.numpy().decode('utf8') for ex in data[0]], LABEL_NAME: [label2str(x) for x in data[1]] }) # + [markdown] id="65s9UpYJ_1ct" # ## Training a citaton intent classifier # # We'll train a classifier on the [SciCite dataset](https://www.tensorflow.org/datasets/catalog/scicite) using Keras. Let's build a model which use the CORD-19 embeddings with a classification layer on top. 
# + id="yZUclu8xBYlj" #@title Hyperparameters { run: "auto" } EMBEDDING = 'https://tfhub.dev/tensorflow/cord-19/swivel-128d/3' #@param {type: "string"} TRAINABLE_MODULE = False #@param {type: "boolean"} hub_layer = hub.KerasLayer(EMBEDDING, input_shape=[], dtype=tf.string, trainable=TRAINABLE_MODULE) model = tf.keras.Sequential() model.add(hub_layer) model.add(tf.keras.layers.Dense(3)) model.summary() model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) # + [markdown] id="weZKWK-pLBll" # ## Train and evaluate the model # # Let's train and evaluate the model to see the performance on the SciCite task # + id="cO1FWkZW2WS9" EPOCHS = 35#@param {type: "integer"} BATCH_SIZE = 32#@param {type: "integer"} history = model.fit(train_data.shuffle(10000).batch(BATCH_SIZE), epochs=EPOCHS, validation_data=validation_data.batch(BATCH_SIZE), verbose=1) # + id="2sKE7kEyLJQZ" from matplotlib import pyplot as plt def display_training_curves(training, validation, title, subplot): if subplot%10==1: # set up the subplots on the first call plt.subplots(figsize=(10,10), facecolor='#F0F0F0') plt.tight_layout() ax = plt.subplot(subplot) ax.set_facecolor('#F8F8F8') ax.plot(training) ax.plot(validation) ax.set_title('model '+ title) ax.set_ylabel(title) ax.set_xlabel('epoch') ax.legend(['train', 'valid.']) # + id="nnQfxevhLKld" display_training_curves(history.history['accuracy'], history.history['val_accuracy'], 'accuracy', 211) display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212) # + [markdown] id="BjvtOw72Lpyw" # ## Evaluate the model # # And let's see how the model performs. Two values will be returned. Loss (a number which represents our error, lower values are better), and accuracy. 
# + id="y0ExC8D0LX8m" results = model.evaluate(test_data.batch(512), verbose=2) for name, value in zip(model.metrics_names, results): print('%s: %.3f' % (name, value)) # + [markdown] id="dWp5OWeTL2EW" # We can see that the loss quickly decreases while especially the accuracy rapidly increases. Let's plot some examples to check how the prediction relates to the true labels: # + id="VzHzAOaaOVC0" prediction_dataset = next(iter(test_data.batch(20))) prediction_texts = [ex.numpy().decode('utf8') for ex in prediction_dataset[0]] prediction_labels = [label2str(x) for x in prediction_dataset[1]] predictions = [ label2str(x) for x in np.argmax(model.predict(prediction_texts), axis=-1)] pd.DataFrame({ TEXT_FEATURE_NAME: prediction_texts, LABEL_NAME: prediction_labels, 'prediction': predictions }) # + [markdown] id="OSGcrkE069_Q" # We can see that for this random sample, the model predicts the correct label most of the times, indicating that it can embed scientific sentences pretty well. # + [markdown] id="oLE0kCfO5CIA" # # What's next? # # Now that you've gotten to know a bit more about the CORD-19 Swivel embeddings from TF-Hub, we encourage you to participate in the CORD-19 Kaggle competition to contribute to gaining scientific insights from COVID-19 related academic texts. # # * Participate in the [CORD-19 Kaggle Challenge](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge) # * Learn more about the [COVID-19 Open Research Dataset (CORD-19)](https://pages.semanticscholar.org/coronavirus-research) # * See documentation and more about the TF-Hub embeddings at https://tfhub.dev/tensorflow/cord-19/swivel-128d/3 # * Explore the CORD-19 embedding space with the [TensorFlow Embedding Projector](http://projector.tensorflow.org/?config=https://storage.googleapis.com/tfhub-examples/tensorflow/cord-19/swivel-128d/3/tensorboard/projector_config.json)
site/en-snapshot/hub/tutorials/cord_19_embeddings_keras.ipynb