code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Tutorial: Python sequence types (lists, tuples, strings), repetition
# caveats with mutable elements, and list-vs-tuple performance/storage.

# ![title](imgs/1.png)
# ![title](imgs/2.png)
# ![title](imgs/3.png)
# ![title](imgs/4.png)
# ![title](imgs/5.png)
# ![title](imgs/6.png)
# ![title](imgs/7.png)

l = [1, 2, 3, 4]
t = (1, 2, 3, 4)
s = 'python'

s.index('y')

# BUG FIX: str.index raises ValueError when the target is absent or the
# search window is empty.  The two calls below previously aborted the
# script; demonstrate the exception instead of crashing on it.
try:
    s.index('z')  # 'z' is not in 'python'
except ValueError as exc:
    print("s.index('z') ->", exc)
try:
    s.index('n', 12)  # start=12 is past the end of a 6-character string
except ValueError as exc:
    print("s.index('n', 12) ->", exc)

s[1:4]
list(enumerate(l))
l[0:5]        # slicing past the end is safe (no exception)
s[5:0:-1]
s[::-1]       # idiomatic reversal

# +
# Caveats of sequence repetition
# -

from decimal import Decimal

a = Decimal('10.5')
b = Decimal('10.5')
a is b   # False: two distinct objects
a == b   # True: equal values

l = [Decimal('10.5')]
id(l[0])
l2 = l * 2     # repetition copies *references*, not the objects themselves
id(l2[0])
id(l2[1])      # same id as l2[0]: both slots point at the one Decimal

l = [[0, 0]]
l2 = l * 2
l2
id(l2[0]), id(l2[1])   # the same inner list, twice
l2[0][0] = 100
l2                     # both "rows" appear to change

l = [[0, 0]]
l2 = l * 2
print(l2)
l[0][0] = 1            # mutating through the original list...
print(l)
print(l2)              # ...is visible through l2 as well

# # Mutable Sequence Types

# ![title](imgs/8.png)
# ![title](imgs/9.png)
# ![title](imgs/10.png)

l = [1, 2, 3, 4, 5]
print(id(l))
l.clear()   # in-place mutation: id is unchanged
l

# # List vs Tuple

from dis import dis

l = [1, 2, 3]
t = (1, 2, 3)
dis(compile('[1,2,3]', 'string', 'eval'))        # list is built at runtime
dis(compile('(1,2,3)', 'string', 'eval'))        # tuple is a stored constant
dis(compile('(1,2,3,[4,5])', 'string', 'eval'))  # a mutable element defeats that

from timeit import timeit

timeit("(1,2,3,4,5,6,7,8,9)", number=10_000_000)
timeit("[1,2,3,4,5,6,7,8,9]", number=10_000_000)

l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
t1 = (1, 2, 3, 4, 5, 6, 7, 8, 9)
id(l1), id(t1)

l2 = list(l1)
id(l1), id(l2)   # different ids: list() always makes a new (shallow) copy

t2 = tuple(t1)
id(t1), id(t2)   # same id: tuple() returns the immutable tuple unchanged

timeit('tuple((1,2,3,4,5,6,7,8,9))', number = 5_000_000)
timeit('list((1,2,3,4,5,6,7,8,9))', number = 5_000_000)

# ### Storage Efficiency

import sys

t = tuple()
prev = sys.getsizeof(t)
for i in range(10):
    c = tuple(range(i + 1))
    size_c = sys.getsizeof(c)
    delta, prev = size_c - prev, size_c
    print(f'{i+1} items: {size_c}, {delta}')

l = list()
prev = sys.getsizeof(l)
for i in range(10):
    c = list(range(i + 1))
    size_c = sys.getsizeof(c)
    delta, prev = size_c - prev, size_c
    print(f'{i+1} items: {size_c}, {delta}')
1. Sequences.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Experiment: polynomial-kernel SVM on the PANCAN RNA-SeqV2 gene-expression
# matrix, after dropping all-zero genes and variance-threshold feature
# selection.

# +
# Load the raw dataset (genes x samples, tab-separated).
import numpy as np
import pandas as pd
from pandas import DataFrame

rawdf = pd.read_csv("unc.edu_PANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv", sep="\t", index_col=0)
rawdf.head()
# -

# Transpose so each row is a patient sample and each column a gene.
processeddf = rawdf.transpose()

# Load the class labels.
dflabels = pd.read_csv('project_class_labels_original_10471.csv', index_col='Unnamed: 0')

# Drop features (columns) that are zero for every sample.
removedAllZeroColdf = processeddf.loc[:, (processeddf != 0).any(axis=0)]
removedAllZeroColdf.shape

# +
# Feature selection: keep only genes whose variance exceeds the threshold.
from sklearn.feature_selection import VarianceThreshold

sel = VarianceThreshold(threshold=500000)
reduced = sel.fit_transform(removedAllZeroColdf)
reduceddf = DataFrame(reduced)
reduced.shape
# +
# Data scaling, method 2: min-max normalisation.
# NOTE(review): minmaxdf is computed but never used below -- the model is
# trained on the variance-filtered (unscaled) features.  Confirm intent.
from sklearn.preprocessing import MinMaxScaler

min_max_scaler = MinMaxScaler()
min_max_scalerfit = min_max_scaler.fit_transform(removedAllZeroColdf)
minmaxdf = DataFrame(min_max_scalerfit)
minmaxdf.head()
# +
# Split: 80% training set, 20% testing set.
from sklearn.model_selection import train_test_split

trainData, testData, trainLabel, testLabel = train_test_split(reduceddf, dflabels, test_size=0.20)
# +
# SVM model with a polynomial kernel; training time is measured.
from sklearn import svm
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score

import time

poly_svm_clf = svm.SVC(kernel='poly')
start = time.time()
poly_svm_clf.fit(trainData, trainLabel.values.ravel())
end = time.time()
# BUG FIX: the message previously said "linear SVC" although the kernel
# trained above is polynomial.
print("polynomial SVC training time: ", end - start)

pred = poly_svm_clf.predict(testData)
accuracy = accuracy_score(testLabel, pred)
print("Accuracy Score (polynomial kernel):", accuracy)

# Confusion matrix.
from sklearn.metrics import confusion_matrix
# BUG FIX: the result used to be bound to the name `confusion_matrix`,
# shadowing the imported function and breaking any later call to it.
conf_mat = confusion_matrix(testLabel, pred)
print(conf_mat)

# Classification report.
from sklearn.metrics import classification_report
print(classification_report(testLabel, pred))
project_exp_16_Poly_SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # change to the root directory of the project import os if os.getcwd().split("/")[-1] == "examples": os.chdir('..') # This will reload all imports as soon as the code changes # %load_ext autoreload # %autoreload 2 # + try: import matplotlib.pyplot as plt except ImportError: import sys # !{sys.executable} -m pip install matplotlib import matplotlib.pyplot as plt import numpy as np from neurolib.models.aln import ALNModel from neurolib.utils.parameterSpace import ParameterSpace from neurolib.optimize.exploration import BoxSearch # a nice color map plt.rcParams['image.cmap'] = 'plasma' # - aln = ALNModel() parameters = ParameterSpace({"mue_ext_mean": np.linspace(0, 3, 2), "mui_ext_mean": np.linspace(0, 3, 2)}) # info: chose np.linspace(0, 3, 21) or more, values here are low for testing search = BoxSearch(aln, parameters, filename="example-1.hdf") search.run() search.loadResults() print("Number of results: {}".format(len(search.results))) # Example analysis of the results # The .results attribute is a list and can be indexed by the run # number (which is also the index of the pandas dataframe .dfResults). # Here we compute the maximum firing rate of the node in the last second # and add the result (a float) to the pandas dataframe. for i in search.dfResults.index: search.dfResults.loc[i, 'max_r'] = np.max(search.results[i]['rates_exc'][:, -int(1000/aln.params['dt']):]) plt.imshow(search.dfResults.pivot_table(values='max_r', index = 'mui_ext_mean', columns='mue_ext_mean'), \ extent = [min(search.dfResults.mue_ext_mean), max(search.dfResults.mue_ext_mean), min(search.dfResults.mui_ext_mean), max(search.dfResults.mui_ext_mean)], origin='lower') plt.colorbar(label='Maximum rate [Hz]') plt.xlabel("Input to E") plt.ylabel("Input to I")
examples/example-1-aln-parameter-exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Group-level analysis: find the extreme 5% of cerebellar voxels for the
# motor (inverted gradient 1) and non-motor (gradients 1 and 2) areas of
# representation, plot them in gradient space, and export a label map for
# SUIT flatmap rendering.
#
# NOTE(review): this is a condensed, loop-based rewrite of the original
# copy-pasted per-lobule code.  Two asymmetries of the original are kept
# on purpose because they may be intentional -- confirm with the authors:
#   * the G2 "second representation" threshold is computed over CrusI+VIIb
#     but applied to CrusII+VIIb;
#   * the inverted-G1 "first representation" threshold is computed over
#     I_IV only but applied to I_IV+V+VI.

# cd /files_for_calculating_gradients_group

import scipy as scipy
import scipy.cluster as scipycluster
import nibabel as nib
import nibabel.gifti
import nilearn
from nilearn import datasets, plotting
import sys
import pandas as pd
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import pylab as plt
from matplotlib.colors import ListedColormap
mpl.rcParams['svg.fonttype'] = 'none'
sys.path.append('gradient_data/src/')
from PIL import Image

N_VOX = 18142  # cerebellar grayordinates in these dscalar files

res = nib.load('result_cerebellumonly.dscalar.nii').get_data()
res.shape = (8, N_VOX)

# +
# One 0/1 mask per SUIT atlas region (labels 1..28).
SUITatlas = nib.load('SUITatlas.dscalar.nii').get_data()
_REGION_LABELS = {
    'Left_I_IV': 1, 'Right_I_IV': 2, 'Left_V': 3, 'Right_V': 4,
    'Left_VI': 5, 'Vermis_VI': 6, 'Right_VI': 7,
    'Left_CrusI': 8, 'Vermis_CrusI': 9, 'Right_CrusI': 10,
    'Left_CrusII': 11, 'Vermis_CrusII': 12, 'Right_CrusII': 13,
    'Left_VIIb': 14, 'Vermis_VIIb': 15, 'Right_VIIb': 16,
    'Left_VIIIa': 17, 'Vermis_VIIIa': 18, 'Right_VIIIa': 19,
    'Left_VIIIb': 20, 'Vermis_VIIIb': 21, 'Right_VIIIb': 22,
    'Left_IX': 23, 'Vermis_IX': 24, 'Right_IX': 25,
    'Left_X': 26, 'Vermis_X': 27, 'Right_X': 28,
}
MASK = {name: ((SUITatlas == lab).astype(int))[0] for name, lab in _REGION_LABELS.items()}
# -

GRADIENT1 = res[0]
GRADIENT2 = res[1]

# +
# Get the top 5% of voxels for each area of motor representation (lowest 5%
# of gradient 1, via sign inversion) and non-motor representation (highest
# 5% of gradients 1 and 2).
threshold = 95

# name -> (gradient, lobules the percentile is computed over,
#          lobules the threshold is applied to); hemisphere prefix added later.
_REPS = {
    'G2_FIRSTREP':          (GRADIENT2,      ['VI', 'CrusI'],                   ['VI', 'CrusI']),
    'G2_SECONDREP':         (GRADIENT2,      ['CrusI', 'VIIb'],                 ['CrusII', 'VIIb']),   # asymmetry kept
    'G2_THIRDREP':          (GRADIENT2,      ['IX', 'X'],                       ['IX', 'X']),
    'G1_THIRDREP':          (GRADIENT1,      ['IX', 'X'],                       ['IX', 'X']),
    'G1_FIRSTANDSECONDREP': (GRADIENT1,      ['VI', 'CrusI', 'CrusII', 'VIIb'], ['VI', 'CrusI', 'CrusII', 'VIIb']),
    'G1_FIRSTREP_INVERSE':  (GRADIENT1 * -1, ['I_IV'],                          ['I_IV', 'V', 'VI']),  # asymmetry kept
    'G1_SECONDREP_INVERSE': (GRADIENT1 * -1, ['VIIIa', 'VIIIb'],                ['VIIIa', 'VIIIb']),
}

def _combined_mask(side, lobules):
    """Sum of the binary masks of `lobules` on hemisphere `side` ('Left'/'Right')."""
    return sum(MASK['{}_{}'.format(side, lob)] for lob in lobules)

def _extreme_voxels(grad, thr_lobules, apply_lobules, side):
    """0/1 map of voxels in `apply_lobules` whose gradient value reaches the
    per-representation 95th percentile computed over `thr_lobules`.  Zeros are
    turned into NaN first so the percentile is taken on the data only, making
    each cut proportional to the size of the representation."""
    thr_vals = (grad * _combined_mask(side, thr_lobules)).astype(float)
    thr_vals[thr_vals == 0] = np.nan
    thr = np.nanpercentile(thr_vals, threshold)
    out = grad * _combined_mask(side, apply_lobules)
    return (out >= thr).astype(float)

REP = {side: {name: _extreme_voxels(g, t, a, side)
              for name, (g, t, a) in _REPS.items()}
       for side in ('Left', 'Right')}
# -

def _both(name):
    """Left + Right map for one representation class."""
    return REP['Left'][name] + REP['Right'][name]

ALLG1MINUS = REP['Left']['G1_FIRSTREP_INVERSE'] + REP['Left']['G1_SECONDREP_INVERSE'] + REP['Right']['G1_FIRSTREP_INVERSE'] + REP['Right']['G1_SECONDREP_INVERSE']
ALLG1 = REP['Left']['G1_FIRSTANDSECONDREP'] + REP['Left']['G1_THIRDREP'] + REP['Right']['G1_FIRSTANDSECONDREP'] + REP['Right']['G1_THIRDREP']
ALLG2 = _both('G2_FIRSTREP') + _both('G2_SECONDREP') + _both('G2_THIRDREP')

# +
### PLOT IN 2D MAP
G1FIRSTSECOND = _both('G1_FIRSTANDSECONDREP')
G1THIRD = _both('G1_THIRDREP')
G1MINUSFIRST = _both('G1_FIRSTREP_INVERSE')
G1MINUSSECOND = _both('G1_SECONDREP_INVERSE')
G2FIRST = _both('G2_FIRSTREP')
G2SECOND = _both('G2_SECONDREP')
G3THIRD = _both('G2_THIRDREP')

ALLMINUSG1 = G1MINUSFIRST + G1MINUSSECOND
ALLG2 = G2FIRST + G2SECOND + G3THIRD
ALLG1 = G1FIRSTSECOND + G1THIRD

# (voxel-class map, RGB colour) per channel.  The original defined seven
# further channels that were all-zero and contributed nothing; dropped here.
_CHANNELS = [
    (ALLMINUSG1, (0.0, 1.0, 1.0)),  # inverse-G1 (motor) voxels
    (ALLG2,      (1.0, 0.0, 1.0)),  # gradient-2 voxels
    (ALLG1,      (1.0, 0.5, 0.0)),  # gradient-1 voxels
]
c = np.ones((N_VOX, 4))  # RGBA per voxel; alpha stays 1
for rgb_idx in range(3):
    c[:, rgb_idx] = sum(mask * rgb[rgb_idx] for mask, rgb in _CHANNELS)
# -

# +
# Scatter the voxels in gradient space (e1 vs e0), coloured by class.
res = nib.load('result_cerebellumonly.dscalar.nii').get_data()
res.shape = (8, N_VOX)

import seaborn as sns
sns.set(style="white")
sns.set_context("notebook", font_scale=1.5)

# One column per gradient component e0..e7.
df = pd.DataFrame()
for k in range(8):
    df['e{}'.format(k)] = np.array(res[k])

import matplotlib.cm as cm
sns.set_context(font_scale=2)
sns.set_style("ticks", {"xtick.major.size": 0.001, "ytick.major.size": 0.001})

# Invisible jointplot (alpha=0) used only for its axes; the coloured
# scatter is drawn on top.
g = (sns.jointplot('e0', 'e1', data=df, size=10, label='big', linewidth=0, marker='.',
                   alpha=0, color='w',
                   ylim=[np.min(df['e0']) - 0.5, np.max(df['e0']) + 0.5],
                   xlim=[np.min(df['e1']) - 0.5, np.max(df['e1']) + 0.5],
                   stat_func=None).set_axis_labels('Gradient 2', 'Gradient 1'))
g.ax_joint.scatter(df.e1, df.e0, c=c)
g.savefig(os.path.abspath('figure.png'), format='png', dpi=600, transparent=False)
plt.show()
# -

# +
### PLOT TO FLATMAP
# Encode each representation class as a small integer label and save the
# gradient-1 map (only REPRESENTG1 was saved in the original as well).
REPRESENTG1 = G1FIRSTSECOND * 1 + G1THIRD * 2
REPRESENTG2 = G1MINUSFIRST * 1 + G1MINUSSECOND * 2           # computed, not saved
REPRESENTG1MINUS = G2FIRST * 1 + G2SECOND * 2 + G3THIRD * 3  # computed, not saved
np.save('image.npy', REPRESENTG1)
# -

# +
# Wrap the label map into a one-map dscalar CIFTI using an existing
# cerebellum-only file as the header template.
res = nib.load('hcp.tmp.lh.dscalar.nii').get_data()
cortL = np.squeeze(np.array(np.where(res != 0)[0], dtype=np.int32))
res = nib.load('hcp.tmp.rh.dscalar.nii').get_data()
cortR = np.squeeze(np.array(np.where(res != 0)[0], dtype=np.int32))
cortLen = len(cortL) + len(cortR)  # NOTE(review): unused below
del res

emb = np.load('image.npy')
emb.shape = (N_VOX, 1)  # (18142, 1) instead of (18142,)

tmp = nib.load('cope1_cerebellumonly.dscalar.nii')  # must be a one-map, cerebellum-only dscalar
for idx, bm in enumerate(tmp.header.matrix[1].brain_models):
    print((idx, bm.index_offset, bm.brain_structure))
img = nib.cifti2.Cifti2Image(emb.T, nib.cifti2.Cifti2Header(tmp.header.matrix))
img.to_filename('image.dscalar.nii')

import subprocess
# -

# cd /om/user/xaviergp/gradient_data/

# +
# External steps: split out the volume, then render the flatmap in MATLAB.
subprocess.check_output("wb_command -cifti-separate image.dscalar.nii COLUMN -volume-all image_nifti.nii", shell=True);
subprocess.check_output('bash call_matlab.sh', shell=True);
Image.open('image.jpg').show()

### call_matlab.sh runs codetomovetoSUITspaceandplotflatmap.m, which
### reslices image_nifti.nii to SUIT space (suit_reslice_dartel with the
### Affine_MNI152_T1_2mm_seg1.mat / u_a_MNI152_T1_2mm_seg1.nii flowfield and
### c_MNI152_T1_2mm_pcereb.nii mask), renders it with suit_plotflatmap
### (Data = suit_map2surf(..., 'stats', @mode)) and saves image.jpg.
### The affine/flowfield/mask files come from the SUIT "isolate" and
### "normalize using Dartel" steps
### (http://www.diedrichsenlab.org/imaging/suit_function.htm);
### MNI152_T1_2mm is the structural space used in the Human Connectome Project.
10_plot_all_extreme_5percent_group.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Parseurs # # Dans ce notebook nous utiliserons le parseur [lxml](http://lxml.de/) qui est un binding de libxml2 et [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/) # ## Parser de l'html # # Beautiful Soup nous permet de parser simplement du contenu html. Même si le contenu est mal formé, le module bs reconstitue un arbre et offre des fonctions faciles à utiliser pour parcourir l'arbre ou y rechercher des éléments. # Beautiful Soup n'est pas un parseur, il utilise les parseurs et offre une API simplifiée à ses utilisateurs. # Nous travaillerons directement avec du contenu en ligne. Fini les exercices bidons, cette fois nous allons nous confronter à une question essentielle : combien d'accordages *open tuning* Neil Young utilise et comment sont-ils répartis dans son oeuvre ? # On trouve les infos sur les chansons de Neil Young et les accordages sur le fabuleux site [songx.se](http://songx.se/index.php) # Avec le module `urllib` nous allons pouvoir instancier un objet Beautiful Soup sans trop d'efforts # + from urllib.request import urlopen from bs4 import BeautifulSoup url = "http://songx.se/index.php" html = urlopen(url) soup = BeautifulSoup(html.read(), 'lxml') # - # Voilà nous avons maintenant un objet `soup` de classe Beautiful Soup. # La [doc](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) est très claire. # je cherche l'élement avec le tag 'title' print(soup.title) # le tag de l'élément print(soup.title.name) # le contenu textuel de l'élément print(soup.title.string) # Les informations qui nous intéressent sont contenues dans des éléments comme celui-ci : # `<div class="songrow"><a href="?song=505">Clementine</a> <small>(cover)</small><div style="float:right;">EADGBE</div></div>` # Où on trouve 1. 
le nom de la chanson ('Clementine') et l'accord utilisé ('EADGBE') # Pour trouver ces éléments dans l'arbre nous allons utiliser la méthode `find_all`. La méthode renvoie un iterable. for item in soup.find_all('div', attrs={'class':'songrow'})[:10]: print(item.a.string, item.div.string) # on peut aussi utiliser la notation suivante #for item in soup.find_all('div', class_="songrow"): # notre structure de données résultat # un dictionnaire avec en clé l'accordage et en valeur la liste des chansons qui utilisent cet accordage tunings = dict() for item in soup.find_all('div', attrs={'class':'songrow'}): song_title = item.a.string tuning = item.div.string if tuning in tunings: tunings[tuning].append(song_title) else: tunings[tuning] = [song_title] # 'Harvest Moon' utilise l'accordage DADGBE, y en a-t'il d'autres ? tunings['DADGBE'] # Combien de chansons pour chaque accordage ? for tuning in sorted(tunings.keys(), key=lambda x: len(tunings[x]), reverse=True): print("{}: {}".format(tuning, len(tunings[tuning]))) # Allez hop un histogramme # + # %matplotlib notebook import matplotlib.pyplot as plt values = [len(tunings[x]) for x in tunings] values plt.bar(range(0, len(values)), values) plt.xticks(range(0, len(values)), tunings.keys(), rotation=17) plt.show() # - # ## Parser de l'xml # Nous allons travailler sur un fichier au format TEI extrait du corpus *Corpus 14* # PRAXILING - UMR 5267 (PRAXILING) (2014). Corpus 14 [Corpus]. ORTOLANG (Open Resources and TOols for LANGuage) - www.ortolang.fr, https://hdl.handle.net/11403/corpus14/v1. # Le fichier se nomme ``josephine-1-150119.xml``. Il s'agit d'une lettre d'une femme de soldat à son époux. # Nous allons extraire du fichier TEI les informations suivantes : # - titre (``/TEI/teiHeader/fileDesc/titleStmt/title``) # - source (``/TEI/teiHeader/fileDesc/sourceDesc/p``) # - contenu de la lettre (``/TEI/text/body``) # ### Avec lxml # Pourquoi `lxml` et pas `xml.etree.ElementTree` ? 
Parce que : [1](http://lxml.de/intro.html) et surtout [2](http://lxml.de/performance.html) # La bonne nouvelle c'est que votre code sera aussi compatible avec `xml.etree.ElementTree` ou `xml.etree.cElementTree` parce que xml utilise l'API ElementTree. Sauf pour la méthode `xpath` qui est propre à `libxml`. # + from lxml import etree tree = etree.parse('josephine-1-150119.xml') root = tree.getroot() # Parcours des enfants de la racine (commentaires et éléments) for child in root: print(child.tag) # - # Le fichier utilise l'espace de nom TEI : ``<TEI xmlns="http://www.tei-c.org/ns/1.0">``, nous devrons l'indiquer dans nos instructions de recherche. # Voyons ça pour le titre (``/TEI/teiHeader/fileDesc/titleStmt/title``) # la méthode find renvoie le premier élément qui correspond au chemin argument (ElementPath et non Xpath) title = root.find("./tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title", namespaces={'tei':"http://www.tei-c.org/ns/1.0"}) print("Tag : {}".format(title.tag)) print("Texte : {}".format(title.text)) # Même traitement pour la source : source = root.find("./tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:p", namespaces={'tei':"http://www.tei-c.org/ns/1.0"}) print("Tag : {}".format(source.tag)) print("Texte : {}".format(source.text)) # lxml a aussi une méthode ``xpath`` qui permet d'utiliser directement des expressions xpath (sans oublier les espace de noms pour notre fichier) : source = root.xpath("/tei:TEI/tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:p", namespaces={'tei':'http://www.tei-c.org/ns/1.0'}) print(type(source)) #xpath retourne une liste print(source[0].text) #ou bien source = root.xpath("/tei:TEI/tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:p/text()", namespaces={'tei':'http://www.tei-c.org/ns/1.0'}) print(source[0]) # Pour le contenu il faut ruser. 
La difficulté ici tient à l'utilisation d'élements `<lb/>` de type 'milestones' pour noter les retours à la ligne :
# ```xml
# <p>
#     je reponse a ton aimableux lettres<lb/>
#     que nous a fait plaisir en naprenas<lb/>
#     que tu et enbonne santes car il<lb/>
#     anais de maime pour nous<lb/>
# </p>
# ```

# findall() returns a list of all elements matching the path argument
body = root.findall("./tei:text/tei:body/tei:p", namespaces={'tei':"http://www.tei-c.org/ns/1.0"})
for elem in body:
    # .text only yields the text node BEFORE the first child element,
    # so everything after the first <lb/> milestone is lost here
    print(elem.text)

# Here we only retrieve the text nodes preceding the `<lb/>` elements.
# An `xpath` query lets us retrieve ALL the text nodes instead.
body = root.xpath("//tei:text/tei:body//text()", namespaces={'tei':"http://www.tei-c.org/ns/1.0"})
for text in body:
    print(text, end="")

# ## With DOM

# The `ElementTree` API is specific to Python; `DOM` is a language-independent API.
# DOM implementations exist in most modern programming languages.
from xml.dom import minidom

dom = minidom.parse("josephine-1-150119.xml")
# the Document object
dom

# getElementsByTagNameNS(namespace, localname) searches the whole document;
# there is a single 'title' element in this document
title = dom.getElementsByTagNameNS("http://www.tei-c.org/ns/1.0", 'title')[0]
print(title)

# title is an Element object; to access its textual content
# we must retrieve its child text node
print(title.lastChild.nodeName)
print(title.lastChild.nodeValue)

# Same for the source, except we cannot simply search for every `p` element:
# we must find the `p` child of `sourceDesc`
sourceDesc = dom.getElementsByTagNameNS("http://www.tei-c.org/ns/1.0", 'sourceDesc')[0]
for node in sourceDesc.childNodes:
    # childNodes also contains whitespace text nodes, whose localName is None
    if node.localName == "p":
        print(node.lastChild.nodeValue)

# And now the letter body with its milestone elements.
# As an exercise, rewrite the `for` loops followed by `if` as list comprehensions.
# Collect and print every direct text node under the letter's <p> and <opener>
# elements (the <lb/> milestones themselves carry no text).
body = dom.getElementsByTagNameNS("http://www.tei-c.org/ns/1.0", 'body')[0]
for node in body.childNodes:
    # BUG FIX: the original condition was `node.localName == "p" or "opener"`,
    # which is always truthy because the non-empty string "opener" is true --
    # every child node (including stray whitespace/other elements) was visited.
    # The intent, per the surrounding text, is to match "p" OR "opener".
    if node.localName in ("p", "opener"):
        for in_node in node.childNodes:
            # "#text" is the DOM nodeName of text nodes; element children
            # such as <lb/> are skipped
            if in_node.nodeName == "#text":
                print(in_node.nodeValue, end="")
parseurs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Checking the use of wcs and sip
# Copied from <NAME>.

# ## Preparation
# At first, we prepare the data with referring the generate_case7.py.

# ### Loading modules

# #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser as ap
from astropy.table import QTable
from astropy.coordinates import SkyCoord, Longitude, Latitude, Angle
import numpy as np
import pandas as pd
import astropy.units as u
import warpfield as w

# ### Preparing functions

# +
# Fix the RNG seed for reproducibility of any randomized steps below.
seed = 42
np.random.seed(seed)

def sip_distortion_generator(sip_x, sip_y):
    ''' Generate a distortion function compatible with the SIP notation.

    - c[m,n] = sip_x[m,n]
    - d[m,n] = sip_y[m,n]

    Values for (m,n) = (0,0), (1,0), (0,1) are ignored.

    Parameters
    ----------
    sip_x, sip_y : 2-d coefficient arrays for the x- and y-displacements.

    Returns
    -------
    distortion : callable mapping a (2, N) position array to the distorted
        (2, N) array; row 0 is x, row 1 is y (as indexed below).
    '''
    def apply_sip(x, y, param, norm=1e6):
        # Polynomial displacement sum_{m,n} param[m,n] * x**m * y**n.
        # x and y are pre-scaled by `norm` for numerical conditioning;
        # the trailing norm**(m+n) factor cancels the scaling exactly.
        d = np.zeros_like(x)
        for m, n in np.ndindex(param.shape):
            # Skip the constant and linear terms (0,0), (1,0), (0,1)
            # as stated in the docstring (SIP convention).
            if n == 0 and m == 0: continue
            if n == 1 and m == 0: continue
            if n == 0 and m == 1: continue
            # NOTE(review): only terms with m+n <= 2 are applied, so the
            # third-order coefficients assigned in the next cell
            # (e.g. [1,2], [2,1], [2,2]) are silently ignored -- confirm
            # this restriction is intentional.
            if m + n <= 2:
                d += param[m, n]*(x/norm)**m*(y/norm)**n*(norm**(m+n))
        return d

    def distortion(position):
        position = np.array(position)
        # Copy so the displacement is computed from the undistorted input.
        x, y = position[0].copy(), position[1].copy()
        position[0] += apply_sip(x, y, sip_x)
        position[1] += apply_sip(x, y, sip_y)
        return position

    return distortion
# -

# ### Loading the source data

# +
from astropy.io import ascii as asc
table = asc.read('/Users/dkawata/work/obs/projs/JASMINE-Mission/analysis-testing-e2e/jasmine_warpfield/challenge/case7/case7_source_list.txt')
sources = SkyCoord(table['ra'], table['dec'], frame='icrs')
# -

# ### Preparing SIP coefficients (AP/BP matrices as c/d)
# The values below are written in case7_challenge_00.txt.
# + c = np.zeros(shape=(3, 3)) d = np.zeros(shape=(3, 3)) c[0,2]=-2.34153374723336e-09 c[1,1]=1.5792128155073916e-08 c[1,2]=7.674347291529089e-15 c[2,0]=-4.694743859349522e-09 c[2,1]=5.4256004358596465e-15 c[2,2]=-4.6341769281246224e-21 d[0,2]=-1.913280244657798e-08 d[1,1]=-5.622875292409728e-09 d[1,2]=-1.0128311203344238e-14 d[2,0]=3.1424733259527392e-09 d[2,1]=-9.08024075521211e-15 d[2,2]=-1.4123037013352912e-20 # - # ### Observing the sources # The pointing parameters are the same as the 0-th field written in case7_challenge_00_pointing.txt. # + lon = Longitude(265.62153148319356*u.deg) lat = Latitude(-28.851511272277236*u.deg) pa = Angle(301.8490035010801*u.deg) center = SkyCoord(lon, lat, frame='icrs') distortion = sip_distortion_generator(c, d) jasmine = w.Telescope(center, pa) jasmine.set_distortion(distortion) position = jasmine.observe(sources)[0] # - # ### Checking the distribution # + import matplotlib.pylab as plt plt.scatter(position['x'], position['y'], marker='x') plt.xlim([-25000, 25000]) plt.ylim([-25000, 25000]) plt.xlabel('x (um)') plt.ylabel('y (um)') # - plt.scatter(position['ra'], position['dec'], marker='x') plt.xlim([265.9, 265.3]) plt.ylim([-29.1, -28.6]) plt.xlabel('ra (deg)') plt.ylabel('dec (deg)') # The position angle was set to about 300 deg., but apparently the position angle seems to be about 60 deg, if it is defined as the angle between the Y axis and N axis measured in anti-clockwise direction. # ## WCS analysis # At first, we define a wcs-generator function. 
# + from astropy.wcs import WCS from astropy.wcs import Sip def wcs_generator(ra_ptg, dec_ptg, pa_ptg, scale, a=None, b=None, ap=None, bp=None): w = WCS(naxis=2) w.wcs.crpix=[0,0] w.wcs.cdelt=np.array([-scale, scale]) w.wcs.crval=[ra_ptg, dec_ptg] w.wcs.ctype=["RA---TAN-SIP", "DEC--TAN-SIP"] w.wcs.pc=[[ np.cos(pa_ptg*u.deg), -np.sin(pa_ptg*u.deg)], [np.sin(pa_ptg*u.deg), np.cos(pa_ptg*u.deg)]] # if a is not None and b is not None: w.sip = Sip(a, b, ap, bp, [0, 0]) return w # - # ### Calculating the plate scale f=jasmine.optics.focal_length*u.m scale=(1.*u.um/f*180./np.pi).si.value*u.deg/u.um print(scale) # ### Constructing wcs1 (No distortion) wcs1 = wcs_generator(lon.value, lat.value, pa.value, scale.value) # scale in deg/um. # ### Converting x/y to radec (without considering distortion) xy0 = np.concatenate(([position['x']], [position['y']])).T radec = wcs1.all_pix2world(xy0, 0) # ### Comparing x/y plt.scatter(radec[:,0], radec[:,1], marker='x', label='converted without dist.') plt.scatter(position['ra'], position['dec'], marker='+', label='true') # The xy values obtained with all_pix2world (but no distortion) is compared with that calculated by Ohsawa-san's tool (with distortion). # We cannot see significant difference in this plot. # ### Histogram of the differences in ra/dec dra = radec[:,0] - position['ra'] plt.hist(dra) ddec = radec[:,1] - position['dec'] plt.hist(ddec) # dra_arcsec = dra.to_value(u.arcsec) # ddec_arcsec = ddec.to_value(u.arcsec) plt.scatter(dra, ddec, marker='x') plt.xlabel('dRA (deg)') plt.ylabel('dDEC (deg)') # The difference is of ~10^-5. If the distortion parameters are set to zero, the difference reduces to ~10^-14. # ### wcs2 (AP/BP matrices are set) wcs2 = wcs_generator(lon.value, lat.value, pa.value, scale.value, ap=c, bp=d) # scale in deg/um. 
# ### Calculating radec with all_pix2world -> radec2 radec2 = wcs2.all_pix2world(xy0, 0) # ### Comparison between original ra/dec and calculated ra/dec dra = radec2[:,0] - position['ra'] plt.hist(dra) ddec = radec2[:,1] - position['dec'] plt.hist(ddec) # This plot looks very similar to the previous plot. Therefore, we can see that the AP/BP matrices are not used in the all_pix2world calculation. It seems to be natural because the matrices define the way to convert sky coordinates to pixel coordinates (inversion process of all_pix2world). # ### Converting ra/dec to x/y # At first, we define radec0 array which contains ra/dec values from the source file. radec0 = np.concatenate(([position['ra']], [position['dec']])).T # ### Using wcs1 (no distortion) -> xy1 xy1 = wcs1.all_world2pix(radec0, 0) plt.scatter(xy0[:, 0], xy0[:, 1], marker='x') plt.scatter(xy1[:, 0], xy1[:, 1], marker='+') # We cannot see significant difference. # ### Histogram of difference between true xy (xy0) and calculated xy (xy1) dx=xy1[:,0] - xy0[:,0] plt.hist(dx) dy=xy1[:,1] - xy0[:,1] plt.hist(dy) # ### Using wcs2 (with AP/BP matrices) -> xy2 xy2 = wcs2.all_world2pix(radec0, 0) # ### Histogram of difference between true xy (xy0) and calculated xy (xy2) dx = xy2[:,0] - xy1[:,0] plt.hist(dx) dy = xy2[:,1] - xy1[:,1] plt.hist(dy) plt.scatter(dx, dy, marker='x') plt.xlabel('dx') plt.ylabel('dy') # This plot looks very similar to the previous plot obtainted with wcs1. It seems that the AP/BP matrices are not used in all_world2pix function. # ### Another method for converting radec to xy with using AP/BP matrices -> xy3 xy3 = wcs2.sip_foc2pix(wcs2.wcs_world2pix(radec0, 0)-wcs2.wcs.crpix, 0) # ### Histogram of the difference between xy0 and xy3 dx = xy3[:,0] - xy0[:,0] plt.hist(dx) dy = xy3[:,1] - xy0[:,1] plt.hist(dy) plt.scatter(dx, dy, marker='x') plt.xlabel('dx') plt.ylabel('dy') # THe difference is very small. This method seems to successfully convert radec to xy with using AP/BP matrices. 
# If we want to use AP/BP matrices for converting radec to xy pixel coordinates, we have to use the last method. The all_world2pix method can treat only A/B matrices (not AP/BP matrices). Some discussion is available on the site below. # # https://github.com/astropy/astropy/issues/3675
tkamizuka/WorldToPixelConversion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- import ipywidgets as widgets from ipywidgets import interact, interactive, Layout, HBox, VBox, GridBox, Dropdown, Label HTML_1 = widgets.HTML( value = "<font size = '100px'; font color = 'cyan'; font face = 'papyrus'><b>\ Mi pagina Web \ </b></font>", layout = Layout(width='auto', height='auto',border='0px solid yellow', padding='25px') ) HTML_2 = widgets.HTML( value = "<font size = '5px'; font color = 'yellow'; font face = 'Lucida Handwriting'><b>\ Desarrollado por: <NAME> \ </b></font>", layout = Layout(width='auto', height='auto',border='0px solid yellow', padding='0px', margin='25px 0px 5px 0px') ) VBox0 = VBox([HTML_1, HTML_2], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='auto', height='auto', grid_gap='0px'), ) display(VBox0) # + tags=[] def play_yt(song): if song == 'Magic': url = 'Sj6XOh__TYM' elif song == '85 Again': url = 'Z1oB2EDu5XA' elif song == 'From Now On': url = 'Z6r-RsxQXlY' elif song == 'Sweet Child of Mine': url = '1w7OgIMMRc4' elif song == 'In the end': url = 'HEUY8eI6ozQ' elif song == 'New divide': url = 'rGedjDnQ9cw' elif song == 'Y es que sucede asi': url = 'PHPQbHjWGfs' elif song == 'Malvada': url = 'JMgWe1DBlV8' elif song == 'Degeneracion actual': url = 'lHAc1eCE8uY' elif song == 'Science of Fluids': url = 'pqg5wSndz3c' elif song == 'Veritasium': url = 'gIwvFMiJNVU' elif song == '3Blue1Brown': url = 'zs5z5xJL0LM' elif song == '<NAME>': url = 'BZyPBdVdv9E' elif song == '<NAME>': url = 'g-5bi7dxHP4' elif song == '<NAME>': url = '5OMIb6AZpBI' elif song == 'Simscale': url = 'bKKXyiqHDwg' elif song == 'Onshape': url = '8KWr3pTxna0' elif song == 'Dlubal': url = 'hAr7Cs5j7Yk' elif song == '<NAME>': url = 'LAr6oAKieHk' elif song == '<NAME>': url = 'ZZUI6ndTWLw' elif song == 
'<NAME>': url = '_RqteZDjDmQ' elif song == '<NAME>': url = 'OUKGsb8CpF8' elif song == '<NAME>': url = 'p693u53Q10U' elif song == 'Ronaldinho': url = '_dBz4dTZocg' elif song == '<NAME>': url = 'ct8HQTNYfIs' elif song == 'Michael': url = 'Pp0bTc0TTtI' elif song == 'Phelps': url = 'GqaoPGw5JIs' width = "950px" height = "450px" urls = "https://www.youtube.com/embed/" + url HTML0.value = f"<iframe src={urls} width={width} height={height}>" def category(categoria): if categoria == 'Musica': HTML3.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Genero:</b></font>" HTML2.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Cancion:</b></font>" Dropdown2.options = ['Rock', 'Peruca', 'Synthwave'] Dropdown1.options = ['Sweet Child of Mine', 'Y es que sucede asi', 'Magic'] elif categoria == 'Ciencia': HTML3.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Rama:</b></font>" HTML2.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Topico:</b></font>" Dropdown2.options = ['Ingenieria', 'Fisica', 'Software'] Dropdown1.options = ['Science of Fluids', '<NAME>', 'Simscale'] elif categoria == 'Deporte': HTML3.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Disciplina:</b></font>" HTML2.value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Highlights:</b></font>" Dropdown2.options = ['Basketball', 'Football', 'Natacion'] Dropdown1.options = ['<NAME>', '<NAME>', '<NAME>'] def gender(genero): if genero == 'Rock': Dropdown1.options = ['Sweet Child of Mine', 'In the end', 'New divide'] elif genero == 'Peruca': Dropdown1.options = ['Y es que sucede asi', 'Malvada', 'Degeneracion actual'] elif genero == 'Synthwave': Dropdown1.options = ['Magic', '85 Again', 'From Now On'] elif genero == 'Ingenieria': Dropdown1.options = ['Science of Fluids', 'Veritasium', '3Blue1Brown'] elif genero == 'Fisica': Dropdown1.options = ['<NAME>', '<NAME>', 
'<NAME>'] elif genero == 'Software': Dropdown1.options = ['Simscale', 'Onshape', 'Dlubal'] elif genero == 'Basketball': Dropdown1.options = ['<NAME>', '<NAME>', '<NAME>'] elif genero == 'Football': Dropdown1.options = ['<NAME>', '<NAME>', 'Ronaldinho'] elif genero == 'Natacion': Dropdown1.options = ['<NAME>', 'Michael', 'Phelps'] file1 = open('85Again.mp3', 'rb') Audio1 = widgets.Audio( value = file1.read(), autoplay = False, controls = True, layout = Layout(justify_content='space-between', width='98.5%', height='20px'), ) display(Audio1) Dropdown1 = widgets.Dropdown( options = ['Magic', '85 Again', 'From Now On'], value ='Magic', #description = 'Cancion:', disabled = False, layout = Layout(width='120px') ) f_play_yt = interactive(play_yt, song = Dropdown1) Dropdown1.description=''; Dropdown2 = widgets.Dropdown( options = ['Rock', 'Peruca', 'Synthwave'], value ='Synthwave', #description = 'Cancion:', disabled = False, layout = Layout(width='120px') ) f_gender = interactive(gender, genero = Dropdown2) Dropdown2.description = ''; Dropdown3 = widgets.Dropdown( options = ['Musica', 'Ciencia', 'Deporte'], value ='Musica', #description = 'Cancion:', disabled = False, layout = Layout(width='120px') ) f_category = interactive(category, categoria = Dropdown3) Dropdown3.description = ''; width = "950px" height = "450px" urls = "https://www.youtube.com/embed/Sj6XOh__TYM" HTML0 = widgets.HTML( value = f"<iframe src={urls} width={width} height={height}>", ) HTML1 = widgets.HTML( value = "<font size = '10'; font color = 'cyan'; font face = 'Courier New'><b>Mi PlayList:</b></font>", layout = Layout(width='auto', height='100px') ) HTML2 = widgets.HTML( value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Cancion:</b></font>", layout = Layout(width='160px', height='100px') ) HTML3 = widgets.HTML( value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Genero:</b></font>", layout = Layout(width='160px', height='100px') ) HTML4 = 
widgets.HTML( value = "<font size = '5'; font color = 'cyan'; font face = 'Courier New'><b>Categoria:</b></font>", layout = Layout(width='160px', height='100px'), ) HBox1 = [ HBox([HTML1], layout=Layout(justify_content='space-between')), HBox([HTML4, Dropdown3], layout=Layout(justify_content='space-between')), HBox([HTML3, Dropdown2], layout=Layout(justify_content='space-between')), HBox([HTML2, Dropdown1], layout=Layout(justify_content='space-between')) ] VBox1 = VBox(HBox1, layout=Layout( border='0px solid yellow', padding='5px', align_items='baseline', width='100%', height='455px') ) GridBox1 = GridBox(children=[VBox1, HTML0], layout = Layout( width ='1285px', align_items ='stretch', grid_template_rows ='auto', grid_template_columns ='auto auto', grid_gap ='0px 20px', padding ='20px 0px 35px 0px') ) display(GridBox1) # - HTML_3 = widgets.HTML( value = "<font size = '40px'; font color = 'cyan'; font face = 'Courier New'><b>\ Proyectos de Ingenieria Universitaria \ </b></font>", layout = Layout(width='auto', height='auto',border='0px solid yellow', padding='0px', margin='0px 0px 25px 0px') ) display(HTML_3) # + tags=[] file2 = open("GIFS/Proyecto1.gif", "rb") image1 = file2.read() Image1 = widgets.Image( value = image1, format = 'gif', width = 500, height = 'auto', ) file3 = open("GIFS/Proyecto2.gif", "rb") image2 = file3.read() Image2 = widgets.Image( value = image2, format = 'gif', width = 500, height = 250, ) file4 = open("GIFS/Proyecto3.gif", "rb") image3 = file4.read() Image3 = widgets.Image( value = image3, format = 'gif', width = 500, height = 375, ) file5 = open("GIFS/Proyecto4.gif", "rb") image4 = file5.read() Image4 = widgets.Image( value = image4, format = 'gif', width = 500, height = 375, ) file6 = open("GIFS/Proyecto5.gif", "rb") image5 = file6.read() Image5 = widgets.Image( value = image5, format = 'gif', width = 500, height = 250, ) file7 = open("GIFS/Proyecto6.gif", "rb") image6 = file7.read() Image6 = widgets.Image( value = image6, format = 'gif', 
width = 500, height = 180, ) file8 = open("GIFS/Proyecto7.gif", "rb") image7 = file8.read() Image7 = widgets.Image( value = image7, format = 'gif', width = 500, height = 380, ) file9 = open("GIFS/Proyecto8.gif", "rb") image8 = file9.read() Image8 = widgets.Image( value = image8, format = 'gif', width = 500, height = 380, ) file10 = open("GIFS/Proyecto9.gif", "rb") image9 = file10.read() Image9 = widgets.Image( value = image9, format = 'gif', width = 500, height = 380, ) file11 = open("GIFS/prototipo.jpg", "rb") image10 = file11.read() Image10 = widgets.Image( value = image10, format = 'jpg', width = 500, height = 'auto', ) file12 = open("GIFS/Proyecto2_2.gif", "rb") image11 = file12.read() Image11 = widgets.Image( value = image11, format = 'gif', width = 500, height = 180, ) file13 = open("GIFS/Proyecto0.gif", "rb") image12 = file13.read() Image12 = widgets.Image( value = image12, format = 'gif', width = 500, height = 180, ) file14 = open("GIFS/Proyecto6_2.gif", "rb") image13 = file14.read() Image13 = widgets.Image( value = image13, format = 'gif', width = 500, height = 180, ) #1----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML5 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Diseño de un concentrador solar de disco parabolico de foco fijo</b></font>", layout = Layout(width='auto', height='auto') ) HTML6 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este proyecto es el de aprovechar la energia solar \ existente en la ciudad de Arequipa, mediante el uso de un concentrador \ solar de disco parabolico. 
A diferencia de los prototipos ya existentes \ de este tipo de concentradores, se ha desarrollado un nuevo modelo \ capaz de seguir el movimiento del sol manteniendo su foco en un punto fijo \ independientemente de la hora o ubicacion del concentrador solar. \ Ademas de el diseño, tambien \ se desarrollo un modelo matematico de transferencia de calor, que \ permite calcular el tiempo que tomara calentar cierta cantidad de \ agua bajo ciertos parametros de funcionamiento. Dicho modelo matematico se implemento como una \ <a href='https://mybinder.org/v2/gh/junior19a2000/WebApp/HEAD?urlpath=%2Fvoila%2Frender%2FWebSolarApp.ipynb'><em><font color='red'>aplicacion web</font></em></a> que puede ser utilizada por cualquier persona. \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML7 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Python <br>\ • Onshape <br>\ • Simscale <br>\ • Cadasio \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML8 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Transferencia de Calor y Masa <br>\ • Algebra matricial <br>\ • Resistencia de Materiales <br>\ • Diseño de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox2 = VBox([HTML5, HTML6], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox3 = VBox([Image1, Image10], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='25px'), ) Accordion1 = widgets.Accordion( children = [VBox2, HTML7, HTML8], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion1.set_title(0, 'Descripcion del Proyecto') Accordion1.set_title(1, 'Softwares utilizados') Accordion1.set_title(2, 'Asignaturas relacionadas') Accordion1.selected_index = 0 HBox2 = HBox([VBox3, Accordion1], layout=Layout( border='0px solid 
yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #2----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML9 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Desarrollo de una aplicacion para el diseño de ejes de transmicion</b></font>", layout = Layout(width='auto', height='auto') ) HTML10 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este proyecto es el desarrollo de una aplicacion de escritorio \ que permita diseñar ejes de transmicion de potencia y otros componentes como \ correas, cadenas, engranajes y chavetas. El diseño del eje se realiza bajo \ los criterios de fatiga, resistencia, deformacion y velocidades criticas \ emplenado metodos analiticos y de elementos finitos, ya que la aplicacion se \ desarrollo con el fin de calcular cualquier tipo de eje, independientemente \ de sus parametros fisicosm como el numero de apoyos, el tipo de apoyos, fuerzas \ y momentos a los que este sometido, propiedades geometricas y mecanicas, etc. \ Para un mejor entendimiento de los resultados, el metodo de los elementos finitos \ permite realizar la simulacion de los desplazamientos que sufre el eje. El siguiente \ <a href='https://drive.google.com/file/d/1pp7nr3ccIa5y5m0T18hAzzW8pHCIYi-o/view?usp=sharing'><em><font color='red'>documento</font></em></a> muestra como puede ser utilizado este programa para el diseño de un eje de transmicion de potencia. 
\ </b></font>", layout = Layout(width='auto', height='auto') ) HTML11 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Matlab <br>\ • Appdesigner <br>\ • Overleaf <br>\ • Filmora \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML12 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Programacion en Matlab <br>\ • El metodo de los elementos finitos <br>\ • Resistencia de Materiales <br>\ • Diseño de elementos de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox4 = VBox([HTML9, HTML10], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox5 = VBox([Image2, Image11], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='25px'), ) Accordion2 = widgets.Accordion( children = [VBox4, HTML11, HTML12], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion2.set_title(0, 'Descripcion del Proyecto') Accordion2.set_title(1, 'Softwares utilizados') Accordion2.set_title(2, 'Asignaturas relacionadas') Accordion2.selected_index = 0 HBox3 = HBox([VBox5, Accordion2], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #3----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML13 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Desarrollo de una aplicacion para el calculo de estructuras 2D</b></font>", layout = Layout(width='auto', height='auto') ) HTML14 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este proyecto es el de desarrollar una apliacion que \ permita calcular estructuras bidimensionales 
mediante el metodo \ de los elementos finitos. La aplicacion permite dibujar la estructura, \ definir los apoyos, establecer las propiedades geometricas y mecanicas y \ calcular los esfuerzos y deformacion a las que esta sujeta dicha estructura \ producto de las cargas ingresadas por el usuario. \ Ademas de estos resultados, la aplicacion permite realizar una simulacion \ de los esfuerzos y desplazamientos para una mejor interpretacion de los \ resultados por parte del usuario. De esta manera el usuario es capaz de \ modificar la estructura y sus propiedades con el fin de obtener resultados optimos. En el siguiente \ <a href='https://drive.google.com/file/d/1T6Jrom5he2cBqYWv4Z4eEG3w_S2rvmZE/view?usp=sharing'><em><font color='red'>link</font></em></a> se encuentra el codido de la aplicacion. \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML15 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Matlab <br>\ • Appdesigner <br>\ • Foxit Phantom <br>\ • Filmora \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML16 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • El metodo de los elementos finitos <br>\ • Algebra matricial <br>\ • Resistencia de Materiales <br>\ • Diseño Estructural \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox6 = VBox([HTML13, HTML14], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox7 = VBox([Image3], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='35px'), ) Accordion3 = widgets.Accordion( children = [VBox6, HTML15, HTML16], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion3.set_title(0, 'Descripcion del Proyecto') Accordion3.set_title(1, 'Softwares utilizados') Accordion3.set_title(2, 'Asignaturas relacionadas') 
Accordion3.selected_index = 0 HBox4 = HBox([VBox7, Accordion3], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #4----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML17 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Desarrollo de una aplicacion para el calculo de silos y estructuras 3D</b></font>", layout = Layout(width='auto', height='auto') ) HTML18 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este proyecto es el de desarrollar una aplicacion que \ permita calcular silos de almacenamiento y su estructura de soporte \ tridimensional, definiendo parametros geometricos y propiedades mecanicas \ tanto del silo como los de la estructura de soporte. \ La aplicacion calcula los esfuerzos y desplazamientos en ambas estructuras, \ permitiendo adicionalmente visualizar una simulacion de dichos desplazamientos. \ Gracias a esta aplicacion el usuario puede modificar los parametros principales \ de su diseño, hasta alcanzar un punto optimo de manera rapida y eficiente. \ La metodologia de calculo de los silos se basa en la norma UNE y el de la estructura \ es mediante el MEF teniendo en cuenta la norma ASME. El codigo de este programa se encuentra en el siguiente \ <a href='https://drive.google.com/file/d/1w7sqJsFFMzD06JVGSOU_6JL_uEsIAOvy/view?usp=sharing'><em><font color='red'>link</font></em></a>. 
\ </b></font>", layout = Layout(width='auto', height='auto') ) HTML19 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Matlab <br>\ • Appdesigner <br>\ • Filmora <br>\ • Foxit Phantom \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML20 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • El metodo de los elementos finitos <br>\ • Calculo de silos de almacenamiento <br>\ • Resistencia de Materiales <br>\ • Diseño estructural \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox8 = VBox([HTML17, HTML18], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox9 = VBox([Image4], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='35px'), ) Accordion4 = widgets.Accordion( children = [VBox8, HTML19, HTML20], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion4.set_title(0, 'Descripcion del Proyecto') Accordion4.set_title(1, 'Softwares utilizados') Accordion4.set_title(2, 'Asignaturas relacionadas') Accordion4.selected_index = 0 HBox5 = HBox([VBox9, Accordion4], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #5----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML21 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Desarrollo de una aplicacion para el calculo de elementos bidimensionales</b></font>", layout = Layout(width='auto', height='auto') ) HTML22 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este proyecto es el de desarrollar una aplicacion \ que permita calcular los esfuerzos y 
desplazamientos de elementos \ bidimensionales mediante el metodo de los elementos finitos. \ Para lograr este objetivo, primero se desarrollo un algoritmo de mallado \ que permita definir la geometria inicial y los elementos que se crean a partir de esta \ con sus respectivos nodos. Gracias a este algoritmo, los resultados se pueden mostrar \ para cada uno de los elementos generados por el mallado y asi identificar \ las zonas de mayor esfuerzo o desplazamiento. Adicionalmente, el programa permite \ la simulacion de los desplazamientos a los que esta sometido el elemento 2D producto de las cargas \ ingresadas por el usuario. En el siguiente \ <a href='https://drive.google.com/file/d/1ftBon51p5d7ue_9yyNfBvwm5ljTkBz2h/view?usp=sharing'><em><font color='red'>link</font></em></a> se encuentra el codigo de la aplicacion. \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML23 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Matlab <br>\ • Appdesigner <br>\ • Excel <br>\ • Filmora \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML24 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • El metodo de los elementos finitos <br>\ • Algebra matricial <br>\ • Resistencia de Materiales <br>\ • Diseño de elementos de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox9 = VBox([HTML21, HTML22], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox10 = VBox([Image5, Image12], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='0px'), ) Accordion5 = widgets.Accordion( children = [VBox9, HTML23, HTML24], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion5.set_title(0, 'Descripcion del Proyecto') Accordion5.set_title(1, 'Softwares utilizados') Accordion5.set_title(2, 
'Asignaturas relacionadas') Accordion5.selected_index = 0 HBox6 = HBox([VBox10, Accordion5], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #6----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML25 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Estudio de la transferencia de calor por conveccion natural</b></font>", layout = Layout(width='auto', height='auto') ) HTML26 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este estudio fue el de analizar la transferencia de calor \ por conveccion natural en mi dormitorio con el proposito de entender y validar \ los conocimientos adquiridos del libro de transferencia de calor y masa de Cengel, \ en el tema de transferencia de calor por conveccion natural. \ Se obtuvieron resultados predecidos por la teoria, por lo que el analisis \ fue realizado de manera correcta. Aparte, se propuso ver el efecto del calor generado por las luces \ y equipos sobre la temperatura del cuarto y el confort termico del habitante. \ Ademas de este proyecto, se realizaron dos mas relacionados a la transferencia de calor por conduccion y conveccion forzada. \ El primero esta relacionado al desarrollo de un intercambiador de calor con agua y se puede visualizar en el siguiente \ <a href='https://drive.google.com/file/d/1NJ6MXzhvm5WSWnEZnNKLHr-80omhSrwW/view?usp=sharing'><em><font color='red'>link</font></em></a>. \ Mientras que el segundo consiste en el desarrollo de una torre de enfriamiento con aire y agua, y se puede visualizar en el siguiente \ <a href='https://drive.google.com/file/d/1p1qE7a9uDxxXh0TTi-9oRibyCJVXzTuj/view?usp=sharing'><em><font color='red'>link</font></em></a>. 
\ </b></font>", layout = Layout(width='auto', height='auto') ) HTML27 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Simscale <br>\ • Onshape <br>\ • Paraview <br>\ • Filmora \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML28 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Transferencia de Calor y Masa <br>\ • Conveccion natural <br>\ • Intercambiadores de calor <br>\ • Torres de enfriamiento \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox11 = VBox([HTML25, HTML26], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox12 = VBox([Image6, Image13], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='20px'), ) Accordion6 = widgets.Accordion( children = [VBox11, HTML27, HTML28], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion6.set_title(0, 'Descripcion del Proyecto') Accordion6.set_title(1, 'Softwares utilizados') Accordion6.set_title(2, 'Asignaturas relacionadas') Accordion6.selected_index = 0 HBox7 = HBox([VBox12, Accordion6], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #7----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML29 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Diseño del sistema oleohidraulico para accionar la compuerta que controla el caudal de agua de un canal</b></font>", layout = Layout(width='auto', height='auto') ) HTML30 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El tiempo de izaje dependerá de la velocidad de accionamiento al que va a estar sometido 
el sistema.<br> \ El área del vástago será el encargado de levantar la compuerta.<br> \ Observamos que la cantidad a elevar es cercana a las 10 Toneladas es por esa razón que los tiempos de extensión y retracción contaran con una cantidad aceptable de tiempo.<br> \ Se comprobó que a mayores presiones obtendremos mayores fuerzas para que esta pueda vencer el peso de izaje de la compuerta que sería la suma del peso de material más la fricción de las rodaduras.<br> \ Se concluyó que en la parte de los limites permisibles de los componentes oleohidraulicos usados en nuestro proyecto están en el rango estimado de la tabla de referencia de la ISO 4406.<br> \ La descripcion mas detallada de este trabajo se puede encontrar en el siguiente \ <a href='https://drive.google.com/file/d/1MVCIx9SZiS7L2EATQ-ry2WfxYNGCDrIf/view?usp=sharing'><em><font color='red'>link</font></em></a>. \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML31 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Automation Studio <br>\ • Onshape <br>\ • Filmora <br>\ • Word \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML32 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Oleohidraulica <br>\ • Neumatica <br>\ • Resistencia de Materiales <br>\ • Diseño de elementos de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox13 = VBox([HTML29, HTML30], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox14 = VBox([Image7], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='35px'), ) Accordion7 = widgets.Accordion( children = [VBox13, HTML31, HTML32], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion7.set_title(0, 'Descripcion del Proyecto') Accordion7.set_title(1, 'Softwares utilizados') 
Accordion7.set_title(2, 'Asignaturas relacionadas') Accordion7.selected_index = 0 HBox8 = HBox([VBox14, Accordion7], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #8----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML33 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Analisis del izaje de una compuerta hidraulica accionada por una caja reductora</b></font>", layout = Layout(width='auto', height='auto') ) HTML34 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este analisis fue calular los elementos necesarios para \ permitir el izaje de una compuerta que controla el caudal de un canal. \ Estos elementos fueron: motor electrico, caja reductora, engranajes rectos, \ engranajes helicoidales, ejes de trasmicion de las etapas de la caja reductora, \ rodamientos, corona, tornillo sin fin, y otros relacionados. \ El analisis inicial incluyo las dimensiones preliminares de la compuerta y \ la fuerza de izaje necesaria para poder levantar dicha compuerta. \ A partir de este analisis se procedio a calcular cada uno de los componentes \ ya mencionados. Finalmente, se seleccinaron los componentes segun catalogos de fabricantes. \ Parte de los calculos de los elementos mencionados, se pueden encontrar en el siguiente \ <a href='https://drive.google.com/file/d/1Sk2pmz7BhdZHBz6b5o4-iUPnKHoEYyHB/view?usp=sharing'><em><font color='red'>link</font></em></a>. 
\ </b></font>", layout = Layout(width='auto', height='auto') ) HTML35 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Matlab <br>\ • Onshape <br>\ • Simscale <br>\ • Filmora \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML36 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Diseño de elementos de maquinas <br>\ • Algebra matricial <br>\ • Resistencia de Materiales <br>\ • Diseño de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox15 = VBox([HTML33, HTML34], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox16 = VBox([Image8], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='35px'), ) Accordion8 = widgets.Accordion( children = [VBox15, HTML35, HTML36], layout = Layout(justify_content='center', width='600px', height='auto') ) Accordion8.set_title(0, 'Descripcion del Proyecto') Accordion8.set_title(1, 'Softwares utilizados') Accordion8.set_title(2, 'Asignaturas relacionadas') Accordion8.selected_index = 0 HBox9 = HBox([VBox16, Accordion8], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #9----------------------------------------------------------------------------------------------------------------------------------------------------------- HTML37 = widgets.HTML( value = "<font size = '3.5'; font color = 'blue'; font face = 'Courier New'><b>Diseño de una planta procesadora de harina de maiz morado:</b></font>", layout = Layout(width='auto', height='auto') ) HTML38 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ El objetivo de este diseño fue el diseño de una planta procesadora \ de harina de maiz morado en la ciudad de Arequipa. 
Para ellos se vieron temas \ como el proceso industrial para el procesado de harina de maiz morado, \ la capacidad de trabajo, las dimensiones generales de la planta y los equipos \ necesarios para cada uno de los procesos de esta planta. Se realizo el calculo y diseño \ de los silos de almacenamientos, la estructura de soporte de los silos, tornillo transportador, \ elevador de cangilones y fajas transportadoras. Se hizo un analisis mas detallado en el comportamiento de la harina \ al salir de los silos, para asegurar un flujo requerido constante. Esto se realizo mediante el metodo de los \ elementos discretos en el software Newton, obteniendo como resultado final el numero de vueltas que el operario debe realizar \ para la correcta apertura de la tapa inferior de los silos. El analisis detallado de estos calculos se puede encontrar en el siguiente \ <a href='https://drive.google.com/file/d/1ohXldipNkT6eEyIwTizm4jm2Mr8rdzVe/view?usp=sharing'><em><font color='red'>link</font></em></a>. \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML39 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Newton DEM <br>\ • Onshape <br>\ • Simscale <br>\ • Matlab \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML40 = widgets.HTML( value = "<font size = '2.5'; font color = 'black'; font face = 'Courier New'><b> \ • Diseño de equipos de transporte de materiales <br>\ • Algebra matricial <br>\ • Resistencia de Materiales <br>\ • Diseño de elementos de maquinas \ </b></font>", layout = Layout(width='auto', height='auto') ) VBox17 = VBox([HTML37, HTML38], layout=Layout( border='0px solid yellow', padding='0px', align_items='stretch', width='auto', height='auto') ) VBox18 = VBox([Image9], layout=Layout( border='0px solid yellow', padding='0px', align_items='center', width='600px', height='auto', grid_gap='35px'), ) Accordion9 = widgets.Accordion( children = [VBox17, HTML39, HTML40], layout = 
Layout(justify_content='center', width='600px', height='auto') ) Accordion9.set_title(0, 'Descripcion del Proyecto') Accordion9.set_title(1, 'Softwares utilizados') Accordion9.set_title(2, 'Asignaturas relacionadas') Accordion9.selected_index = 0 HBox10 = HBox([VBox18, Accordion9], layout=Layout( border='0px solid yellow', padding='15px 20px 5px 0px', align_items='flex-start', width='1275', height='550px') ) #----------------------------------------------------------------------------------------------------------------------------------------------------------- Tab1 = widgets.Tab() Tab1.children = [HBox2, HBox3, HBox4, HBox5, HBox6, HBox7, HBox8, HBox9, HBox10] Tab1.layout = Layout(justify_content='space-between', width='1275px', height='620px', margin='0px 0px 40px 0px') for i in range(len(Tab1.children)): Tab1.set_title(i, ['Proyecto '+ str(i + 1)]) display(Tab1) # + def asignaturas(asignatura): if asignatura == 'Ciencias Generales': Select1.options = ['<NAME> - Chemistry'] elif asignatura == 'Resistencia de Materiales': Select1.options = ['<NAME>. 
- Mecánica de Materiales'] elif asignatura == 'Termodinamica': Select1.options = ['<NAME> - Termodinamica'] elif asignatura == 'Mecanica de Fluidos': Select1.options = ['<NAME> - Mecanica de Fluidos'] elif asignatura == 'Metodos Numericos': Select1.options = ['<NAME> - Métodos Numéricos para Ingenieros'] elif asignatura == 'Bombas y Turbinas Hidraulica': Select1.options = ['<NAME> - Fluid mechanics and hydraulic machines'] elif asignatura == 'Transferencia de Calor y Masa': Select1.options = ['<NAME> - Transferencia de calor y masa'] elif asignatura == 'Diseño de Elementos de Maquinas': Select1.options = ['<NAME> - Design of machine elements'] elif asignatura == 'Diseño de Equipos de Transporte de Materiales': Select1.options = ['Dietmar Schulze - Powders and Bulk Solids_ Behavior, Characterization, Storage and Flow'] elif asignatura == 'Diseño de Sistemas Termicos': Select1.options = ['Flynn, <NAME> - Kern''s Process Heat Transfer'] elif asignatura == 'Oleohidraulica y Neumatica': Select1.options = ['<NAME> - Pneumatic Conveying Design Guide'] elif asignatura == 'Refrigeracion y Aire Acondicionado': Select1.options = ['<NAME>., <NAME>., <NAME>. 
- Refrigeration and Air-Conditioning'] elif asignatura == 'El Metodo de los Elementos Finitos': Select1.options = ['A First Course in the Finite Element Method', \ 'Essentials of the finite element method for mechanical and structural engineers', \ 'Mecanica computacional en ingenieria con aplicaciones en Matlab'] elif asignatura == 'Programacion': Select1.options = ['<NAME> - Matlab for Engineers'] def libros(libro): width = "544px" height = "519px" if libro == 'A First Course in the Finite Element Method': urls = "https://drive.google.com/file/d/1Q1Y23QPRmDfFosntzNRvrWPlUu0MCaKU/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == 'Essentials of the finite element method for mechanical and structural engineers': urls = "https://drive.google.com/file/d/1qNKGyRA1NOJ6SrzAXUW-T97y9q2CL9PN/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == 'Mecanica computacional en ingenieria con aplicaciones en Matlab': urls = "https://drive.google.com/file/d/1CkVYy36zt6RndRbAV8SUfqgl-VbiE_MX/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Chemistry': urls = "https://drive.google.com/file/d/18ghdMieya7-lt9aJb0vbMHaPuns_Sxp8/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME>. - Mecánica de Materiales': urls = "https://drive.google.com/file/d/1LDt4gn7VhGPIJBuyMLT9aQv0fhtWCiCL/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == 'Yun<NAME> - Termodinamica': urls = "https://drive.google.com/file/d/1UEU4CrLk6LCsLqYvusA7kU7mCF8EK3uL/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == 'Yunus A. 
Çengel - Mecanica de Fluidos': urls = "https://drive.google.com/file/d/1R4LeFkS-y5Tw_sETB2g_2XQ30oNzwsvY/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Métodos Numéricos para Ingenieros': urls = "https://drive.google.com/file/d/1JmgdrRZ6MWuldxfDsbQkzYWkfYAQUB_4/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Fluid mechanics and hydraulic machines': urls = "https://drive.google.com/file/d/1jQHLKi_Yh024rg0FNUedOLvhwizUsp_k/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Transferencia de calor y masa': urls = "https://drive.google.com/file/d/15fwbFLSBiO3UGjHcZwkQvSqbhAzznolp/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Design of machine elements': urls = "https://drive.google.com/file/d/1NY_izXiWIG-AA6JcxvbBwqejGZ4e_a-e/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Powders and Bulk Solids_ Behavior, Characterization, Storage and Flow': urls = "https://drive.google.com/file/d/1b-4EPWGVIMi614GGJmHIvZTlVD4r6iML/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == 'Flynn, <NAME>, Louis - Kern''s Process Heat Transfer': urls = "https://drive.google.com/file/d/1QBDXqJyvLp-x2KLp8IBpnRr3E_6vzlhV/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Pneumatic Conveying Design Guide': urls = "https://drive.google.com/file/d/1mDEfCi0d0l7xeZAbsins1kK8CZJ4nnbk/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME>., <NAME>., <NAME>. 
- Refrigeration and Air-Conditioning': urls = "https://drive.google.com/file/d/1PBQBgv8fIaHR1xtqYw2faTe101m49A_b/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" elif libro == '<NAME> - Matlab for Engineers': urls = "https://drive.google.com/file/d/10vv4kqvb9iNTaS8mVUGdQIj2CFVewlNH/preview" HTML44.value = f"<iframe src={urls} width={width} height={height}>" HTML41 = widgets.HTML( value = "<font size = '40'; font color = 'cyan'; font face = 'Courier New'><b> \ Biblioteca personalizada \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML42 = widgets.HTML( value = "<font size = '5.5'; font color = 'cyan'; font face = 'Courier New'><b> \ Asignaturas \ </b></font>", layout = Layout(width='auto', height='auto') ) HTML43 = widgets.HTML( value = "<font size = '5.5'; font color = 'cyan'; font face = 'Courier New'><b> \ Libros \ </b></font>", layout = Layout(width='auto', height='auto') ) ToggleButtons1 = widgets.ToggleButtons( value='El Metodo de los Elementos Finitos', options=['Ciencias Generales', 'Resistencia de Materiales', 'Termodinamica', 'Mecanica de Fluidos', 'Metodos Numericos', 'Bombas y Turbinas Hidraulica', \ 'Transferencia de Calor y Masa', 'Diseño de Elementos de Maquinas', 'Diseño de Equipos de Transporte de Materiales', 'Diseño de Sistemas Termicos', \ 'Oleohidraulica y Neumatica', 'Refrigeracion y Aire Acondicionado', 'El Metodo de los Elementos Finitos', 'Programacion'], description='', disabled=False, button_style='primary', # 'success', 'info', 'warning', 'danger' or '' tooltips=['Ciencias Generales', 'Resistencia de Materiales', 'Termodinamica', 'Mecanica de Fluidos', 'Metodos Numericos', 'Bombas y Turbinas Hidraulica', \ 'Transferencia de Calor y Masa', 'Diseño de Elementos de Maquinas', 'Diseño de Equipos de Transporte de Materiales', 'Diseño de Sistemas Termicos', \ 'Oleohidraulica y Neumatica', 'Refrigeracion y Aire Acondicionado', 'El Metodo de los Elementos Finitos', 'Programacion'], 
layout=Layout(width='170px') ) f_asignaturas = interactive(asignaturas, asignatura = ToggleButtons1) ToggleButtons1.description=''; Select1 = widgets.Select( options=['A First Course in the Finite Element Method', \ 'Essentials of the finite element method for mechanical and structural engineers', \ 'Mecanica computacional en ingenieria con aplicaciones en Matlab'], # value='OSX', # rows=10, # description='OS:', disabled = False, layout = Layout(width='500px', height='406px') ) f_libros = interactive(libros, libro = Select1) Select1.description=''; width = "544px" height = "519px" urls = "https://drive.google.com/file/d/1Q1Y23QPRmDfFosntzNRvrWPlUu0MCaKU/preview" HTML44 = widgets.HTML( value = f"<iframe src={urls} width={width} height={height}>", ) VBox19 = VBox([HTML42, ToggleButtons1], layout=Layout( border='0px solid yellow', padding='0px', align_items='flex-start', width='auto', height='auto', grid_gap='25px'), ) VBox20 = VBox([HTML43, Select1], layout=Layout( border='0px solid yellow', padding='0px', align_items='flex-start', width='auto', height='auto', grid_gap='25px'), ) HBox10 = HBox([VBox19, VBox20], layout=Layout( border='0px solid yellow', padding='0px 0px 0px 0px', justify_content='space-between', width='700px', height='auto') ) VBox21 = VBox([HTML41, HBox10], layout=Layout( border='0px solid yellow', padding='0px', align_items='flex-start', width='auto', height='auto', grid_gap='20px'), ) HBox11 = HBox([VBox21, HTML44], layout=Layout( border='0px solid yellow', padding='20px 0px 0px 0px', justify_content='space-between', width='1280px', height='auto') ) display(HBox11)
JunYes.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from scipy.spatial.distance import cdist # calc 2 point distance np.random.seed(11) # + means = [[2, 2], [8, 3], [3, 6]] cov = [[1, 0], [0, 1]] # Phan phoi chuan N = 500 X0 = np.random.multivariate_normal(means[0], cov, N) X1 = np.random.multivariate_normal(means[1], cov, N) X2 = np.random.multivariate_normal(means[2], cov, N) # Noi array X = np.concatenate((X0, X1, X2), axis = 0) K = 3 original_label = np.asarray([0]*N + [1]*N + [2]*N).T # - # Print points print(X.shape) # + def kmeans_display(X, label): K = np.amax(label) + 1 X0 = X[label == 0, :] X1 = X[label == 1, :] X2 = X[label == 2, :] plt.plot(X0[:, 0], X0[:, 1], 'b^', markersize = 4, alpha = .8) plt.plot(X1[:, 0], X1[:, 1], 'go', markersize = 4, alpha = .8) plt.plot(X2[:, 0], X2[:, 1], 'rs', markersize = 4, alpha = .8) plt.axis('equal') plt.plot() plt.show() kmeans_display(X, original_label) # + def kmeans_init_centers(X, k): # randomly pick k rows of X as initial centers return X[np.random.choice(X.shape[0], k, replace=False)] def kmeans_assign_labels(X, centers): # calculate pairwise distances btw data and centers D = cdist(X, centers) # return index of the closest center return np.argmin(D, axis = 1) def kmeans_update_centers(X, labels, K): centers = np.zeros((K, X.shape[1])) for k in range(K): # collect all points assigned to the k-th cluster Xk = X[labels == k, :] # take average centers[k,:] = np.mean(Xk, axis = 0) return centers def has_converged(centers, new_centers): # return True if two sets of centers are the same return (set([tuple(a) for a in centers]) == set([tuple(a) for a in new_centers])) # - def kmeans(X, K): centers = [kmeans_init_centers(X, K)] labels = [] it = 0 while True: 
labels.append(kmeans_assign_labels(X, centers[-1])) new_centers = kmeans_update_centers(X, labels[-1], K) if has_converged(centers[-1], new_centers): break centers.append(new_centers) it += 1 return (centers, labels, it) # + (centers, labels, it) = kmeans(X, K) print('Centers found by our algorithm:') print(centers[-1]) kmeans_display(X, labels[-1]) # - # lib from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=3, random_state=0).fit(X) print('Centers found by scikit-learn:') print(kmeans.cluster_centers_) pred_label = kmeans.predict(X) kmeans_display(X, pred_label)
_notebooks/K-means-clustering.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_mxnet_p36
#     language: python
#     name: conda_mxnet_p36
# ---

# ## Local MNIST Training with MXNet and Gluon
#
# ### Pre-requisites
#
# This notebook shows how to use the SageMaker Python SDK to run your code in a local container before deploying to SageMaker's managed training or hosting environments. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. Just change your estimator's `train_instance_type` to `local`. You could also use `local_gpu` if you're using an ml.p2 or ml.p3 notebook instance, but then you'll need to set `train_instance_count=1` since distributed, local, GPU training is not yet supported.
#
# In order to use this feature you'll need to install docker-compose (and nvidia-docker if training with a GPU). Running the setup.sh script below will handle this for you.
#
# **Note, you can only run a single local notebook at one time.**

# !/bin/bash ./setup.sh

# ### Overview
#
# MNIST is a widely used dataset for handwritten digit classification. It consists of 70,000 labeled 28x28 pixel grayscale images of hand-written digits. The dataset is split into 60,000 training images and 10,000 test images. There are 10 classes (one for each of the 10 digits). This tutorial will show how to train and test an MNIST model on SageMaker local mode using MXNet and the Gluon API.

# +
import os
import subprocess

import boto3
import sagemaker
from sagemaker.mxnet import MXNet
from mxnet import gluon
from sagemaker import get_execution_role

sagemaker_session = sagemaker.Session()

instance_type = "local"

# Use the GPU container only when nvidia-smi exists AND exits successfully.
# Fix: subprocess.call raises FileNotFoundError (an OSError) when the binary
# is missing, so the bare call crashed on CPU-only instances instead of
# falling back to "local".
try:
    if subprocess.call("nvidia-smi") == 0:
        ## Set type to GPU if one is present
        instance_type = "local_gpu"
except OSError:
    pass

print("Instance type = " + instance_type)

role = get_execution_role()
# -

# ## Download training and test data

gluon.data.vision.MNIST("./data/train", train=True)
gluon.data.vision.MNIST("./data/test", train=False)

# ## Uploading the data
#
# We use the `sagemaker.Session.upload_data` function to upload our datasets to an S3 location. The return value `inputs` identifies the location -- we will use this later when we start the training job.

inputs = sagemaker_session.upload_data(path="data", key_prefix="data/mnist")

# ## Implement the training function
#
# We need to provide a training script that can run on the SageMaker platform. The training scripts are essentially the same as one you would write for local training, except that you need to provide a `train` function. The `train` function will check for the validation accuracy at the end of every epoch and checkpoints the best model so far, along with the optimizer state, in the folder `/opt/ml/checkpoints` if the folder path exists, else it will skip the checkpointing. When SageMaker calls your function, it will pass in arguments that describe the training environment. Check the script below to see how this works.
#
# The script here is an adaptation of the [Gluon MNIST example](https://github.com/apache/incubator-mxnet/blob/master/example/gluon/mnist.py) provided by the [Apache MXNet](https://mxnet.incubator.apache.org/) project.

# !cat 'mnist.py'

# ## Run the training script on SageMaker
#
# The ```MXNet``` class allows us to run our training function on SageMaker local mode. We need to configure it with our training script, an IAM role, the number of training instances, and the training instance type. This is the the only difference from [mnist_with_gluon.ipynb](./mnist_with_gluon.ipynb). Instead of ``train_instance_type='ml.c4.xlarge'``, we set it to ``train_instance_type='local'``. For local training with GPU, we could set this to "local_gpu". In this case, `instance_type` was set above based on your whether you're running a GPU instance.

m = MXNet(
    "mnist.py",
    role=role,
    instance_count=1,
    instance_type=instance_type,
    framework_version="1.6.0",
    py_version="py3",
    hyperparameters={
        "batch-size": 100,
        "epochs": 20,
        "learning-rate": 0.1,
        "momentum": 0.9,
        "log-interval": 100,
    },
)

# After we've constructed our `MXNet` object, we fit it using the data we uploaded to S3. Even though we're in local mode, using S3 as our data source makes sense because it maintains consistency with how SageMaker's distributed, managed training ingests data.

m.fit(inputs)

# After training, we use the MXNet object to deploy an MXNetPredictor object. This creates a SageMaker endpoint locally that we can use to perform inference.
#
# This allows us to perform inference on json encoded multi-dimensional arrays.

predictor = m.deploy(initial_instance_count=1, instance_type=instance_type)

# We can now use this predictor to classify hand-written digits. Drawing into the image box loads the pixel data into a 'data' variable in this notebook, which we can then pass to the mxnet predictor.

# +
from IPython.display import HTML

HTML(open("input.html").read())
# -

# The predictor runs inference on our input data and returns the predicted digit (as a float value, so we convert to int for display).

# NOTE(review): `data` is injected into the notebook namespace by the
# JavaScript in input.html above; it is intentionally undefined in this
# script until the widget is used.
response = predictor.predict(data)
print(int(response))

# ## Clean-up
#
# Deleting the local endpoint when you're finished is important since you can only run one local endpoint at a time.

predictor.delete_endpoint()
sagemaker-python-sdk/mxnet_gluon_mnist/mxnet_mnist_with_gluon_local_mode.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import xarray as xr
import fsspec

# this should be replaced with the details of a new kubecluster
from dask.distributed import Client
from dask_kubernetes import KubeCluster
# -

cluster = KubeCluster()
client = Client(cluster)

# +
# CMIP6 ocean zonal velocity ("uo", per the path's variable_id) for the
# UKESM1-0-LL ssp585 run, stored as consolidated zarr on GCS.
path = 'gs://cmip6/ScenarioMIP/MOHC/UKESM1-0-LL/ssp585/r1i1p1f2/Omon/uo/gn/'
ds = xr.open_zarr(fsspec.get_mapper(path), consolidated=True)

# Fix: `da` was used below but never defined. Select the dataset's "uo"
# variable (the variable_id in the store path) as the DataArray to reduce.
da = ds['uo']

# Average over every dimension except time, then trigger eager computation
# on the Dask cluster.
da.mean([di for di in da.dims if di != 'time']).load()
test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np
import requests

# 2013-2017 Migration Flow Data
# Fetch county-to-county migration flows from the Census ACS flows API.
# NOTE(review): no status-code check -- a failed request surfaces later as a
# JSON decode error rather than an HTTP error.
r = requests.get('https://api.census.gov/data/2017/acs/flows?get=COUNTY1_NAME,GEOID1,STATE2_NAME,COUNTY2_NAME,GEOID2,POP1YR,MOVEDIN,MOVEDOUT,MOVEDNET,NONMOVERS,SAMECOUNTY,FROMDIFFCTY,FROMDIFFSTATE,FROMABROAD,TODIFFCTY,TODIFFSTATE,TOPUERTORICO&for=county:*')

# First row of the API response is the header, so skip it and name the
# columns explicitly.
df = pd.DataFrame(r.json()[1:])
df.columns = ['County1Name', 'County1FIPS', 'State2Name', 'County2Name', 'County2FIPS', 'County1Population', 'MovedIn', 'MovedOut', 'Net_Dom', 'NonMovers', 'Within_Same_County', 'From_Diff_County_Same_State', 'From_Diff_State', 'From_Abroad', 'To_Diff_County_Same_State', 'To_Diff_State', 'To_PR', 'State', 'County']
df = df.drop(['County'], axis=1)
df['Year'] = list(np.full(len(df), 2017))
# Blank strings from the API become NaN, then 0.
df = df.replace(' ', np.nan).fillna(0)
df.to_csv('temp.csv', index=False)

# To solve formatting issues (w/ large FIPS code- too long for Python to interpret as an int)
# Round-trip through CSV so pandas re-infers numeric dtypes for the columns
# that arrived as strings.
adj_df = pd.read_csv('temp.csv')

adj_df = adj_df[(adj_df.County2FIPS < 80000) & (adj_df.County2FIPS > 0)]  # Excluding weird counties (distorted data)
#adj_df['State2'] = adj_df.County2FIPS // 1000 # State FIPS

# Keep only flows whose destination state is California (state FIPS 6).
ca_net = adj_df[adj_df.State == 6]
ca_net.head(1)

# ### States:

# Sum California's net domestic migration against every origin state.
# NOTE(review): the loop reuses the name `df`, clobbering the raw API frame
# loaded above -- intentional here, but fragile if cells are re-run out of order.
vals = []
for fips in ca_net.State2Name.unique():
    df = ca_net[ca_net.State2Name == fips]
    vals.append(df.Net_Dom.sum())

df = pd.DataFrame({'State':ca_net.State2Name.unique(), 'Net_Dom':vals})
df = df[df.State != 'California'].sort_values('Net_Dom')

len(df[df.Net_Dom > 0])

# NOTE(review): 32 and 19 are hard-coded row counts (states losing people to
# CA vs. gaining from CA) taken from the len() check above -- TODO confirm
# they still match if the underlying data changes.
df['More'] = list(df.State.values[:32]) + ['California']*19
df['Net_Dom_Str'] = df.Net_Dom.apply(lambda x:abs(x))

df.to_csv('CA_net_mig_states_1317.csv', index=False)

# ### Counties:

# Same aggregation as above, but per origin county ("County, State" label).
vals = []
counties = []
for fips in ca_net.County2FIPS.unique():
    df = ca_net[ca_net.County2FIPS == fips]
    counties.append(df.County2Name.iloc[0] + ', ' + df.State2Name.iloc[0])
    vals.append(df.Net_Dom.sum())

df = pd.DataFrame({'FIPS':ca_net.County2FIPS.unique(), 'County':counties, 'Net_Dom':vals})
df = df[df.FIPS//1000 != 6].sort_values('Net_Dom')  # Excluding CA counties
#df['County'] = df.Address.apply(lambda x:x.split(',')[0])
df

len(df[df.Net_Dom < 0])

# NOTE(review): 1513 and 813 are hard-coded row counts analogous to the 32/19
# split above -- TODO confirm against len() on fresh data.
df['More'] = list(df.County.apply(lambda x:x.split(',')[0]).values[:1513]) + ['California']*813
# Counties section formats the magnitude with thousands separators; the
# states section above stores a bare int.
df['Net_Dom_Str'] = df.Net_Dom.apply(lambda x:'{:,}'.format(abs(x)))
df

df.to_csv('CA_net_mig_counties_1317.csv', index=False)

# ---
Notebooks/CA_Net_Mig_States_Counties.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Retry of getting stock ticker IPO dates after Excel ate my datafile. Also get ticker EOD data for the 20 stocks whose websites I scraped (3/15/21) # + from bs4 import BeautifulSoup import requests from requests import get from IPython.core.display import display, HTML # - from time import sleep import re from random import randint #avoid throttling by not sending too many requests one after the other from warnings import warn from time import time from IPython.core.display import clear_output import numpy as np import pandas as pd # !pip install tiingo import sys sys.path.append("/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages") from tiingo import TiingoClient # + config = {} # To reuse the same HTTP Session across API calls (and have better performance), include a session key. config['session'] = True ### You will need to get an API key from https://api.tiingo.com ### The API key is free and includes a max number of requests per hour and day, and data transfer. ### Paid account will give more requests and data transfer. # If you don't have your API key as an environment variable, # pass it in via a configuration dictionary. 
config['api_key'] = "<KEY>" #Initialize client = TiingoClient(config) # - import sys sys.path.append("/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages") # !pip install pandas-datareader import os import pandas_datareader as pdr companies_no_ipo_date = pd.read_csv("nasdaq_no_ipo_date.csv") companies.info() # + #stock_tickers = companies.Symbol.values.tolist() #print(stock_tickers[5]) #type(stock_tickers[5]) # + #no_ipo_date_companies = pd.read_csv("nasdaq_all_pharma_wo_ipo_date.csv") #no_ipo_date_companies.info() # - no_ipo_date_symbols = companies_no_ipo_date.Symbol.values.tolist() print(no_ipo_date_symbols[0:5]) #type(no_ipo_date_symbols[5]) # + #ticker_no_ipo_metadata = pdr.tiingo.TiingoMetaDataReader(no_ipo_date_symbols, pause=0.2, api_key='<KEY>') #['adjClose'] #print(type(df_no_ipo_metadata)) #pd.DataFrame.from_dict(ticker_no_ipo_metadata, orient='index') #df_no_ipo_metadata.to_csv('df_no_ipo_symbols_metadata.csv') #print(df_no_ipo_metadata) dict_list=[] for symbol in no_ipo_date_symbols: dict_no_ipo_date=client.get_ticker_metadata(symbol) dict_list.append(dict_no_ipo_date) df_symbol_no_ipo=pd.DataFrame(dict_list) df_symbol_no_ipo.head() # - df_symbol_no_ipo.to_csv('metadata_symbols_w_no_ipo_date.csv') # + ##Now have a csv file with metadata for all symbols that had no ##IPO date listed. Need to edit in excel to delete companies with ##IPOs after 2015. Then join with the csv file containing the ##tickers with IPO dates listed in the original data download. 
# - #ticker_metadata = client.get_ticker_metadata("GOOGL") #print(ticker_metadata) df_google=pd.DataFrame.from_dict(ticker_metadata, orient='index') df_google import datetime #dict_list=[] #for symbol in no_ipo_date_symbols: # dict_no_ipo_date=client.get_ticker_metadata(symbol) # dict_list.append(dict_no_ipo_date) #df_symbol_no_ipo=pd.DataFrame(dict_list) #df_symbol_no_ipo.head() df_symbol_no_ipo.info() df_symbol_no_ipo.to_csv('metadata_symbols_w_no_ipo_date.csv') #Read in csv file w all existing biotech/pharma companies traded on #NASDAQ, with IPOs before 2016 df_pre_2016_ipo_companies=pd.read_csv('nasdaq_pre_2016_ALL.csv') df_pre_2016_ipo_companies.info() #Make a list of ticker symbols of all existing biotech/pharma companies traded on #NASDAQ, with IPOs before 2016 pre_2016_ipo_symbols = df_pre_2016_ipo_companies.Symbol.values.tolist() print(pre_2016_ipo_symbols[0:5]) type(pre_2016_ipo_symbols[5]) type(pre_2016_ipo_symbols) ##I don't have the metadata for all these companies. It might be useful. ##Pass the list of ticker symbols into the client and retrieve the metadata. dict_list=[] for symbol in pre_2016_ipo_symbols: dict_pre_2016_metadata=client.get_ticker_metadata(symbol) dict_list.append(dict_pre_2016_metadata) df_nasdaq_pre_2016_metadata=pd.DataFrame(dict_list) df_nasdaq_pre_2016_metadata.head() df_nasdaq_pre_2016_metadata.to_csv('df_nasdaq_pre_2016_metadata.csv') df_nasdaq_pre_2016_metadata.info() # + ##I now have a csv file with metadata, ticker symbols, and IPO dates for all ##biotech/pharma stocks traded on NASDAQ, with pre-2016 IPOs. # - ##Make a list of the ticker symbols. 
pre_2016_ipo_symbols = df_nasdaq_pre_2016_metadata.ticker.values.tolist() print(pre_2016_ipo_symbols[0:5]) type(pre_2016_ipo_symbols[5]) type(pre_2016_ipo_symbols) # + ### HISTORICAL EOD PRICES DOWNLOAD ### THIS IS A RETRY WITH ONLY NASDAQ PHARMA/BIOTECH STOCKS WITH IPOs BEFORE 2016 ### THIS CODE BLOCK COLLECTS 4 YEARS OF DATA; 1/1/2016 TO 1/1/2020 #If I can pass in the list of ticker symbols stock_tickers, then I can have #the pandas datareader download the closing prices for 4 years of the biotech #and pharma stocks in my list. Then save it to a csv file. #stock_tickers import time startTime = time.time() pharma_2016_2020_hist_data_list=[] #use the list pre_2016_ipo_symbols from above. Get the data for each ticker symbol(this will be a pandas series), #and append it to the list. for ticker in pre_2016_ipo_symbols: try: ticker_prices = pdr.get_data_tiingo(ticker, start='2016-01-01', end='2020-01-01', pause=0.2, api_key='<KEY>') pharma_2016_2020_hist_data_list.append(ticker_prices) except KeyError as ke: print('KeyError ', ke) #Concatenate the list of series into a dataframe df_2016_2020_hist_data=pd.concat(pharma_2016_2020_hist_data_list, axis=0) #save the resulting dataframe to a csv file df_2016_2020_hist_data.to_csv('df_pharma_2016_2020_hist_data.csv') executionTime = (time.time() - startTime) print('Execution time in seconds: ' + str(executionTime)) # - df_ALL_pharma_companies=pd.read_csv('ALL_Symbols_screener_nyse_nasdaq_amex.csv') df_ALL_pharma_companies.info() df_ALL_pharma_companies.head() ALL_pharma_company_symbols = df_ALL_pharma_companies.Symbol.values.tolist() # + ##3/11/21 ##I'm going to fetch metadata for the full list of biotech/pharma ##companies, agnostic of IPO year. 
import time startTime = time.time() dict_list=[] for symbol in ALL_pharma_company_symbols: try: dict_company=client.get_ticker_metadata(symbol) dict_list.append(dict_company) except HTTPError as he: print(symbol,'HTTPError ', he) df_metadata_ALL_companies=pd.DataFrame(dict_list) df_metadata_ALL_companies.head() executionTime = (time.time() - startTime) print('Execution time in seconds: ' + str(executionTime)) # - df_metadata_ALL_companies.to_csv('df_metadata_ALL_companies.csv') df_temp= pd.DataFrame(dict_list) df_temp.tail() ##df_temp.info() df_temp.info() # + ##test dict_list_test=[] #for symbol in ALL_pharma_company_symbols: ticker_list=['EHC', 'ENZ', 'EW', 'gen', 'HRC'] for symbol2 in ticker_list: dict_company2=client.get_ticker_metadata(symbol2) dict_list_test.append(dict_company2) df_metadata_EW_ENZ=pd.DataFrame(dict_list_test) df_metadata_EW_ENZ # - df_metadata_EW_ENZ.info() # + ###Re-doing this with only Lilly LLY ### 4 YEARS OF DATA; 1/1/2016 TO 1/1/2020 import time startTime = time.time() pre_2016_ipo_symbols=['LLY'] lilly_2016_2020_hist_data_list=[] #use the list pre_2016_ipo_symbols from above. Get the data for each ticker symbol(this will be a pandas series), #and append it to the list. 
for ticker in pre_2016_ipo_symbols: try: ticker_prices = pdr.get_data_tiingo(ticker, start='2016-01-01', end='2020-01-01', pause=0.2, api_key='<KEY>') lilly_2016_2020_hist_data_list.append(ticker_prices) except KeyError as ke: print('KeyError ', ke) #Concatenate the list of series into a dataframe df_2016_2020_hist_data_lilly=pd.concat(lilly_2016_2020_hist_data_list, axis=0) #save the resulting dataframe to a csv file df_2016_2020_hist_data_lilly.to_csv('df_pharma_2016_2020_hist_data_lilly.csv') executionTime = (time.time() - startTime) print('Execution time in seconds: ' + str(executionTime)) # - df_2016_2020_hist_data_lilly.head() #pickle this dataframe import pickle with open('df_2016_2020_hist_data_lilly.pickle', 'wb') as to_write: pickle.dump(df_2016_2020_hist_data_lilly, to_write) df_2016_2020_hist_data_lilly.info() import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns #this dataframe has a multi index. Need to reset it so that the dates go into a column df_2016_2020_hist_data_lilly.reset_index(inplace=True) df_2016_2020_hist_data_lilly.info() df_2016_2020_hist_data_lilly.tail(1) fig, ax = plt.subplots() sns.scatterplot(x='date', y='adjClose', data=df_2016_2020_hist_data_lilly, ax=ax) #ax.set_xlim(2016, 2020) #ax.set_xticks(range(2016, 2020)) plt.show() #pickle this dataframe import pickle with open('df_2016_2020_hist_data_lilly_newindex.pickle', 'wb') as to_write: pickle.dump(df_2016_2020_hist_data_lilly, to_write) df_2016_2020_hist_data_lilly.to_csv('df_2016_2020_hist_data_lilly_newindex.csv') #symbol_list=[] #symbols=['EXEL', 'DNA'] #for symbol in symbols: # ticker_metadata=client.get_ticker_metadata(symbol) # print(ticker_metadata) # df_symbol=pd.DataFrame.from_dict(ticker_metadata, orient='index') # print(df_symbol) #start_date=df_symbol.get_value('startDate', 0) # print (start_date) # start_date=datetime(start_date) #if start_date<=2014-12-31: # symbol_list.append(symbol) # print(symbol_list) # + import time startTime = time.time() 
#####your python script##### executionTime = (time.time() - startTime) print('Execution time in seconds: ' + str(executionTime)) # - # + ##I did this earlier--got a list of all ticker symbols that Tiingo keeps track of. ##Not sure how helpful it was. #df_symbols=pdr.tiingo.get_tiingo_symbols() #df_symbols.to_csv('tiingo_symbols.csv') # + #If I can pass in the list of ticker symbols stock_tickers, then I can have #the pandas datareader download the closing prices for 5 years of the biotech #and pharma stocks in my list. Then save it to a csv file. #Also print the time it took to execute import time startTime = time.time() test4_hist_data_list=[] test4_list=['EXEL', 'MSFT'] for ticker in test4_list: df_test4_prices = pdr.get_data_tiingo(ticker, start='2015-01-01', end='2020-01-01', pause=0.2, api_key='<KEY>') #['adjClose'] test4_hist_data_list.append(df_test4_prices) print(type(test4_hist_data_list)) #test4_hist_data_list[0:5] #df_pharma_prices.to_csv('pharma_prices.csv') executionTime = (time.time() - startTime) print('Execution time in seconds: ' + str(executionTime)) # - #Each ticker gets its own list!!! Perfect! This gave me only the adjusted closing price print(test4_hist_data_list[0]) # + ## axis=0 is better; I won't end up with a dataframe full of NaNs!! df_test4_hist_data=pd.concat(test4_hist_data_list, axis=0) df_test4_hist_data # - # + ###I ran this earlier. I will copy-paste it above to run with the new list of ticker symbols. #If I can pass in the list of ticker symbols stock_tickers, then I can have #the pandas datareader download the closing prices for 5 years of the biotech #and pharma stocks in my list. Then save it to a csv file. #stock_tickers pharma_2015_2020_hist_data_list=[] #use the list pre_2015_ipo_symbols from above. Get the data for each ticker symbol(this will be a pandas series), #and append it to the list. 
for ticker in pre_2015_ipo_symbols: try: ticker_prices = pdr.get_data_tiingo(ticker, start='2015-01-01', end='2020-01-01', pause=0.2, api_key='307d0cd85ac1760c0dc887e433b4673cc352d8a1')['adjClose'] pharma_2015_2020_hist_data_list.append(ticker_prices) except KeyError as ke: print('KeyError ', ke) #Concatenate the list of series into a dataframe df_2015_2020_hist_data=pd.concat(pharma_2015_2020_hist_data_list, axis=1) #save the resulting dataframe to a csv file df_2015_2020_hist_data.to_csv('df_pharma_2015_2020_hist_data.csv') # - df_test.to_csv('df_test.csv') # + # TIINGO API - REQUEST DATA # https://www.tiingo.com #auth_token = r'<KEY>' # - # + price_list=[] for ticker in stock_tickers: #get request response = get("https://api.tiingo.com/tiingo/daily/" + ticker +"/prices?startDate=2015-1-1&endDate=2020-1-1") #price_list.append(response) #sleep(randint(1,5)) # + #price_list=[] #for ticker in stock_tickers: #get request response = get("https://api.tiingo.com/tiingo/daily/EXEL/prices?startDate=2015-1-1&endDate=2020-1-1") #price_list.append(response) print(response) #sleep(randint(1,5)) # - response=get("https://api.tiingo.com/tiingo/daily/EXEL/prices?startDate=2015-1-1&endDate=2020-1-1&format=csv&resampleFreq=monthly") print(response)
Notebook-0-Project-5-stock-price-scraping-Retry.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Practice Content # # 将学到的Python知识用于实践:本次实验使用Django # # 使用Python 3编写Django程序! # # 按照如下链接,一步一步学用 Django。最终完成 Django tutorial里的工程项目 # # https://docs.djangoproject.com/en/2.2/ # # https://docs.djangoproject.com/zh-hans/2.2/ # # # 也就是说,按照以下链接的顺序 # https://docs.djangoproject.com/en/2.2/intro/tutorial01/ # # https://docs.djangoproject.com/en/2.2/intro/tutorial02/ # # https://docs.djangoproject.com/en/2.2/intro/tutorial03/ # # … # # 把 tutorial 里的工程项目在自己的电脑上实现一遍! # # --- # Django是一个很棒的Web应用框架,由Python写成 # # Django使得开发复杂的、数据库驱动的Web应用变得简单 # # Django注重组件的重用性和可插拔性,敏捷开发和DRY法则(Don't Repeat Yourself) # # https://www.djangoproject.com/ # # https://zh.wikipedia.org/wiki/Django # # 如果你在完成了Django tutorial之后,对Django还想了解更多,以下是一些资源: # # Don’t reinvent the wheel! 每次在打算自己实现一个Django App之前,都先找一找,看看是不是已经有现成的了。以下这个网站是索引,值得收藏: # # https://djangopackages.org/ # # https://djangopackages.org/grids/ # ## 快速学习HTML, CSS, JavaScript, jQuery等知识 # # https://www.w3schools.com/ # # http://www.w3school.com.cn/ # # ## Booststrap # # 不要自己从头写CSS。使用Bootstrap: # # http://getbootstrap.com/ # # ## PostgreSQL! # # 用真正的关系数据库代替SQLite。开源的PostgreSQL是个好选择 # # 它的数据库管理系统图形界面:pgAdmin # # ## 高阶技巧 # 进阶可看一本关于Django的书: # # 《Two Scoops of Django: Best Practices for Django》 # # https://www.twoscoopspress.com/ # # 如果阅读和Django有关的书籍,注意要选择使用最新Django版本的书。比如,现在阅读有关Django 1.5的书没有意义,因为Django 1.5已经过时了
Homework/Practice 04. DjangoTutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="0MRC0e0KhQ0S" # # Decision Tree Classification # > A tutorial On how to use Decision Tree Classification. # # - toc: true # - badges: true # - comments: true # - categories: [jupyter, Classification] # - # ## 0. Data Preprocessing # + [markdown] colab_type="text" id="LWd1UlMnhT2s" # ### 0.1 Importing the libraries # + colab={} colab_type="code" id="YvGPUQaHhXfL" import numpy as np import matplotlib.pyplot as plt import pandas as pd # + [markdown] colab_type="text" id="K1VMqkGvhc3-" # ### 0.2 Importing the dataset # - dataset = pd.read_csv('Social_Network_Ads.csv') dataset # ### 0.3 Check if any null value dataset.isna().sum() dataset.info() # #### Drop User ID dataset.drop('User ID', axis=1, inplace=True) dataset.head() # + [markdown] colab={} colab_type="code" id="M52QDmyzhh9s" # ### 0.4 Split into X & y # - X = dataset.drop('Purchased', axis=1) X.head() y = dataset['Purchased'] y.head() # ### 0.5 Convert categories into numbers # + from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer categorical_feature = ["Gender"] one_hot = OneHotEncoder() transformer = ColumnTransformer([("one_hot", one_hot, categorical_feature)], remainder="passthrough") transformed_X = transformer.fit_transform(X) # - pd.DataFrame(transformed_X).head() # + [markdown] colab_type="text" id="YvxIPVyMhmKp" # ### 0.6 Splitting the dataset into the Training set and Test set # + colab={} colab_type="code" id="AVzJWAXIhxoC" from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(transformed_X, y, test_size = 0.25, random_state = 2509) # + [markdown] colab_type="text" id="kW3c7UYih0hT" # ### 0.7 Feature Scaling # + colab={} colab_type="code" id="9fQlDPKCh8sc" 
from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # + [markdown] colab_type="text" id="bb6jCOCQiAmP" # ## 1.Training the model on the Training set # + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" executionInfo={"elapsed": 921, "status": "ok", "timestamp": 1586363337184, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}, "user_tz": -240} id="e0pFVAmciHQs" outputId="28d56fc8-fa33-49a6-f1a6-e320e91fbdda" from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy') classifier.fit(X_train, y_train) # - # ### 1.1 Score classifier.score(X_test,y_test) # + [markdown] colab_type="text" id="ZmePIEiiiKyw" # ## 2.Predicting the Test set results # + colab={} colab_type="code" id="aWK4AcCgiNwF" y_pred = classifier.predict(X_test) # + [markdown] colab_type="text" id="h4Hwj34ziWQW" # ### 2.2 Making the Confusion Matrix # + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" executionInfo={"elapsed": 896, "status": "ok", "timestamp": 1586363344426, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}, "user_tz": -240} id="D6bpZwUiiXic" outputId="82881019-9e70-4c9c-f9f9-ff7a42412fbf" from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) print(cm)
_notebooks/2020-10-26-Decision_tree_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # # How PyPy can help High Performance Computing # + [markdown] slideshow={"slide_type": "slide"} # # Short bio # # * PyPy core dev since 2006 # * pdb++, CFFI, vmprof, capnpy, ... # * @antocuni # * https://github.com/antocuni # * https://bitbucket.org/antocuni # + [markdown] slideshow={"slide_type": "slide"} # # How many of you use Python? # + [markdown] slideshow={"slide_type": "slide"} # # How many have ever had performance problems? # + [markdown] slideshow={"slide_type": "slide"} # # Why do you use Python, then? # + [markdown] slideshow={"slide_type": "slide"} # # Python strong points # # * Simplicity # * Lots of libraries # * Ecosystem # + [markdown] slideshow={"slide_type": "subslide"} # <img src="images/ken-treloar-346065-unsplash.jpg" /> # # <div align="right"><small>Photo by <a href="https://unsplash.com/photos/yjePAp-tpmQ"><NAME></a> on <a href="https://unsplash.com/">Unsplash</a></small></div> # + [markdown] slideshow={"slide_type": "slide"} # # Python *REAL* strong points # # * Expressive & simple APIs # * Uniform typesystem (everything is an object) # * Powerful abstractions # # + [markdown] slideshow={"slide_type": "slide"} # # Example: JSON # + [markdown] cell_style="center" slideshow={"slide_type": "slide"} # ```java # JSONObject jsonObj = new JSONObject(jsonString); # # JSONArray jArray = jsonObj.getJSONArray("data"); # int length = jArray.length(); # for(int i=0; i<length; i++) { # JSONObject jObj = jArray.getJSONObject(i); # String id = jObj.optString("id"); # String name=jObj.optString("name"); # # JSONArray ingredientArray = jObj.getJSONArray("Ingredients"); # int size = ingredientArray.length(); # ArrayList<String> Ingredients = new ArrayList<>(); # # for(int j=0; j<size; j++) { 
# JSONObject json = ja.getJSONObject(j); # Ingredients.add(json.optString("name")); # } # } # # // googled for "getJSONArray example", found this: # // https://stackoverflow.com/questions/32624166/how-to-get-json-array-within-json-object # # ``` # + [markdown] cell_style="center" slideshow={"slide_type": "slide"} # ```python # obj = json.loads(string) # for item in obj['data']: # id = item['id'] # name = item['name'] # ingredients = [] # for ingr in item["ingredients"]: # ingredients.append(ingr['name']) # ``` # + [markdown] slideshow={"slide_type": "slide"} # # So far so good, BUT # + [markdown] slideshow={"slide_type": "fragment"} # <center><img src="images/abstractions.svg" width="50%" /></center> # + [markdown] slideshow={"slide_type": "subslide"} # <center><img src="images/toy-story.jpg" width="100%" /></center> # + [markdown] slideshow={"slide_type": "slide"} # # Example of temporary objects # ## Bound methods # + slideshow={"slide_type": "fragment"} class A(object): def foo(self): return 42 a = A() bound_foo = a.foo # %timeit a.foo() # %timeit bound_foo() # + [markdown] slideshow={"slide_type": "slide"} # # Ideally # ### Think of concepts, not implementation details # # + [markdown] slideshow={"slide_type": "fragment"} # # Real world # ### Details leak to the user # + [markdown] slideshow={"slide_type": "slide"} # # Python problem # ### Tension between abstractions and performance # + [markdown] slideshow={"slide_type": "slide"} # # Classical Python approaches to performance # + [markdown] slideshow={"slide_type": "slide"} # # 1. Work around in the user code # ### e.g. create bound methods beforehand # + [markdown] slideshow={"slide_type": "slide"} # # 2. 
Work around in the language specs # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # * range vs xrange # * dict.keys vs .iterkeys # * int vs long # * array.array vs list # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # * Easier to implement # * Harder to use # * Clutter the language unnecessarily # * More complex to understand # * Not really Pythonic # + [markdown] slideshow={"slide_type": "slide"} # # 3. Stay in C as much as possible # - numbers = range(1000) % timeit [x*2 for x in numbers] # + import numpy as np numbers = np.arange(1000) % timeit numbers*2 # + [markdown] slideshow={"slide_type": "slide"} # # 4. Rewrite in C # # * `#include "Python.h"` # * Cython # * CFFI # # + [markdown] slideshow={"slide_type": "slide"} # # "Rewrite in C" approach # ## aka, 90/10 rule # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # <img src="images/90-10-rule-1.svg"> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # <img src="images/90-10-rule-2.svg"> # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # <img src="images/90-10-rule-3.svg"> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # * Abstractions cost # * Code quality => poor performance # * Python parts become relevant # + [markdown] slideshow={"slide_type": "slide"} # # Python in the HPC world # + [markdown] slideshow={"slide_type": "fragment"} # # Python as a glue-only language # + [markdown] cell_style="center" slideshow={"slide_type": "fragment"} # # Tradeoff between speed and code quality # + [markdown] slideshow={"slide_type": "slide"} # # PyPy # + [markdown] slideshow={"slide_type": "fragment"} # * Alternative Python implementation # * Ideally: no visible difference to the user # * JIT compiler # * http://pypy.org # + [markdown] slideshow={"slide_type": "slide"} # # How fast is PyPy? 
# + [markdown] slideshow={"slide_type": "fragment"} # ### Wrong question # + [markdown] slideshow={"slide_type": "fragment"} # * Up to 80x faster in extreme cases # * 10x faster in good cases # * 2x faster on "random" code # * sometime it's just slower # + [markdown] slideshow={"slide_type": "slide"} # # PyPy flaws # + [markdown] slideshow={"slide_type": "fragment"} # * Far from being perfect # * it leaks *other* implementation details than CPython # - e.g. JIT warmup, GC pecularities # # + [markdown] slideshow={"slide_type": "slide"} # # PyPy qualities # + [markdown] slideshow={"slide_type": "subslide"} # # Make pythonic, idiomatic code fast # + [markdown] slideshow={"slide_type": "subslide"} # # Abstractions are (almost) free # # # + [markdown] slideshow={"slide_type": "subslide"} # # The better the code, the biggest the speedup # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # <img src="images/90-10-rule-1.svg"> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # <img src="images/90-10-rule-pypy.svg"> # + [markdown] slideshow={"slide_type": "slide"} # # Python as a first class language # # No longer "just glue" # + [markdown] slideshow={"slide_type": "slide"} # # Example: Sobel filter # # * Extendend version # - "The Joy of PyPy: Abstractions for Free", EP 2017 # # * https://speakerdeck.com/antocuni/the-joy-of-pypy-jit-abstractions-for-free # * https://www.youtube.com/watch?v=NQfpHQII2cU # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # <img src="images/sobel-cpython.png"> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # <img src="images/sobel-pypy.png"> # + [markdown] slideshow={"slide_type": "fragment"} # # 59x ⇨ 428x faster # + [markdown] slideshow={"slide_type": "slide"} # # The *BIG* problem: C extensions # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # ## CPython # <img src="images/calling-C-cpython.svg"> # + [markdown] cell_style="split" 
slideshow={"slide_type": "fragment"} # ## PyPy (cpyext) # <img src="images/calling-C-cpyext.svg"> # + [markdown] slideshow={"slide_type": "slide"} # # cpyext # # * PyPy version of `Python.h` # * Compatibility layer # * Most C extensions just work: numpy, scipy, pandas, etc. # * Slow :( # * Use CFFI whenever it's possible # + [markdown] slideshow={"slide_type": "slide"} # # We are working on it # + slideshow={"slide_type": "skip"} # %matplotlib inline def plot_benchmarks(*pythons): import numpy as np import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams['figure.figsize'] = (15,6) data = {"CPython": {"simple.noargs": 0.43, "simple.onearg(None)": 0.45, "simple.onearg(i)": 0.44, "simple.varargs": 0.6, "simple.allocate_int": 0.46, "simple.allocate_tuple": 0.81, "Foo().noargs": 0.44, "Foo().onearg(None)": 0.48, "Foo().onearg(i)": 0.47, "Foo().varargs": 0.63, "len(Foo())": 0.34, "Foo()[0]": 0.25}, "PyPy 5.8": {"simple.noargs": 1.09, "simple.onearg(None)": 1.34, "simple.onearg(i)": 2.6, "simple.varargs": 2.74, "simple.allocate_int": 2.49, "simple.allocate_tuple": 8.21, "Foo().noargs": 1.27, "Foo().onearg(None)": 1.55, "Foo().onearg(i)": 2.85, "Foo().varargs": 3.06, "len(Foo())": 1.36, "Foo()[0]": 1.53}, "PyPy 5.9": {"simple.noargs": 0.16, "simple.onearg(None)": 0.2, "simple.onearg(i)": 1.61, "simple.varargs": 3.08, "simple.allocate_int": 1.69, "simple.allocate_tuple": 6.39, "Foo().noargs": 1.17, "Foo().onearg(None)": 1.74, "Foo().onearg(i)": 3.03, "Foo().varargs": 2.95, "len(Foo())": 1.24, "Foo()[0]": 1.37}, "PyPy 5.10": {"simple.noargs": 0.18, "simple.onearg(None)": 0.21, "simple.onearg(i)": 1.52, "simple.varargs": 2.59, "simple.allocate_int": 1.67, "simple.allocate_tuple": 6.44, "Foo().noargs": 1.12, "Foo().onearg(None)": 1.41, "Foo().onearg(i)": 2.62, "Foo().varargs": 2.89, "len(Foo())": 1.21, "Foo()[0]": 1.32}, "PyPy 6.0": {"simple.noargs": 0.18, "simple.onearg(None)": 0.2, "simple.onearg(i)": 0.22, "simple.varargs": 0.42, "simple.allocate_int": 0.89, 
"simple.allocate_tuple": 5.02, "Foo().noargs": 0.19, "Foo().onearg(None)": 0.22, "Foo().onearg(i)": 0.24, "Foo().varargs": 0.45, "len(Foo())": 0.15, "Foo()[0]": 0.28}} #pythons = data.keys() #pythons = ["CPython", "PyPy 5.10", "PyPy 6.0"] benchmarks = data[pythons[0]].keys() # create plot fig, ax = plt.subplots() index = np.arange(len(benchmarks)) bar_width = 0.20 opacity = 0.8 colors = 'bgryk' for i, python in enumerate(pythons): values = [data[python][bench] for bench in benchmarks] normalized = [v/data['CPython'][bench] for (v, bench) in zip(values, benchmarks)] #print python, values rects1 = plt.bar(index + bar_width*i, normalized, bar_width, label=python, color=colors[i]) plt.xlabel('Benchmark') plt.ylabel('Time (normalized)') plt.title('cpyext microbenchmarks') plt.xticks(index + bar_width, benchmarks, rotation=45) plt.legend() plt.show() # - plot_benchmarks("CPython", "PyPy 5.8", "PyPy 6.0") # + [markdown] slideshow={"slide_type": "fragment"} # # Future status (hopefully) # * All C extensions will just work # * C code as fast as today, Python code super-fast # * The best of both worlds # * PyPy as the default choice for HPC # # + [markdown] slideshow={"slide_type": "fragment"} # # My personal estimate: 6 months of work and we have a fast cpyext # # (let's talk about money :)) # + [markdown] slideshow={"slide_type": "slide"} # # That's all # ## Questions?
pypy-for-HPC/PyPy for High Performance Computing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <h1> ILI286 - Computación Científica II </h1> # <h2> Ecuaciones Diferenciales Parciales: Elípticas </h2> # <h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2> # <h2> Version: 1.14</h2> # </center> # # Tabla de Contenidos # * [Introducción](#intro) # * [Marco Teórico](#teo) # * [FEM en 1D](#1d) # * [Condiciones de Borde de Dirichlet](#dir) # * [Condiciones de Borde de Neumann](#neu) # * [FEM en 2D](#2d) # * [Acknowledgements](#acknowledgements) # %matplotlib inline import numpy as np from scipy.sparse.linalg import spsolve from scipy.sparse import dia_matrix from matplotlib import pyplot as plt from numpy.linalg import solve, norm, lstsq from pdb import set_trace as st # + def Error(x, u, u_h): h = x[1]-x[0] e = norm(u(x)-u_h) * np.sqrt(h) return e def SolutionPlot(x, u, u_h): plt.figure(figsize=(10,6)) xx = np.linspace(x[0], x[-1], 10*len(x)) plt.plot(xx, u(xx), '-', label="u(x)") plt.plot(x, u_h, 'o--', label="u_h(x)") plt.xlabel('x') plt.ylabel('u(x)') plt.legend(loc=0, numpoints=1) plt.show() def ErrorPlot(h_list, error_list): plt.figure(figsize=(10,6)) # First plot plt.subplot(2,1,1) plt.plot(h_list, error_list, 'gs--', label='$||u-u_h||_{L^2}$', lw=1.5, mew=1.5) plt.xlabel('h') plt.ylabel('$L^2$ norm error') plt.legend(loc=0, numpoints=1) #plt.gca().yaxis.set_major_formatter(plt.FormatStrFormatter('%1.2E')) # log Error h plot plt.subplot(2,1,2) plt.semilogy(h_list, error_list, 'go--',label='$||u-u_h||_{L^2}$', lw=1.5, mew=1.5) #plt.plot(log_h, m*log_h+c, 'k-',label='%1.2f log(h) + %1.2f' %(m,c)) plt.xlabel('h') plt.ylabel('$L^2$ norm log error') plt.legend(loc=0, numpoints=1) # Least squares fit of the loglog plt.show() return # - # <div id='intro' /> # # # Introducción # # Al igual que en el anterior, en 
el siguiente notebook se estudiará la resolución numérica de **ecuaciones diferenciales parciales elípticas**. Sin ser necesaria la repetición de la introducción a las PDE, cabe destacar que en esta ocasión variaremos el método utilizado para resolverlas, siendo éste el método de **elementos finitos** (FEM), el cual es un método muy popular en la ingeniería. # # A continuación veremos una breve explicación del método junto con aplicaciones prácticas al caso 1D y 2D. # <div id='teo' /> # # # <NAME> # # El método de elementos finitos consiste, en palabras simples, en la discretización del dominio (cuerpo) sobre el cual queremos resolver la ecuación diferencial en cuestión. Esto lo logramos mediante la definición de numerosos subdominios no intersectantes entre sí llamados elementos finitos. # # Al lograr expresar la ecuación diferencial que deseamos resolver en su forma débil o variacional (en forma de integral), se establecen condiciones de vínculo entre los elementos finitos de nuestro dominio, generándose un sistema de ecuaciones que debe ser resuelto numéricamente para obtener nuestra solución aproximada. # # A modo de ejemplo, consideremos la siguiente ecuación en 1D: # # \begin{align*} # -\frac{d^2u(x)}{dx^2} + u(x) = f(x) \ , \ x \in [0,1] \\ # \end{align*} # # \begin{align*} # u(0) = u(1) = 0 # \end{align*} # # Multiplicando por $v(x)$ (con $v(0)=v(1)=0$) e integrando obtenemos: # # \begin{align*} # - \int_0^1 \frac{d^2u(x)}{dx^2}v(x)dx + \int_0^1 u(x)v(x)dx = \int_0^1 f(x)v(x)dx \\ # \end{align*} # # Si integramos por partes el primer término obtenemos: # # \begin{align*} # -\int_0^1 \frac{d^2u(x)}{dx^2}v(x)dx = -\left. 
\frac{du(x)}{dx}v(x)\right|^1_0 + \int_0^1 \frac{du(x)}{dx}v'(x)dx # \end{align*} # # Debido a nuestra definición de $v(x)$ podemos eliminar el término en donde esta función es evaluada en los bordes, por lo que nuestra ecuación en su forma variacional sería: # # \begin{align*} # \int_0^1 \frac{du(x)}{dx}\frac{dv(x)}{dx}dx + \int_0^1 u(x)v(x)dx = \int_0^1 f(x)v(x)dx # \end{align*} # # Lo que sigue es expresar nuestra solución $u(x)$ como una combinación lineal de una base finita del espacio $L^2[0,1]$, digamos $\langle \phi_0(x),\phi_1(x),\dots,\phi_{n+1}(x)\rangle$. En otras palabras, $u(x) = \sum_{i=0}^{n+1}c_i\phi_i(x)$, y adicionalmente, redefinimos $v(x)=\phi_k(x)$, para $k=1:n$. Por lo tanto, nuestra ecuación se transforma en: # # \begin{align*} # \sum_{i=0}^{n+1}c_i \left( \int_0^1 \phi_i'(x)\phi_k'(x)dx + \int_0^1 \phi_i(x)\phi_k(x)dx \right) = \int_0^1 f(x)\phi_k(x)dx # \end{align*} # # Luego, definiendo $\phi_i(x)$ como: # # \begin{equation*} # \phi_i(x) = \left\{ # \begin{array}{ll} # \dfrac{x-x_{i-1}}{x_i-x_{i-1}} \; & \text{si } x_{i-1} < x \leq x_i \\ # \dfrac{x_{i+1}-x}{x_{i+1}-x_i} \; & \text{si } x_i < x \leq x_{i+1} \\ # 0 \; & \text{en otros casos} # \end{array} \right. 
# \end{equation*} # # Obtenemos los siguientes valores para las integrales: # # \begin{equation*} # \begin{array}{rl} # \int_0^1 \phi_i(x)\phi_{i+1}(x)dx =& \dfrac{\Delta x}{6} \\ # \int_0^1 (\phi_i(x))^2dx =& \dfrac{2 \Delta x}{3} \\ # \int_0^1 \phi_i'(x)\phi_{i+1}'(x)dx =& - \dfrac{1}{\Delta x} \\ # \int_0^1 (\phi_i'(x))^2dx =& \dfrac{2}{\Delta x} # \end{array} # \end{equation*} # # Por lo que finalmente, al discretizar el dominio $[0,1]$ en $n+2$ puntos $x_0,x_1,x_2,\dots,x_n,x_{n+1}$, todo se resume en resolver el siguiente sistema de ecuaciones: # # \begin{equation*} # \left[ \begin{array}{ccccccc} # \gamma_{i,i} & \gamma_{i,i+1} & 0 & \cdots & 0 & 0 & 0 \\ # \gamma_{i,i+1} & \gamma_{i,i} & \gamma_{i,i+1} & \cdots & 0 & 0 & 0 \\ # 0 & \gamma_{i,i+1} & \gamma_{i,i} & \cdots & 0 & 0 & 0 \\ # \vdots & \vdots & \vdots & \ddots & \vdots & \vdots & \vdots \\ # 0 & 0 & 0 & \cdots & \gamma_{i,i} & \gamma_{i,i+1} & 0 \\ # 0 & 0 & 0 & \cdots & \gamma_{i,i+1} & \gamma_{i,i} & \gamma_{i,i+1}\\ # 0 & 0 & 0 & \cdots & 0 & \gamma_{i,i+1} & \gamma_{i,i} # \end{array} \right] \left[ \begin{array}{c} # c_1 \\ c_2 \\ c_3 \\ \vdots \\ c_{n-2} \\ c_{n-1} \\ c_n # \end{array} \right] = \left[ \begin{array}{c} # \Delta x \cdot f(x_1) \\ \Delta x \cdot f(x_2) \\ \Delta x \cdot f(x_3) \\ \vdots \\ \Delta x \cdot f(x_{n-2}) \\ \Delta x \cdot f(x_{n-1}) \\ \Delta x \cdot f(x_n) # \end{array} \right] # \end{equation*} # # Donde $\gamma_{i,i} = \dfrac{2 \Delta x}{3} + \dfrac{2}{\Delta x}$ y $\gamma_{i,i+1} = \dfrac{\Delta x}{6} - \dfrac{1}{\Delta x}$. # # Es importante notar que dada la definición de $\phi_i(x)$, se obtiene que $u(x_i) = c_i$, por lo que $c_0 = u(x_0) = u(0) = 0$ y $c_{m+1} = u(x_{m+1}) = u(1) = 0$. 
# <div id='1d' /> # # # FEM en 1D # ## El problema ## # # Deseamos solucionar el mismo problema visto en la sección anterior: # # \begin{align*} # -\frac{d^2u(x)}{dx^2} + u(x) = f(x) \ , \ x \in [0,1] \\ # \end{align*} # # \begin{align*} # u(0) = u(1) = 0 # \end{align*} # # Cuya formulación variacional según vimos es: # # \begin{align*} # \int_0^1 \frac{du(x)}{dx} \frac{dv(x)}{dx} dx + \int_0^1 u(x) v(x) dx = \int_0^1 f(x) v(x) dx # \end{align*} # ## Solución numérica ## # # + f1 = lambda x: 0*x P1 = {"f":f1, "a":0, "b":1} u1 = lambda x: 0*x P1_sol = {"u": u1} f2 = lambda x: x**2 - x - 2 P2 = {"f":f2, "a":0, "b":1} u2 = lambda x: x*(x-1) P2_sol = {"u": u2} f3 = lambda x: np.exp(-(x-.5)**2) P3 = {"f":f3, "a":0, "b":1} P3_sol = {"u": None} f4 = lambda x: np.where(x<=.5, 0, 1) P4 = {"f":f4, "a":0, "b":1} P4_sol = {"u": None} def FEM(P, u0, u1, N=5): # Parameters x = np.linspace(P["a"], P["b"], N+2) dx = x[1]-x[0] # Matrix K diag = (2./dx + 2.*dx/3.)*np.ones(N) udiag = (-1./dx + dx/6.)*np.ones(N) ldiag = udiag K = dia_matrix(([ldiag, diag, udiag], [-1, 0,+1]), shape=(N, N)) # Right hand side b = dx*P["f"](x[1:-1]) # Return the solution c = [u0] + list(spsolve(K, b)) + [u1] return x, c # - P, P_sol = P2, P2_sol x, c = FEM(P, 0, 0, N=15) SolutionPlot(x, P_sol['u'], c) # <div id='dir' /> # # ## Condiciones de Borde de Dirichlet # # Si bien, el problema que ya resolvimos también tenía este tipo de condiciones de borde, volveremos a echarles un vistazo. # # En esta ocasión resolveremos el siguiente problema: # # \begin{align*} # -\frac{d^2u(x)}{dx^2} = f(x) \ , \ x \in [0,1] \\ # \end{align*} # # \begin{align*} # u(0) = u(1) = 0 # \end{align*} # + def DirichletSolver(f, N): x = np.linspace(0., 1., N) h = x[1]-x[0] K = (1./h)*(np.diag(-1*np.ones(N-1),-1) +np.diag( 2*np.ones(N), 0) +np.diag(-1*np.ones(N-1),+1)) b = h*f(x) u = solve(K, b) return x, u f1 = lambda x : np.e**x u1 = lambda x : -np.e**x + (np.e - 1.)*x + 1. 
f2 = lambda x : np.sin(x) - x u2 = lambda x : np.sin(x) - x*np.sin(1.) + (x**3 - x )/6. # Example f, u = f2, u2 N = 64 x, u_h = DirichletSolver(f, N) SolutionPlot(x, u, u_h) # Convergence f, u = f2, u2 N_list = 64*2**np.arange(6) h_list = [] error_list = [] for N in N_list: x, u_h = DirichletSolver(f, N) h = x[1]-x[0] h_list.append(h) error_list.append(Error(x, u, u_h) ) ErrorPlot(h_list, error_list) # - # <div id='neu' /> # # ## Condiciones de Borde de Neumann # # Ahora resolveremos la misma ecuación, pero con una condición de borde de Neumann cuando $x=1$. Esta vez tenemos: # # \begin{align*} # -\frac{d^2u(x)}{dx^2} = f(x) \ , \ x \in [0,1] \\ # \end{align*} # # \begin{align*} # u(0) = u'(1) = 1 # \end{align*} # + def NeumannSolver(f, N): x = np.arange(1.,N+1.)/N h = 1./N K = (1./h)*( np.diag(-1*np.ones(N-1),-1) +np.diag( 2*np.ones(N), 0) +np.diag(-1*np.ones(N-1),+1) ) K[-1,-2:] = [-1/h, 1/h] b = h*f(x) b[-1] = h/2.*f(1.0) + 1.0 u = solve(K, b) + 1 return x, u f1 = lambda x : np.e**x u1 = lambda x : -np.e**x + (np.e + 1.)*x + 2. f2 = lambda x : np.sin(x) - x u2 = lambda x : np.sin(x) + x**3/6. + x*(1./2. - np.cos(1.) ) + 1. 
# Example f, u = f1, u1 N = 8 x, u_h = NeumannSolver(f, N) SolutionPlot(x, u, u_h) # Convergence f, u = f1, u1 N_list = 64*2**np.arange(6) h_list = [] error_list = [] for N in N_list: x, u_h = NeumannSolver(f, N) h = x[1]-x[0] h_list.append(h) error_list.append(Error(x, u, u_h) ) ErrorPlot(h_list, error_list) # - # <div id='2d' /> # # # FEM en 2D # # Consideremos la ecuación: # # \begin{align*} # -\Delta u(x,y) + u(x,y) = f(x,y) \ , \ (x,y) \in \Omega \\ # \end{align*} # # \begin{align*} # u(\Omega) = 0 # \end{align*} # # Repitiendo el mismo procedimiento visto en el [Marco Teórico](#teo), llegamos a la formulación variacional: # # \begin{align*} # \int_{\Omega} \nabla u(x,y) \cdot \nabla v(x,y) dA + \int_{\Omega} u(x,y)v(x,y)dA = \int_{\Omega} f(x,y)v(x,y)dA # \end{align*} # # Con $u(x,y) = \sum_{i=0}^{n+1} c_i \phi_i(x,y)$ y $v(x,y) = \phi_k(x,y)$ para $k=1:n$. # # # + N = 100. theta = np.arange(0.,N+1)*2.*np.pi/N lw = 2 r_o = 5./8. + 3./8.*np.cos(3*theta) r_i = 1./8. + 0*theta x_o, y_o = r_o*np.cos(theta), r_o*np.sin(theta) plt.plot(x_o, y_o, ':', label='$\Gamma_o$', lw=lw) x_i, y_i = r_i*np.cos(theta), r_i*np.sin(theta) plt.plot(x_i, y_i, '-', label='$\Gamma_i$',lw=lw) plt.axis('equal') plt.show() # - # <div id='acknowledgements' /> # # # Acknowledgements # * _Material creado por profesor <NAME>_ (`<EMAIL>`) _y ayudantes: <NAME> y <NAME>. DI UTFSM. Abril 2016._ # # *** # ### DISCLAIMER ### # # El presente ipython notebook ha sido creado para el curso **ILI286 - Computación Científica 2**, del [Departamento de Informática](http://www.inf.utfsm.cl/), [Universidad Técnica Federico Santa María](http://www.utfsm.cl/). # # El material ha sido creado por <NAME> y <NAME>, y es distribuido sin restricciones. En caso de encontrar un error, por favor no dude en contactarnos. # # [Update 2016] (Álvaro) Se agregó tabla de contenidos, contexto y marco teórico. Se unieron los notebooks de 1D y 2D. Adicionalmente se adjuntaron los códigos de condiciones de Dirichlet y Neumann. 
# # [Update 2019] (<NAME>) Fixing issue with titles of sections. Fixing issue with identation and issue with a non-integer length of array. # ***
SC2/U3_FEM1D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="cbduK4oCsmTD" # !python -m spacy download en_core_web_lg import spacy import tensorflow as tf import re import spacy from tensorflow.keras.layers.experimental import preprocessing from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, GRU, Embedding from tensorflow.keras.preprocessing.sequence import pad_sequences from google.colab import drive drive.mount("/content/drive") import numpy as np import os import time nlp = spacy.load("en_core_web_lg") file = open("/content/drive/MyDrive/466 data/en_tech_train.txt", "rb") train_set = file.read().decode(encoding='utf-8') file = open("/content/drive/MyDrive/466 data/en_tech_test.txt", "rb") test_set = file.read().decode(encoding='utf-8') file = open("/content/drive/MyDrive/466 data/en_tech_validate.txt", "rb") validate_set = file.read().decode(encoding='utf-8') # + colab={"base_uri": "https://localhost:8080/"} id="lg5qWr3ooIZx" outputId="3bb13b96-8865-4f07-8d35-46c1c5e00217" from google.colab import drive drive.mount('/content/drive') # + id="FOOD5gWmvfBw" def predict_single(model, tokenizer, seed_text): predicted_word = None encoded = tokenizer.texts_to_sequences([seed_text])[0] encoded = pad_sequences([encoded], maxlen=5, truncating="pre") temp = model.predict(encoded) y_predict = np.argmax(temp, axis=-1) for word, index in tokenizer.word_index.items(): if index == y_predict: predicted_word = word break return str(predicted_word) # + id="MIaweIWX7FJU" def generate_text(model, tokenizer, text_seq_len, seed_text, n_words): text = [] word_distribution = [] for _ in range(n_words): encoded = tokenizer.texts_to_sequences([seed_text])[0] #encoded = pad_sequences([encoded], 
maxlen = 4, truncating='pre') temp = model.predict(encoded) y_predict = np.argmax(temp, axis=-1) #print(y_predict) word_list = [] max_index = y_predict for x in range(4): temp[0][max_index] = -1 max_index = np.argmax(temp, axis=-1) word_list.append(max_index) predicted_word = '' temp = model.predict(encoded) for word, index in tokenizer.word_index.items(): if index == y_predict: predicted_word = word break seed_text = seed_text + ' ' + predicted_word text.append(predicted_word) distribution = [(predicted_word, temp[0][y_predict][0])] for predicted in word_list: predicted_word = '' for word, index in tokenizer.word_index.items(): if index == predicted: predicted_word = word break distribution.append((predicted_word, temp[0][predicted][0])) word_distribution.append(distribution) return ' '.join(text), word_distribution # + id="GxF8d2LD4U54" def validate(sentences, nlp): new = [] for i in range(len(sentences)): new.append(sentences[i].split()) validate_sequences = np.array(new) validate_X, validate_y = validate_sequences[:, :-1], validate_sequences[:, -1] average_accuracy = 0 num = 0 for i in range(len(validate_X)): prediction = predict_single(model, tokenizer, ' '.join(validate_X[i])) x = nlp(prediction) y = nlp(str(validate_y[i])) if (x[0].has_vector) and (y[0].has_vector): average_accuracy += nlp(prediction).similarity(nlp(str(validate_y[i]))) num += 1 average_accuracy /= num return average_accuracy # + id="eFI9h_JRLV-5" processed_train_set = train_set.split("\n") processed_test_set = test_set.split("\n") processed_validate_set = validate_set.split("\n") input_size = 6 test_size = 6 train_sentences = [] test_sentences = [] validate_sentences = [] for line in processed_train_set: temp = line.split() for i in range(input_size, len(temp)): seq = temp[i - input_size:i] temp2 = " ".join(seq) train_sentences.append(temp2) #train_sentences[0:10] for line in processed_test_set: temp = line.split() for i in range(test_size, len(temp)): seq = temp[i - test_size:i] temp2 = " 
".join(seq) test_sentences.append(temp2) print(test_sentences[0:10]) for line in processed_validate_set: temp = line.split() for i in range(input_size, len(temp)): seq = temp[i - input_size:i] temp2 = " ".join(seq) validate_sentences.append(temp2) tokenizer = Tokenizer() tokenizer.fit_on_texts(train_sentences) train_sequences = tokenizer.texts_to_sequences(train_sentences) train_sequences = np.array(train_sequences) train_X, train_y = train_sequences[:, :-1], train_sequences[:, -1] vocab_size = len(tokenizer.word_index) + 1 #model = tf.keras.models.load_model('/content/drive/MyDrive/model/saved_model/my_model') train_y = to_categorical(train_y, num_classes = vocab_size) # sequences = np.array(sequences) # print(sequences.shape) model = Sequential() model.add(Embedding(vocab_size, 256)) # model.add(GRU(512, return_sequences=True)) model.add(GRU(256)) model.add(Dense(1024, activation='relu')) model.add(Dense(vocab_size, activation='softmax')) model.summary() model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(train_X,train_y, batch_size=400, epochs=40) # + id="QJLGT4MGXuRe" colab={"base_uri": "https://localhost:8080/"} outputId="9c2c3383-5ad7-40ac-d3c0-5cba3f1be0ff" accuracy = validate(test_sentences, nlp) print(accuracy)
code/RNN_with_GRU/RNN_with_GRU(word_based).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import joblib import matplotlib.pyplot as plt import numpy as np import seaborn as sns # Plot styling. plt.style.use(['seaborn-white', 'seaborn-paper']) plt.rc('font', family='sans-serif') sns.set_palette(['#6da7de', '#9e0059', '#dee000', '#d82222', '#5ea15d', '#943fa6', '#63c5b5', '#ff38ba', '#eb861e', '#ee266d']) sns.set_context('paper', font_scale=1.3) rf_stats, *_ = joblib.load('../data/processed/train_classifier_rf.joblib') lr_stats, *_ = joblib.load('../data/processed/train_classifier_lr.joblib') svm_stats, *_ = joblib.load('../data/processed/train_classifier_svm.joblib') # + width = 7 height = width / 1.618 # Golden ratio. fig, ax = plt.subplots(figsize=(width, height)) interval = np.linspace(0, 1, 101) tpr_rf = rf_stats['tpr_mean_test'] tpr_rf[0], tpr_rf[-1] = 0, 1 tpr_lr = lr_stats['tpr_mean_test'] tpr_lr[0], tpr_lr[-1] = 0, 1 tpr_svm = svm_stats['tpr_mean_test'] tpr_svm[0], tpr_svm[-1] = 0, 1 ax.plot(interval, tpr_rf, label=f'Random forest (AUC = {rf_stats["roc_auc_test"]:.3f} ' f'± {rf_stats["roc_auc_std_test"]:.3f})') ax.fill_between(interval, tpr_rf - rf_stats['tpr_std_test'], tpr_rf + rf_stats['tpr_std_test'], alpha=0.2) ax.plot(interval, tpr_lr, label=f'Logistic regression (AUC = {lr_stats["roc_auc_test"]:.3f} ' f'± {lr_stats["roc_auc_std_test"]:.3f})') ax.fill_between(interval, tpr_lr - lr_stats['tpr_std_test'], tpr_lr + lr_stats['tpr_std_test'], alpha=0.2) ax.plot(interval, tpr_svm, label=f'SVM (AUC = {svm_stats["roc_auc_test"]:.3f} ' f'± {svm_stats["roc_auc_std_test"]:.3f})') ax.fill_between(interval, tpr_svm - svm_stats['tpr_std_test'], tpr_svm + svm_stats['tpr_std_test'], alpha=0.2) ax.plot([0, 1], [0, 1], c='black', ls='--') ax.set_xlim(0, 1) ax.set_ylim(0, 1) ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive 
Rate') ax.legend(loc='lower right', frameon=False) sns.despine() plt.savefig('train_classifier_roc.png', dpi=300, bbox_inches='tight') plt.show() plt.close()
notebooks/train_classifier_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np # + arr = np.arange(1,10) print(arr) print(arr+2) print(arr-2) print(arr*2) arr=arr**2 arr # - print('First Four : {}'.format(arr[:4])) print('Last Four : {}'.format(arr[-4:])) print('Start at 2, exclude last 2 : {}'.format(arr[2:-2])) print('Start at 0, pick odd indexes : {}'.format(arr[0:arr.size:2])) print('Sort descending : {}'.format(arr[::-1])) arr = np.array([ ['Alice', 'Beth','Cathy', 'Dorothy'], ['65', '78', '90','81'], ['71', '82', '79', '92'] ]) print(arr) arr[...,-2:]
notebooks/04-Numpy-basic array slicing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Azure ML & Azure Databricks notebooks by <NAME>. # # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![04ACI](files/tables/image1.JPG) # #Data Ingestion import os import urllib # + # Download AdultCensusIncome.csv from Azure CDN. This file has 32,561 rows. basedataurl = "https://amldockerdatasets.azureedge.net" datafile = "AdultCensusIncome.csv" datafile_dbfs = os.path.join("/dbfs", datafile) if os.path.isfile(datafile_dbfs): print("found {} at {}".format(datafile, datafile_dbfs)) else: print("downloading {} to {}".format(datafile, datafile_dbfs)) urllib.request.urlretrieve(os.path.join(basedataurl, datafile), datafile_dbfs) # - # Create a Spark dataframe out of the csv file. data_all = sqlContext.read.format('csv').options(header='true', inferSchema='true', ignoreLeadingWhiteSpace='true', ignoreTrailingWhiteSpace='true').load(datafile) print("({}, {})".format(data_all.count(), len(data_all.columns))) data_all.printSchema() #renaming columns columns_new = [col.replace("-", "_") for col in data_all.columns] data_all = data_all.toDF(*columns_new) data_all.printSchema() display(data_all.limit(5)) # #Data Preparation # + # Choose feature columns and the label column. label = "income" xvars = set(data_all.columns) - {label} print("label = {}".format(label)) print("features = {}".format(xvars)) data = data_all.select([*xvars, label]) # Split data into train and test. 
train, test = data.randomSplit([0.75, 0.25], seed=123) print("train ({}, {})".format(train.count(), len(train.columns))) print("test ({}, {})".format(test.count(), len(test.columns))) # - # #Data Persistence # + # Write the train and test data sets to intermediate storage train_data_path = "AdultCensusIncomeTrain" test_data_path = "AdultCensusIncomeTest" train_data_path_dbfs = os.path.join("/dbfs", "AdultCensusIncomeTrain") test_data_path_dbfs = os.path.join("/dbfs", "AdultCensusIncomeTest") train.write.mode('overwrite').parquet(train_data_path) test.write.mode('overwrite').parquet(test_data_path) print("train and test datasets saved to {} and {}".format(train_data_path_dbfs, test_data_path_dbfs)) # -
how-to-use-azureml/azure-databricks/amlsdk/ingest-data-02.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.6.1 # language: julia # name: julia-0.6 # --- using TextAnalysis, Languages, Clustering filename = joinpath(dirname(@__FILE__), "data", "highlights.csv") # + function get_headers(source) headers = [ String(s) for s in source[1,:] ] source = source[2:end,:] # clear the headers headers, source end source = readcsv(filename) headers, source = get_headers(source) # - sample = source[:,5] length(sample) # + #fd = FileDocument(sample) #sd = StringDocument(fd) text = join(sample, ",") sd = StringDocument(text) crps = Corpus([sd]) # - remove_punctuation!(sd) update_lexicon!(crps) lexicon(crps) # + update_inverse_index!(crps) inverse_index(crps) hash_function!(crps, TextHashFunction()) crps["Handelskrieg"] # - m = DocumentTermMatrix(crps) D = dtm(m, :dense) T = tf_idf(D) cl = kmeans(T, 5) # + m = DocumentTermMatrix(crps) k = 2 # number of topics iteration = 1000 # number of gibbs sampling iterations alpha = 0.1 # hyper parameter beta = 0.1 # hyber parameter l = lda(m, k, iteration, alpha, beta) # l is k x word matrix. # value is probablity of occurrence of a word in a topic.
analysis/textanalysis-test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project Week 2: Classification with the Titanic Database # #### Importing libraries import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.dummy import DummyClassifier from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.metrics import recall_score from sklearn.pipeline import make_pipeline from sklearn.model_selection import cross_val_score from sklearn.metrics import plot_confusion_matrix from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.tree import plot_tree from sklearn.dummy import DummyClassifier from sklearn.preprocessing import OneHotEncoder from sklearn.compose import ColumnTransformer from sklearn.preprocessing import MinMaxScaler, StandardScaler from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import KBinsDiscretizer from sklearn.preprocessing import FunctionTransformer from sklearn.metrics import precision_score from sklearn.metrics import f1_score from sklearn.metrics import classification_report from sklearn.metrics import plot_roc_curve from sklearn.metrics import accuracy_score # ### Step 1: Getting data df = pd.read_csv('Data/train.csv',sep =",") df.head() # ### Step 2: Performing EDA df.shape # Calculate the number of surviving/non-surviving passengers and display it as a bar plot. sns.countplot(data = df, x = "Survived") plt.xlabel("Died or survived (0 = died, 1 = survived)") plt.ylabel("Count") # Calculate the proportion of surviving 1st class passengers with regards to the total number of 1st class passengers. 
df_1st = df.loc[df['Pclass']== 1 ] df_1st['Survived'].value_counts(normalize=True).plot(kind='pie', autopct="%.1f") plt.ylabel("Survived or not") # Create a histogram showing the age distribution of passengers. Compare surviving/non-surviving passengers. sns.histplot(data=df, x="Age", hue="Survived") # Calculate the average age for survived and drowned passengers separately. df.groupby(['Survived'])['Age'].mean() # Create a table counting the number of surviving/dead passengers separately for 1st/2nd/3rd class and male/female. df.groupby(['Survived','Sex','Pclass'])['Pclass'].count().to_frame().rename(columns = {'Pclass': 'People Count'}) # ### Step 3: Model definition and train test split # We split the data into train and test datasets. The model should not see the test data until the ened when we use it to evaluate the performance of the model. df2 = df.loc[df['Sex']=="male"] df2 y = df2['Survived'] y.shape X = df2[['Pclass', 'Age', 'Name','SibSp', 'Fare', 'Parch', 'Embarked']] #approach, you know which features you want to take X X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.2, random_state = 42) X_train.head() X_train.shape, X_test.shape, y_train.shape, y_test.shape # ### Step 4: Feature engineering # #### Custom transformation def name_length(df2): length = df2[df2.columns[0]].str.len() return length.values.reshape(-1, 1) name_transformer = FunctionTransformer(name_length) name_transformer.fit(X[['Name']]) name_transformer.transform(X[['Name']]) # #### Definition of pipelines cat_pipe = make_pipeline( SimpleImputer(strategy='most_frequent'), OneHotEncoder(sparse=False, handle_unknown='ignore') ) num_pipe = make_pipeline( SimpleImputer(strategy='median'), MinMaxScaler() ) name_pipe = make_pipeline( FunctionTransformer(name_length), MinMaxScaler() ) # #### Column transformer feature_transform = ColumnTransformer([ ('do_nothing', 'passthrough', ['Pclass']), ('cat_preprocessing', cat_pipe, ['Embarked']), ('num_preprocessing', num_pipe, 
['Age', 'Fare', 'SibSp', 'Parch']), ('name_preprocessing', name_pipe, ['Name']) ]) X_train_trans = feature_transform.fit_transform(X_train) X_train_trans # ### Step 5: Train model and make initial predictions # #### 5.1 Train a baseline model ### Initiate the model dummy_model = DummyClassifier(strategy= 'most_frequent') ### Fit the model dummy_model.fit(X_train_trans,y_train) y_pred_dummy = dummy_model.predict(X_test) y_pred_dummy # #### 5.2 Train logistic regression model m = LogisticRegression() m.fit(X_train_trans, y_train) # Take a look at the coefficients m.coef_ m.intercept_,2 f' w1: {m.coef_} and b: {m.intercept_}' X_test.head() # #### 5.3 Preliminary Model evaluation (Version 2) m.score(X_train_trans, y_train) # calculate the accuracy round(dummy_model.score(X_test, y_test),2) X_test_trans = feature_transform.transform(X_test) # calculate the accuracy m.score(X_test_trans, y_test) y_pred = m.predict(X_test_trans) y_pred # #### 5.4 Trying the Decision Tree decision = DecisionTreeClassifier(max_depth=None) decision.fit(X_train_trans, y_train) decision.score(X_train_trans,y_train) # It is heavily overfitted! decision2 = DecisionTreeClassifier(max_depth= 2) decision2.fit(X_train_trans, y_train) round(decision2.score(X_train_trans, y_train),2) # ##### Evaluate decision tree round(decision.score(X_test_trans,y_test),2) round(decision2.score(X_test_trans,y_test),2) # When the problem of overfitting is ameliorated, the accuracy of the model goes a little up. In this sense, tuning the parameters from the model is the strategy to improve the accuracy. # #### 5.4 Trying the Random Forest forestdecision = RandomForestClassifier(max_depth=10, n_estimators = 100, random_state = 42) forestdecision.fit(X_train_trans, y_train) forestdecision.score(X_train_trans,y_train) # With a large number for the "max_depth" there is huge overfitting of the training set! 
forestdecision2 = RandomForestClassifier(max_depth=3, n_estimators = 100, random_state = 42) forestdecision2.fit(X_train_trans, y_train) forestdecision2.score(X_train_trans,y_train) # This looks better! forestdecision3 = RandomForestClassifier(max_depth=3, n_estimators = 100, class_weight='balanced', random_state = 42) forestdecision3.fit(X_train_trans, y_train) forestdecision3.score(X_train_trans,y_train) # ##### Evaluate Random forest models round(forestdecision.score(X_test_trans,y_test),2) round(forestdecision2.score(X_test_trans,y_test),2) round(forestdecision3.score(X_test_trans,y_test),2) # With the inclusion of the "class_weight='balanced', which is recommendable when the classes are unbalanced, the accuracy is reduced. Regarding the robustness/model quality tradeoff we discussed before, the impact is actually negative. # # The models' predictive performance is not that high because the feature "sex", which is very important, has been removed! As can be seen in the heatmap above, there is not much strong correlation between the other data, so we lose one of the most important predictors of the dependent variables. # # `"class_weight='balanced': uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data"` # # https://towardsdatascience.com/practical-tips-for-class-imbalance-in-binary-classification-6ee29bcdb8a7 # # ### Step 6: Cross-validation # We use cross-validation to evaluate both how good our model is, but also how robust it is. 
#For the logistic regression model cross_val_score(m, X_train_trans, y_train, cv = 5) # For the tree cross_val_score(decision, X_train_trans, y_train, cv = 5) # For the forest 1 cross_val_score(forestdecision, X_train_trans, y_train, cv = 5) # For the forest 2 cross_val_score(forestdecision2, X_train_trans, y_train, cv = 5) # For the forest 3 cross_val_score(forestdecision3, X_train_trans, y_train, cv = 5) # We can evaluate how robust is our model - in depending on sample bias ==> it tells us how good it is at generalizing # A model is robust when we get similar scores for different folds ==> let's compare the similarity of the scores # Logistic regression round(np.mean(cross_val_score(m, X_train_trans, y_train, cv = 5)),5), round(np.std(cross_val_score(m, X_train_trans, y_train, cv = 5)),3) # Tree round(np.mean(cross_val_score(decision, X_train_trans, y_train, cv = 5)),5), round(np.std(cross_val_score(decision, X_train_trans, y_train, cv = 5)),3) # Forest round(np.mean(cross_val_score(forestdecision3, X_train_trans, y_train, cv = 5)),5), round(np.std(cross_val_score(forestdecision3, X_train_trans, y_train, cv = 5)),3) # ### Step 7: Model evaluation (complete version) #percentages conf = confusion_matrix(y_test,y_pred) plot_confusion_matrix(estimator=m, X=X_test_trans, y_true=y_test, #normalize='all', cmap='Blues') # #### 7.1 Accuracy round(accuracy_score(y_test,y_pred),2) round(m.score(X_test_trans,y_test),2) round(m.score(X_train_trans,y_train),2) # #### 7.2 Recall round(recall_score(y_test,y_pred),2) # #### 7.3 Precision round(precision_score(y_test, y_pred),2) # #### 7.4 F1 score round(f1_score(y_test,y_pred),2) print(classification_report(y_test, y_pred)) # #### 7.5 ROC AUC Curve plot_roc_curve(m, X_test_trans, y_test) conf_dummy = confusion_matrix(y_test,y_pred_dummy) conf_dummy plot_confusion_matrix(estimator=dummy_model, X=X_test, y_true=y_test, #normalize='all', cmap='Blues') ## Accuracy round(accuracy_score(y_test,y_pred_dummy),2) ## Recall 
round(recall_score(y_test,y_pred_dummy),2) ## Precision round(precision_score(y_test, y_pred_dummy),2) ## F1 Score round(recall_score(y_test,y_pred_dummy),2) ## Classification report print(classification_report(y_test, y_pred_dummy)) # ### Step 8: Making some predictions prob = m.predict_proba(X_test_trans) [e[0] for e in prob] X_test['prediction'] = y_pred X_test.head() X_test['prob of dying'] = [e[0] for e in prob] X_test['prob of surviving'] = [e[1] for e in prob] X_test
week_02/Project_week_2.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.4.3-pre # language: julia # name: julia-0.4 # --- using Colors using DataFrames using Distributions using Gadfly # + function auc{R <: Real, N <: Integer}(h::Tuple{AbstractVector{R}, AbstractVector{N}}) auc(h[1], h[2]) end function auc{R <: Real, N <: Integer}(edges::AbstractVector{R}, counts::AbstractVector{N}) deltax = edges[2 : end] - edges[1 : end - 1] sum(deltax .* counts) end; # - d = Distributions.Normal(0, 1) # + srand(1) n = 10000 bins = 100 x = rand(d, n) (edges, counts) = hist(x, bins) xauc = auc(edges, counts) xvec = collect(edges) xdf = DataFrame( xmin = collect(xvec[1 : (end - 1)]), xmax = collect(xvec[2 : end]), count = counts ) xdf[:density] = xdf[:count] / xauc; xdf[:x] = (xdf[:xmin] .+ xdf[:xmax]) ./ 2; # - lower = floor(Int64, xvec[1]) upper = ceil(Int64, xvec[end]) xₛ = linspace(lower, upper, (upper - lower) * 100 + 1) xpdf = DataFrame(x = xₛ, density = pdf(TDist(12), xₛ)); Gadfly.plot( layer( xpdf, x = :x, y = :density, Geom.line, Theme(default_color = colorant"orange") ), layer( xdf, x = :xmin, y = :density, Geom.bar, Theme(default_color = colorant"gainsboro") ) ) plot(xdf, x = :x, y = :density, Geom.bar) reshape(xdf[:x], 23, 3) typeof(xdf[:density])
Attic/Distributions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf2
#     language: python
#     name: tf2
# ---

# +
import numpy as np
import os
import time
from lmfit.models import GaussianModel, Model
from sklearn.linear_model import LinearRegression
from sklearn import mixture, metrics
from shapely.geometry import Polygon
from scipy.stats.distributions import norm
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from matplotlib import pyplot as plt
from matplotlib import cm
from skimage.measure import EllipseModel
from matplotlib.patches import Ellipse
from scipy import ndimage as ndi
from scipy.spatial import distance
from numpy import linalg
from skimage import io
from skimage.draw import ellipse
from skimage.color import rgb2gray
from skimage import filters
from skimage.morphology import disk
from PIL import Image, ImageDraw, ImageFilter, ImageOps
import copy
import cv2
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from scipy.ndimage import distance_transform_edt as edt
from skimage.draw import ellipse
from skimage.measure import find_contours, approximate_polygon, subdivide_polygon
import logging
import glob
from radio_beam.commonbeam import getMinVolEllipse

from src.utils import grainPreprocess, grainShow, grainMark, grainDraw, grainApprox, \
    grainStats, grainMorphology, grainGenerate
from src.cfg import CfgAnglesNames, CfgBeamsNames, CfgDataset

# + pycharm={"name": "#%%\n"}
# Preprocess every non-HDR image of the dataset and mirror the folder
# structure into `new_folder_path`.
folder_path = 'data/dataset'
new_folder_path = 'data/dataset_t'

folders = os.listdir(folder_path)

if not os.path.exists(new_folder_path):
    os.mkdir(new_folder_path)

for folder in folders:
    if not os.path.exists(new_folder_path + '/' + folder):
        os.mkdir(new_folder_path + '/' + folder)

# NOTE: the original nested both loops over the same counter `i`
# (`for i, folder ...` / `for i, name ...`); the counters were unused,
# so they are dropped to remove the shadowing hazard.
for folder in folders:
    images_names = os.listdir(folder_path + '/' + folder)
    for name in images_names:
        if 'hdr' not in name:
            image = io.imread(folder_path + '/' + folder + '/' + name)
            image = grainPreprocess.combine(image, h=140)
            io.imsave(new_folder_path + '/' + folder + '/' + name, image)


# + pycharm={"name": "#%%\n"}
def img_load_preprocess(path, img_names, h=150, k=1):
    """Load and preprocess images grouped by class.

    path      -- base directory containing the image files
    img_names -- iterable of per-class iterables of file names
    h, k      -- parameters forwarded to grainPreprocess.image_preprocess
    Returns a list of lists: one list of preprocessed images per class.
    """
    imgs = []
    for i, class_img_names in enumerate(img_names):
        imgs.append([])
        for name in class_img_names:
            img = io.imread(path + '/' + name)
            img = grainPreprocess.image_preprocess(img, h, k)
            imgs[i].append(img)
    return imgs


# +
path = 'data/images'
all_images = np.load('data/saved np/all_images.npy', allow_pickle=True)
grain_names = np.array(
    [['Ultra_Co8.jpg'], ['Ultra_Co11.jpg'], ['Ultra_Co6_2.jpg'],
     ['Ultra_Co15.jpg'], ['Ultra_Co25.jpg']])
types = ['средние зерна', 'мелкие зерна', 'мелкие зерна', 'крупные зерна', 'средне-мелкие зерна']
grain_images = img_load_preprocess(path, grain_names)

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Read all images, preprocess them and save the result

# + pycharm={"name": "#%%\n"}
all_images = grainPreprocess.read_preprocess_data(images_dir='data/dataset/',
                                                  save_name='gopa.npy',
                                                  max_images_num_per_class=10,
                                                  preprocess=False,
                                                  save=True,
                                                  crop_bottom=True,
                                                  resize=True,
                                                  resize_shape=(1024, 1024, 3))

# + pycharm={"name": "#%%\n"}
# -

# ### Generate angle distributions, approximate them with a bimodal gaussian and save

# + pycharm={"name": "#%%\n"}
from src.utils import grainGenerate, GrainLogs, grainMark, grainStats, grainApprox
import numpy as np

# + pycharm={"name": "#%%\n"}
# images = np.load('data/saved np/all_images.npy', allow_pickle=True)
images = np.load('data/saved np/images_with_filters.npy')
# path = 'data/saved np'
path = 'data'
grain_names = np.array(
    [['Ultra_Co8.jpg'], ['Ultra_Co11.jpg'], ['Ultra_Co6_2.jpg'],
     ['Ultra_Co15.jpg'], ['Ultra_Co25.jpg']])
types = ['средние зерна', 'мелкие зерна', 'мелкие зерна', 'крупные зерна', 'средне-мелкие зерна']

for step in range(1, 6):
    grainGenerate.angles_approx_save(path, images, grain_names[:, 0], types, step, save=False)
# + pycharm={"name": "#%%\n"}
def angles_approx(folder, step, width, height, save=False, font_size=20):
    """Plot the binder-angle distribution for every photo of one sample.

    Loads the precomputed scatter points, gaussian fits, fit parameters
    and legend texts for the given `step`, then draws one fitted curve
    plus one scatter per class on a single axes.
    """
    xy_scatter = np.load(f'{folder}/' + CfgAnglesNames.values + f'{step}.npy', allow_pickle=True)
    xy_gauss = np.load(f'{folder}/' + CfgAnglesNames.approx + f'{step}.npy', allow_pickle=True)
    xy_gauss_data = np.load(f'{folder}/' + CfgAnglesNames.approx_data + f'{step}.npy', allow_pickle=True)
    texts = np.load(f'{folder}/' + CfgAnglesNames.legend + f'{step}.npy', allow_pickle=True)

    plt.rcParams['font.size'] = '15'
    gs = gridspec.GridSpec(1, 1)
    plt.figure(figsize=(width, height))
    ax = plt.subplot(gs[0])

    # distinct markers so several classes can share one scatter plot
    # (number of markers = number of classes - 1)
    markers = ['v', 's', 'D', 'd', 'p', '*']

    for class_idx in range(len(xy_scatter)):
        ax.plot(xy_gauss[class_idx][0], xy_gauss[class_idx][1])
        ax.scatter(xy_scatter[class_idx][0], xy_scatter[class_idx][1],
                   marker=markers[class_idx])

    ax.legend(texts, fontsize=14, loc=1)
    plt.ylabel('p(x)', fontsize=font_size)
    plt.xlabel('угол связующего вещества, градусы', fontsize=font_size)
    if save:
        plt.savefig(f'распределение углов шаг {step}')
    plt.show()


# + pycharm={"name": "#%%\n"}
folder = 'data/saved np/'
step = 5
width = 20
height = 20

# generate the distribution, approximate it and save the data
angles_approx(folder, step, width, height)


# + pycharm={"name": "#%%\n"}
def angles_approx(folder, step, width, height, save=False, font_size=20):
    """Same plot as above, but overlays the synthetic angle distribution
    loaded from `wc-co.npy` (padded with zeros and reversed)."""
    xy_scatter = np.load(f'{folder}/' + CfgAnglesNames.values + f'{step}.npy', allow_pickle=True)
    xy_gauss = np.load(f'{folder}/' + CfgAnglesNames.approx + f'{step}.npy', allow_pickle=True)
    xy_gauss_data = np.load(f'{folder}/' + CfgAnglesNames.approx_data + f'{step}.npy', allow_pickle=True)
    texts = np.load(f'{folder}/' + CfgAnglesNames.legend + f'{step}.npy', allow_pickle=True)

    synth_angles = np.load(f'{folder}/wc-co.npy')
    synth_angles = np.concatenate([synth_angles, np.zeros((180))])

    plt.rcParams['font.size'] = '15'
    gs = gridspec.GridSpec(1, 1)
    plt.figure(figsize=(width, height))
    ax = plt.subplot(gs[0])

    # distinct markers so several classes can share one scatter plot
    markers = ['v', 's', 'D', 'd', 'p', '*']

    for class_idx in range(len(xy_scatter)):
        ax.plot(xy_gauss[class_idx][0], xy_gauss[class_idx][1])
        ax.scatter(xy_scatter[class_idx][0], xy_scatter[class_idx][1],
                   marker=markers[class_idx])

    ax.plot(synth_angles[::-1])

    texts = list(texts)
    texts.append('синтетические углы')
    ax.legend(texts, fontsize=14, loc=2)

    plt.ylabel('p(x)', fontsize=font_size)
    plt.xlabel('угол связующего вещества, градусы', fontsize=font_size)
    if save:
        plt.savefig(f'наложение {step}')
    plt.show()


# + pycharm={"name": "#%%\n"}
folder = 'data/saved np/'
step = 5
width = 20
height = 20

# generate the distribution, approximate it and save the data
angles_approx(folder, step, width, height, save=True)


# + pycharm={"name": "#%%\n"}
def angles_approx_modes(folder, step, start1, stop1, start2, stop2, width, height, font_size=25):
    """Approximate each mode of the angle distribution separately.

    Splits the scatter of each class into two windows
    [start1, stop1] and [start2, stop2] and fits a gaussian to each.
    """
    xy_scatter_o = np.load(f'{folder}/xy_scatter_step_{step}.npy', allow_pickle=True)
    xy_gauss_o = np.load(f'{folder}/xy_gauss_step_{step}.npy', allow_pickle=True)
    texts = np.load(f'{folder}/texts_step_{step}.npy', allow_pickle=True)

    xy_scatter1 = []
    xy_scatter2 = []
    for j, sc in enumerate(xy_scatter_o):
        x = sc[0]
        y = sc[1]
        xy_scatter1.append([[], []])
        xy_scatter2.append([[], []])
        for i, xi in enumerate(x):
            if start1 <= xi <= stop1:
                xy_scatter1[j][0].append(x[i])
                xy_scatter1[j][1].append(y[i])
            if start2 <= xi <= stop2:
                xy_scatter2[j][0].append(x[i])
                xy_scatter2[j][1].append(y[i])

    plt.rcParams['font.size'] = '25'
    markers = ['v', 's', 'D', 'd', 'p', '*']
    k = 2

    for i in range(len(xy_scatter_o)):
        plt.figure(figsize=(width, height))
        marker = markers[i]
        # plt.scatter(xy_scatter_o[i][0],xy_scatter_o[i][1])

        x1 = np.array(xy_scatter1[i][0])
        y1 = xy_scatter1[i][1]
        x2 = np.array(xy_scatter2[i][0])
        y2 = xy_scatter2[i][1]

        # fit one gaussian per mode (initial guesses mu=120 / mu=225)
        mu1, sigma1, amp1 = grainApprox.gaussian_fit(x1, y1, mu=120)
        mu2, sigma2, amp2 = grainApprox.gaussian_fit(x2, y2, mu=225)

        x_gauss1 = np.arange(mu1 - 3 * sigma1, mu1 + 3 * sigma1)
        x_gauss2 = np.arange(mu2 - 3 * sigma2, mu2 + 3 * sigma2)

        y_gauss1 = grainStats.gaussian(x_gauss1, mu1, sigma1, amp1)
        y_gauss2 = grainStats.gaussian(x_gauss2, mu2, sigma2, amp2)

        # plt.plot(x_gauss,y_gauss)
        plt.scatter(x1, y1, marker=marker)
        plt.plot(x_gauss1, y_gauss1)
        plt.scatter(x2, y2, marker=markers[i + 1])
        plt.plot(x_gauss2, y_gauss2)

        # print('mu1=',mu1,' sigma1=',sigma1,'amp1=',amp1)
        # print('mu2=',mu2,' sigma2=',sigma2,'amp2=',amp2)
        plt.legend(['moda1', 'moda2', 'approx1', texts[i]], fontsize=14, loc=1)
        plt.ylabel('p(x)', fontsize=font_size)
        plt.xlabel('угол связующего вещества, градусы', fontsize=font_size)
        # plt.ylim([0,max(np.log(xy_scatter[i][1][:-1]))])
        # plt.savefig('крупные моды отдельная аппрокс ' + names[i])
        # print(texts)
        # plt.savefig('распределение углов')
        plt.show()


# +
folder = 'saved np'
width = 12
height = 15
step = 1
start = 340
stop = 360
start1 = 50
stop1 = 180
start2 = 180
stop2 = 300

angles_approx_modes(folder, step, start1, stop1, start2, stop2, width, height)
# -

# ## Diameter distribution

# + pycharm={"name": "#%%\n"}
pixel = 49.7 / 1000
step = 5
images = np.load('data/saved np/images_with_filters.npy')
folder = 'data/'
names = np.array(
    [['Ultra_Co8.jpg'], ['Ultra_Co11.jpg'], ['Ultra_Co6_2.jpg'],
     ['Ultra_Co15.jpg'], ['Ultra_Co25.jpg']])
types = ['средние зерна', 'мелкие зерна', 'мелкие зерна', 'крупные зерна', 'средне-мелкие зерна']

for step in range(5, 6):
    # print('step: ',step)
    grainGenerate.diametr_approx_save(folder, images, names[:, 0], types, step, pixel, save=False)


# + pycharm={"name": "#%%\n"}
def diametr_approx_all(folder, step, N, M):
    """Plot the a- and b-semiaxis length distributions for all samples."""
    xy_scatter = np.load(f'{folder}/' + CfgBeamsNames.values + f'{step}.npy', allow_pickle=True)
    xy_linear = np.load(f'{folder}/' + CfgBeamsNames.approx + f'{step}.npy', allow_pickle=True)
    xy_linear_data = np.load(f'{folder}/' + CfgBeamsNames.approx_data + f'{step}.npy', allow_pickle=True)
    texts = np.load(f'{folder}/' + CfgBeamsNames.legend + f'{step}.npy', allow_pickle=True)

    plt.rcParams['font.size'] = '20'
    gs = gridspec.GridSpec(2, 1)
    # for the paper: 12, 18
    plt.figure(figsize=(N, M))
    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1])

    ax1.set_title('распределение a-полуосей связующего вещества', fontsize=20)
    ax2.set_title('распределение b-полуосей связующего вещества', fontsize=20)

    ax1.set_ylabel('ln(p(x))', fontsize=15)
    ax1.set_xlabel('длина полуоси, мкм', fontsize=15)
    ax2.set_ylabel('ln(p(x))', fontsize=15)
    ax2.set_xlabel('длина полуоси, мкм', fontsize=15)
    # ax3.grid()

    markers = ['v', 's', 'D', 'd', 'p', '*']

    for class_idx in range(len(xy_scatter)):
        marker = markers[class_idx]
        ax1.plot(xy_linear[class_idx][0][0], xy_linear[class_idx][0][1])
        ax1.scatter(xy_scatter[class_idx][0][0], xy_scatter[class_idx][0][1], marker=marker)
        ax2.plot(xy_linear[class_idx][1][0], xy_linear[class_idx][1][1])
        ax2.scatter(xy_scatter[class_idx][1][0], xy_scatter[class_idx][1][1], marker=marker)

    # plt.savefig('распределение полуосей')
    ax1.legend(texts[:, 0], fontsize=14, loc=1)
    ax2.legend(texts[:, 1], fontsize=14, loc=1)
    plt.show()


# +
N = 30
M = 30
# for the paper: N=12, M=18
step = 5
folder = '../data/saved np'

diametr_approx_all(folder, step, N, M)


# + [markdown] pycharm={"name": "#%% md\n"}
# ### Highlight abnormal angles on the image
# -

def draw_edges(image):
    """Draw lines along the contour points of `image`.

    Lines connect consecutive contour points (x1->x2, x2->x3, ...).
    Points whose turning angle is <= 25 or >= 330 degrees are marked
    with colored dots; returns the annotated image as an ndarray.
    """
    new_image = copy.copy(image)
    im = Image.fromarray(np.uint8(cm.gist_earth(new_image) * 255))
    draw = ImageDraw.Draw(im)
    width = 50
    cnts = grainMark.get_contours(image, tol=4)

    for j, cnt in enumerate(cnts):
        if len(cnt) > 2:
            for i, point in enumerate(cnt[:-1]):
                point1 = cnt[i - 1]
                point2 = cnt[i]
                point3 = cnt[i + 1]

                x1, y1 = point1[1], point1[0]
                x2, y2 = point2[1], point2[0]
                x3, y3 = point3[1], point3[0]

                # skip points too close to the image border
                thr = 5
                if abs(x2 - image.shape[0] - 1) > thr and abs(y2 - image.shape[1] - 1) > thr and x2 > thr and y2 > thr:
                    v1 = np.array((x1 - x2, y1 - y2)).reshape(1, 2)
                    v2 = np.array((x3 - x2, y3 - y2)).reshape(1, 2)

                    dot = np.dot(v1[0], v2[0])
                    dist1 = np.linalg.norm(v1[0])
                    dist2 = np.linalg.norm(v2[0])
                    cos = dot / (dist1 * dist2)

                    v = np.concatenate([v1, v2])
                    det = np.linalg.det(v)

                    angle = 0
                    if abs(cos) < 1:
                        ang = int(np.arccos(cos) * 180 / np.pi)
                        # the determinant sign tells apart convex/concave turns
                        if det < 0:
                            angle = ang
                        else:
                            angle = 360 - ang

                        p2 = point
                        x2, y2 = p2[1], p2[0]

                        color = (150, 150, 150)
                        line_color = (0, 255, 127)

                        if angle <= 25:
                            color = (0, 0, 255)
                            r = 4
                            draw.ellipse((y2 - r, x2 - r, y2 + r, x2 + r), fill=color, width=5)
                            draw.line((point1[0], point1[1], point2[0], point2[1]), fill=line_color, width=4)
                            draw.line((point3[0], point3[1], point2[0], point2[1]), fill=line_color, width=4)
                        elif angle >= 330:
                            color = (255, 0, 0)
                            r = 4
                            draw.ellipse((y2 - r, x2 - r, y2 + r, x2 + r), fill=color, width=5)
                            draw.line((point1[0], point1[1], point2[0], point2[1]), fill=line_color, width=4)
                            draw.line((point3[0], point3[1], point2[0], point2[1]), fill=line_color, width=4)
                            print('cnt index: ', j)
                            print('point2 ', point2)
        else:
            continue

    img = np.array(im)
    return img


# + pycharm={"name": "#%%\n"}
grainShow.img_show(img)

io.imsave('малые углы.png', img)
cv/angles_beams.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <a href="https://colab.research.google.com/github/PacktPublishing/Machine-Learning-for-Time-Series-with-Python/blob/master/chapter10/Causal_CNN.ipynb" target="_parent\"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="bU3tHC0PArEI" tags=[]
import numpy as np
import pandas as pd
from keras.layers import Conv1D, Input, Add, Activation, Dropout
from keras.models import Sequential, Model
from keras.layers.advanced_activations import LeakyReLU, ELU
from keras import optimizers
import tensorflow as tf

# stop training once the loss stops improving for 10 epochs
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10)


def DC_CNN_Block(nb_filter, filter_length, dilation):
    """Return a builder for one dilated-causal-convolution block.

    The returned callable maps an input tensor to a pair
    (residual output, skip-connection output).
    """
    def apply_block(input_):
        residual = input_
        conv_out = Conv1D(
            filters=nb_filter,
            kernel_size=filter_length,
            dilation_rate=dilation,
            activation='linear',
            padding='causal',
            use_bias=False,
        )(input_)
        conv_out = Activation('selu')(conv_out)

        # 1x1 convolutions: one branch feeds the skip sum, one the residual path
        skip_out = Conv1D(1, 1, activation='linear', use_bias=False)(conv_out)
        network_in = Conv1D(1, 1, activation='linear', use_bias=False)(conv_out)
        network_out = Add()([residual, network_in])
        return network_out, skip_out

    return apply_block


def DC_CNN_Model(length):
    """Build and compile the dilated causal CNN for sequences of `length` steps."""
    inp = Input(shape=(length, 1))

    # stack of blocks with exponentially growing dilation (receptive field 2^k)
    l1a, l1b = DC_CNN_Block(32, 2, 1)(inp)
    l2a, l2b = DC_CNN_Block(32, 2, 2)(l1a)
    l3a, l3b = DC_CNN_Block(32, 2, 4)(l2a)
    l4a, l4b = DC_CNN_Block(32, 2, 8)(l3a)
    l5a, l5b = DC_CNN_Block(32, 2, 16)(l4a)
    l6a, l6b = DC_CNN_Block(32, 2, 32)(l5a)
    l6b = Dropout(0.8)(l6b)
    l7a, l7b = DC_CNN_Block(32, 2, 64)(l6a)
    l7b = Dropout(0.8)(l7b)

    # sum all skip connections, then project back to one channel
    skip_sum = Add()([l1b, l2b, l3b, l4b, l5b, l6b, l7b])
    activated = Activation('relu')(skip_sum)
    out = Conv1D(1, 1, activation='linear', use_bias=False)(activated)

    model = Model(inputs=inp, outputs=out)
    model.compile(loss='mae', optimizer=optimizers.adam_v2.Adam(), metrics=['mse'])
    return model


# + tags=[]
def fit_model(timeseries):
    """Train the model to predict each value from the preceding one."""
    length = len(timeseries) - 1
    model = DC_CNN_Model(length)
    model.summary()

    X = timeseries[:-1].reshape(1, length, 1)
    y = timeseries[1:].reshape(1, length, 1)
    model.fit(X, y, epochs=3000, callbacks=[callback])
    return model


def forecast(model, timeseries, horizon: int):
    """Iteratively roll the model forward `horizon` steps past the series end."""
    length = len(timeseries) - 1
    pred_array = np.zeros(horizon).reshape(1, horizon, 1)
    X_test_initial = timeseries[1:].reshape(1, length, 1)

    # first step comes from the observed series alone
    pred_array[:, 0, :] = model.predict(X_test_initial)[:, -1:, :]

    # each following step appends the predictions made so far to the input window
    for i in range(horizon - 1):
        rolled = np.append(
            X_test_initial[:, i + 1:, :],
            pred_array[:, :i + 1, :]
        ).reshape(1, length, 1)
        pred_array[:, i + 1:, :] = model.predict(rolled)[:, -1:, :]

    return pred_array.flatten()


def evaluate_timeseries(series, horizon: int):
    """Fit on `series` and forecast `horizon` steps; returns (predictions, model)."""
    model = fit_model(series)
    pred_array = forecast(model, series, horizon)
    return pred_array, model


# + id="LCzxf36xxvri" tags=[]
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns


def show_result(y_test, predicted, ylabel="Passengers"):
    """Overlay predicted vs. actual values on a shared time index."""
    plt.figure(figsize=(16, 6))
    plt.plot(y_test.index, predicted, 'o-', label="predicted")
    plt.plot(y_test.index, y_test, '.-', label="actual")
    plt.ylabel(ylabel)
    plt.legend()


# + id="Tu-831LAylwK" tags=[]
import pandas as pd

# monthly airline passengers, 1949 onwards
values = [
    112., 118., 132., 129., 121., 135., 148., 148., 136., 119.,
    104., 118., 115., 126., 141., 135., 125., 149., 170., 170.,
    158., 133., 114., 140., 145., 150., 178., 163., 172., 178.,
    199., 199., 184., 162., 146., 166., 171., 180., 193., 181.,
    183., 218., 230., 242., 209., 191., 172., 194., 196., 196.,
    236., 235., 229., 243., 264., 272., 237., 211., 180., 201.,
    204., 188., 235., 227., 234., 264., 302., 293., 259., 229.,
    203., 229., 242., 233., 267., 269., 270., 315., 364., 347.,
    312., 274., 237., 278., 284., 277., 317., 313., 318., 374.,
    413., 405., 355., 306., 271., 306., 315., 301., 356., 348.,
    355., 422., 465., 467., 404., 347., 305., 336., 340., 318.,
    362., 348., 363., 435., 491., 505., 404., 359., 310., 337.,
    360., 342., 406., 396., 420., 472., 548., 559., 463., 407.,
    362., 405., 417., 391., 419., 461., 472., 535., 622., 606.,
    508., 461., 390., 432.,
]
idx = pd.date_range("1949-01-01", periods=len(values), freq="M")
passengers = pd.Series(values, index=idx, name="passengers").to_frame()

# + colab={"base_uri": "https://localhost:8080/"} id="boJVi8Jiya4D" outputId="e4605e96-9198-4ab0-a277-489933daaa0b" tags=[]
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(
    passengers.passengers, passengers.passengers.shift(-1), shuffle=False
)
HORIZON = len(y_test)
predictions, model = evaluate_timeseries(X_train.values.reshape(-1, 1), horizon=HORIZON)

# + colab={"base_uri": "https://localhost:8080/", "height": 379} id="fxQ1vflLzcmt" outputId="1dfb65e4-fe34-46e0-879a-a8a095493e05" tags=[]
show_result(y_test[:HORIZON], predictions[:HORIZON], "Passengers")

# + id="yH1hqiySztJ1" tags=[]
chapter10/Causal_CNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: '' # language: python # name: python3 # --- # + import math def get_low_points(ocean_map): result = [] rows = len(ocean_map) cols = len(ocean_map[0]) low_points = [] for rowind, row in enumerate(ocean_map): for colind, val in enumerate(row): low_point = True for col,row in [(-1,0),(0,-1),(1,0),(0,1)]: cc = colind + col rr = rowind + row if not ((0 <= rr < rows) and (0 <= cc < cols)): continue try: neighbour = ocean_map[rr][cc] except IndexError: print(f"{rr=},{cc=},{col=}{row=}{colind=}{rowind=}") continue if val >= neighbour: low_point = False break if low_point: result.append(val) low_points.append((rowind,colind)) return len(result) + sum(result), low_points def get_basain_sizes(low_points, ocean_map): rows = len(ocean_map) cols = len(ocean_map[0]) basains = [[col if col == 9 else 0 for col in row]for row in ocean_map] #dfs basains_id = 1 visited = set() for row, col in low_points: stack = [(row,col)] while len(stack) > 0: row, col = stack.pop() if (row,col) in visited: continue visited.add((row,col)) basains[row][col] = basains_id for rowd,cold in [(0, 1), (0, -1), (-1, 0), (1, 0)]: rr = rowd + row cc = cold + col if (rr < 0 or rows <= rr or cc < 0 or cols <= cc ): continue if basains[rr][cc] == 9: continue stack.append((rr,cc)) basains_id += 1 sizes = [0]*basains_id ids = [id for rows in basains for id in rows if id != 9] for id in ids: sizes[id]+=1 return sizes def test(): input_file_name = './test_input.txt' return main(input_file_name) def main(input_file_name="./input.txt"): input_file = open(input_file_name, 'r').read().strip() ocean_map = [[int(measurement) for measurement in list(row)] for row in input_file.split('\n')] finalScore1,low_points = get_low_points(ocean_map) finalScore2 = math.prod(sorted(get_basain_sizes(low_points,ocean_map),reverse = True)[:3]) return 
[finalScore1,finalScore2] if __name__ == "__main__": answers = test() print(f"Answer to test-question 1: {answers[0]}") print(f"Answer to test-question 2: {answers[1]}") assert answers[0] == 15, "failed test" assert answers[1] == 1134, "failed test" answers = main() print(f"Answer to question 1: {answers[0]}") print(f"Answer to question 2: {answers[1]}") #lines = value[0] #numbers = value[1]
2021/9/9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Environment (conda_py3.7tf2.7)
#     language: python
#     name: conda_py3.7tf2.7
# ---

# # CNN apporach to Digit Recognizer problem

# ### Import Libraries

# +
#using MNIST ("Modified National Institute of Standards and Technology") from https://www.kaggle.com/c/digit-recognizer,
#this cnn achived accuracy of 0.99146 on kaggle test set and placed as 376/1741 on the leaderboard as of 27/02/2022

# %config Completer.use_jedi = False

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools

from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, roc_curve

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import tf2onnx
import onnxruntime
# -

# ### Loading The Dataset

train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

# ### Exploring The Dataset

print("train size =", train.shape)
print("test size =", test.shape)

# Separate target digit from pixel columns.
Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1)
print("X_train size = ", X_train.shape)
#dropping The first column, called "label", is the digit that was drawn by the user.
# ### Plot Random Sample

index = int(np.random.rand() * X_train.shape[0])
img = X_train.iloc[index].to_numpy()
img = img.reshape((28,28))
plt.imshow(img,cmap='gray')
plt.title(train.iloc[index,0])
plt.axis("off")
plt.show()

# ### Normalization of the data

X_train /= 255.0
test /= 255.0

# ### Reshape

#28x28x1 3D matrices to work with keras, since image is in greyscale, 3rd dim is 1
X_train = X_train.values.reshape(-1,28,28,1)
test = test.values.reshape(-1,28,28,1)
print("x_train shape: ",X_train.shape)
print("test shape: ",test.shape)

Y_train = tf.keras.utils.to_categorical(Y_train, num_classes = 10) #one-hot

X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=3)
print("x_train shape",X_train.shape)
print("x_val shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_val shape",Y_val.shape)

# +
# Two conv/pool stages followed by a dense head; dropout throughout
# to counter overfitting on the small augmented dataset.
model = tf.keras.Sequential()

model.add(layers.Conv2D(filters = 8, kernel_size = (5,5),padding = 'Same',
                        activation ='relu', input_shape = (28,28,1)))
model.add(layers.MaxPool2D(pool_size=(2,2)))
model.add(layers.Dropout(0.25))

model.add(layers.Conv2D(filters = 16, kernel_size = (3,3),padding = 'Same',
                        activation ='relu'))
model.add(layers.MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(layers.Dropout(0.25))

model.add(layers.Flatten())
model.add(layers.Dense(256, activation = "relu"))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(10, activation = "softmax"))

model.compile(loss = "categorical_crossentropy",
              optimizer = keras.optimizers.Adam(learning_rate=0.001),
              metrics = ["accuracy"]
              )

# light augmentation: small rotations, zooms and shifts
datagen = ImageDataGenerator(
    rotation_range=2.5,
    zoom_range = 0.05,
    width_shift_range=0.05,
    height_shift_range=0.05
)
datagen.fit(X_train)

history = model.fit(datagen.flow(X_train,Y_train, batch_size=32),
                    epochs=20, verbose= 1,
                    validation_data = (X_val,Y_val))
# -

model.summary()

plt.plot(history.history['loss'], color='r', label="Train loss")
plt.plot(history.history['val_loss'], color='b', label="validation loss")
plt.title("Test Loss")
plt.xlabel("Number of Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()

# FIX: with tf.keras and metrics=["accuracy"] the History keys are
# 'accuracy'/'val_accuracy' ('acc'/'val_acc' raises KeyError here);
# the plot was also mislabelled as "Loss".
plt.plot(history.history['accuracy'], color='r', label="train accuracy")
plt.plot(history.history['val_accuracy'], color='b', label="validation accuracy")
plt.title("Test Accuracy")
plt.xlabel("Number of Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()

preds = model.predict(test)
preds.shape

# argmax over class probabilities gives the predicted digit
preds_df = pd.DataFrame(preds)
preds_df = preds_df.idxmax(axis=1)

output_results_pd = pd.DataFrame({"ImageId": np.arange(1,test.shape[0]+1),
                                  "Label":preds_df })
output_results_pd.head(5)

output_results_pd.to_csv("submission.csv", index = False)

# +
# export the trained model to ONNX
spec = (tf.TensorSpec((None, 28, 28, 1), tf.float32, name="input"),)
output_path = model.name + ".onnx"

model_proto, _ = tf2onnx.convert.from_keras(model, input_signature=spec, opset=13, output_path=output_path)
output_names = [n.name for n in model_proto.graph.output]
# -
CNN_digit_recog.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline

# non-specific libraies
import matplotlib.pyplot as plt
import numpy as np

import keras
import keras.utils as utils
from keras import backend as K
# -

# # Load Dataset
# ---
# - ### Dataset operations
#     - data normalization
#     - data reshaping
#     - label gethering

# +
# pre-defined mnist dataest
from keras.datasets import mnist

batch_size = 128
n_classes = 10  # 10 digits 0 to 9

# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# training image dimentions 60000, 28, 28
n_sample, img_rows, img_cols = x_train.shape

# Reshape data to match the backend's channel ordering
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# float limiting for optimized memmory (for GPU usage)
# basic gaming GPUs only works with 32 bit float and 32 bit int
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")

# normalizing the input between [1 0]
x_train /= 255
x_test /= 255

print('x max:{} x min {}'.format(x_train.max(), x_train.min()))
print('train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

#convert class vectors to binary class matrices
Y_train = utils.to_categorical(y_train, n_classes)
Y_test = utils.to_categorical(y_test, n_classes)
print("label: {} ,One hot encoding: {}".format(y_train[0], Y_train[0, :]))
# -

# ## Mnist data example

for i in range(9):
    plt.subplot(3, 3, i+1)
    plt.imshow(np.transpose(x_train[i], (2, 0, 1))[0], cmap='gray')
    plt.axis("off")

# # Model Definition

# needed for model definition
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D

# +
model = Sequential()

# Convolution2D(number_filters, row_size, column_size, input_shape=(number_channels, img_row, img_col))
model.add(Conv2D(16, kernel_size=(5, 5), input_shape=input_shape, padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2,2), padding='valid'))

model.add(Conv2D(16, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling2D(pool_size=(2,2), padding='valid'))

model.add(Conv2D(8, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(n_classes, activation='softmax'))
# -

# # Train

LR = 1e-3
opt = keras.optimizers.Adam(lr=LR)

model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy']
              )

model.summary()

utils.plot_model(model, to_file='images/conv_model.png')

# ### Printed model graph
# ------
# ![model graph](images/conv_model.png)

n_epoch = 3  # we can increase epoch
history = model.fit(x_train, Y_train,
                    batch_size=batch_size,
                    epochs=n_epoch,
                    verbose=1,
                    validation_split=0.2,
                    shuffle=True)

score = model.evaluate(x_test, Y_test, verbose=1)
print ('Test score   : {:.6f}'.format(score[0]))
print ('Test accuracy: {:5.2f}%'.format(score[1] * 100))

# ## Training History Visiualization

# +
# Plot training & validation accuracy values
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# -

# ## saving and loading weights

model.save_weights('weights/mnist_conv.h5')
print('model saved.')

model.load_weights('weights/mnist_conv.h5')
score = model.evaluate(x_test, Y_test, verbose=1)
print ('Test score   : {:.6f}'.format(score[0]))
print ('Test accuracy: {:5.2f}%'.format(score[1] * 100))

# +
## Visualize sample result
radn_n = np.random.randint(x_test.shape[0] - 9)
res = model.predict_classes(x_test[radn_n:radn_n+9])

plt.figure(figsize=(10, 10))
for i in range(9):
    plt.subplot(3, 3, i+1)
    plt.imshow(np.transpose(x_test[i+radn_n], (2, 0, 1))[0], 'gray')
    plt.gca().get_xaxis().set_ticks([])
    plt.gca().get_yaxis().set_ticks([])
    plt.xlabel("prediction = %d" % res[i], fontsize= 18)
# -

# ## References
# ----
# - [__Keras__][1]
# - [The MNITS Database of handwritten digits][2]
#
# [1]: https://keras.io/
# [2]: http://yann.lecun.com/exdb/mnist/
.ipynb_checkpoints/keras_minst_conv-checkpoint.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.5 with Spark # language: python3 # name: python3 # --- # This is the second assignment for the Coursera course "Advanced Machine Learning and Signal Processing" # # # Just execute all cells one after the other and you are done - just note that in the last one you have to update your email address (the one you've used for coursera) and obtain a submission token, you get this from the programming assignment directly on coursera. # # Please fill in the sections labelled with "###YOUR_CODE_GOES_HERE###" !wget https://github.com/IBM/coursera/raw/master/coursera_ml/a2.parquet # Now it’s time to have a look at the recorded sensor data. You should see data similar to the one exemplified below…. # # + df=spark.read.load('a2.parquet') df.createOrReplaceTempView("df") spark.sql("SELECT * from df").show() # - # Please create a VectorAssembler which consumes columns X, Y and Z and produces a column “features” # from pyspark.ml.feature import VectorAssembler vectorAssembler = ###YOUR_CODE_GOES_HERE###" # Please instantiate a classifier from the SparkML package and assign it to the classifier variable. Make sure to either # 1. Rename the “CLASS” column to “label” or # 2. 
Specify the label-column correctly to be “CLASS” # # + from pyspark.ml.classification import ###YOUR_CODE_GOES_HERE###" classifier = ###YOUR_CODE_GOES_HERE###" # - # Let’s train and evaluate… # from pyspark.ml import Pipeline pipeline = Pipeline(stages=[vectorAssembler, classifier]) model = pipeline.fit(df) prediction = model.transform(df) prediction.show() # + from pyspark.ml.evaluation import MulticlassClassificationEvaluator binEval = MulticlassClassificationEvaluator().setMetricName("accuracy") .setPredictionCol("prediction").setLabelCol("CLASS") binEval.evaluate(prediction) # - # If you are happy with the result (I’m happy with > 0.55) please submit your solution to the grader by executing the following cells, please don’t forget to obtain an assignment submission token (secret) from the Coursera’s graders web page and paste it to the “secret” variable below, including your email address you’ve used for Coursera. (0.55 means that you are performing better than random guesses) # !rm -Rf a2_m2.json prediction = prediction.repartition(1) prediction.write.json('a2_m2.json') !rm -f rklib.py !wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py # + import zipfile def zipdir(path, ziph): for root, dirs, files in os.walk(path): for file in files: ziph.write(os.path.join(root, file)) zipf = zipfile.ZipFile('a2_m2.json.zip', 'w', zipfile.ZIP_DEFLATED) zipdir('a2_m2.json', zipf) zipf.close() # - !base64 a2_m2.json.zip > a2_m2.json.zip.base64 # + from rklib import submit key = "J<KEY>" part = "G4P6f" email = None###YOUR_CODE_GOES_HERE###" secret = None###YOUR_CODE_GOES_HERE###" with open('a2_m2.json.zip.base64', 'r') as myfile: data=myfile.read() submit(email, secret, key, part, [part], data)
coursera_ml/.ipynb_checkpoints/AssignmentML2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# <div class="frontmatter text-center">
# <h1>Introduction to Data Science and Programming</h1>
# <h2>Class 14: Data relationships</h2>
# <h3>IT University of Copenhagen, Fall 2019</h3>
# <h3>Instructor: <NAME></h3>
# </div>
# -

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats as stats

# + [markdown] slideshow={"slide_type": "slide"}
# ## Loading a mixed variable type data set

# + slideshow={"slide_type": "-"}
# Data set downloaded from: https://www.kaggle.com/mirichoi0218/insurance/downloads/insurance.zip/1
# !head files/insurance.csv
# -

# The data set contains demographic information about individuals, where they live (region), whether they smoke, and how much they pay for health insurance (charges).

# + [markdown] slideshow={"slide_type": "slide"}
# We read the data as structured arrays using `numpy.genfromtxt` with `dtype=None`. `names=True` ensures we can access single columns using the header. More info: https://numpy.org/devdocs/user/basics.io.genfromtxt.html
# -

datamixed = np.genfromtxt('files/insurance.csv', delimiter=',', dtype=None, names=True, encoding='utf8')
print(datamixed)

print(datamixed['smoker'])

# +
fig = plt.figure(figsize=(5, 4))
axes = fig.add_axes([0, 0, 1, 1])

# Plot data
axes.scatter(datamixed['bmi'], datamixed['charges'], label='Data', edgecolors='w');

axes.legend(loc='upper left');
axes.set_xlabel('BMI');
axes.set_ylabel('Charges');
axes.set_title('Scatter plot');

# + slideshow={"slide_type": "slide"}
fig = plt.figure(figsize=(5, 4))
axes = fig.add_axes([0, 0, 1, 1])

# Plot data
axes.scatter(datamixed['bmi'], datamixed['charges'], label='Data', edgecolors='w');

# Plot fit
slope, intercept, r_value, _, _ = stats.linregress(datamixed['bmi'], datamixed['charges'])
line = slope * datamixed['bmi'] + intercept
axes.plot(datamixed['bmi'], line, linewidth=2.5, color='k', label='Fit')
axes.text(45, 24000, "r = " + str(round(r_value, 3)))

axes.legend(loc='upper left');
axes.set_xlabel('BMI');
axes.set_ylabel('Charges');
axes.set_title('Scatter plot');
# -

# The data points relate in a very weird way. Some stay on the bottom, some go up, and there is some "bottleneck" at BMI=30. Sure, we can fit a linear trend, but is it meaningful? We have to be careful.

# + [markdown] slideshow={"slide_type": "slide"}
# Maybe adding information from a categorical variable clears things up:

# + slideshow={"slide_type": "-"}
masksmoker = (datamixed['smoker'] == 'yes')  # Easy indexing: The power of numpy

fig = plt.figure(figsize=(5, 4))
axes = fig.add_axes([0, 0, 1, 1])

# Plot data
axes.scatter(datamixed['bmi'][~masksmoker], datamixed['charges'][~masksmoker],
             color='orange', label="Data nonsmoker", edgecolors='w');
axes.scatter(datamixed['bmi'][masksmoker], datamixed['charges'][masksmoker],
             color='k', label="Data smoker", edgecolors='w');

axes.legend();
axes.set_xlabel('BMI');
axes.set_ylabel('Charges');
axes.set_title('Scatter plot');
# -

# Aha! We have two sub-groups with different relations: Smokers and non-smokers are treated differently for insurance purposes. Next, let us do the linear regression on each of the sub-groups:

# + slideshow={"slide_type": "slide"}
fig = plt.figure(figsize=(5, 4))
axes = fig.add_axes([0, 0, 1, 1])

# Plot data
axes.scatter(datamixed['bmi'][~masksmoker], datamixed['charges'][~masksmoker],
             color='orange', label="Data nonsmoker", edgecolors='w');
axes.scatter(datamixed['bmi'][masksmoker], datamixed['charges'][masksmoker],
             color='k', label="Data smoker", edgecolors='w');

# Plot fits
slope, intercept, r_value, _, _ = stats.linregress(datamixed['bmi'][~masksmoker],
                                                   datamixed['charges'][~masksmoker])
axes.plot(datamixed['bmi'][~masksmoker],
          slope * datamixed['bmi'][~masksmoker] + intercept,
          linewidth=2.5, color='orange', label='Fit nonsmoker')
axes.text(45, 13000, "r = " + str(round(r_value, 3)))

slope, intercept, r_value, _, _ = stats.linregress(datamixed['bmi'][masksmoker],
                                                   datamixed['charges'][masksmoker])
axes.plot(datamixed['bmi'][masksmoker],
          slope * datamixed['bmi'][masksmoker] + intercept,
          linewidth=2.5, color='k', label='Fit smoker')
axes.text(42, 58000, "r = " + str(round(r_value, 3)))

axes.legend();
axes.set_xlabel('BMI');
axes.set_ylabel('Charges');
axes.set_title('Scatter plot');
# -

# Our conclusion, if we stop here:
# * **For non-smokers**: There is a very weak linear relation between BMI and charges. Apparently the insurance does not care too much about your BMI if you do not smoke
# * **For smokers**: There seem to be 2 sub-groups of smokers treated differently by the insurance: Those with BMI below 30 and those with BMI above 30. The linear regression through both these groups shows that High-BMI smokers pay higher charges. This should not be the end of the analysis, however: Since we have two sub-groups, we have to question whether a linear regression through them is justified.
# + [markdown] slideshow={"slide_type": "slide"} # ## Relating categorical and quantitative data # + fig = plt.figure(figsize=(4, 3)) axes = fig.add_axes([0, 0, 1, 1]) # left, bottom, width, height (range 0 to 1) axes.boxplot([datamixed['charges'][masksmoker], datamixed['charges'][~masksmoker]]); axes.set_ylabel('Charges') axes.set_xticklabels(['Smokers', 'Nonsmokers']); # + slideshow={"slide_type": "slide"} fig = plt.figure(figsize=(4, 3)) axes = fig.add_axes([0, 0, 1, 1]) # left, bottom, width, height (range 0 to 1) axes.boxplot([datamixed['charges'][datamixed['region'] == 'southwest'], datamixed['charges'][datamixed['region'] == 'southeast'], datamixed['charges'][datamixed['region'] == 'northwest'], datamixed['charges'][datamixed['region'] == 'northeast']]); axes.set_ylabel('Charges') axes.set_xticklabels(['South west', 'South east', 'North West', 'North east']);
class14_datarelationships/class14_datarelationships.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/andrheadennizecuyme/LinearAlgebra_2ndSem/blob/main/Assignment06.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="_cZFwwF88gOa" # #Linear Algebra for ChE # #Laboratory 5: Matrix Operations # + [markdown] id="MFokOZ4D8lPs" # #Objectives # At the end of this activity you will be able to: # 1. Be familiar with the fundamental matrix operations. # 2. Apply the operations to solve intermediate equations. # 3. Apply matrix algebra in engineering solutions. # + [markdown] id="1N0isy508nOj" # #Discussions # # + id="6PtzxcRL8q71" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # + [markdown] id="fDglWlW68tjh" # #Transposition # One of the fundamental operations in matrix algebra is Transposition. The transpose of a matrix is done by flipping the values of its elements over its diagonals. With this, the rows and columns from the original matrix will be switched. So for a matrix $A$ its transpose is denoted as $A^T$. So for example: # + [markdown] id="N2Ua24TF9i2t" # $$A = \begin{bmatrix} 1 & 2 & 5\\5 & -1 &0 \\ 0 & -3 & 3\end{bmatrix} $$ # + [markdown] id="IoQQbMtX_70q" # $$ A^T = \begin{bmatrix} 1 & 5 & 0\\2 & -1 &-3 \\ 5 & 0 & 3\end{bmatrix}$$ # + [markdown] id="hM1ppnahKSdl" # This can now be achieved programmatically by using `np.transpose()` or using the `T` method. 
# + colab={"base_uri": "https://localhost:8080/"} id="bcCGDXjh904_" outputId="6b63724f-6759-46a3-f121-c9f4b8ba5ea9" A = np.array([ [341, 9, 32], [8, -1, 00], [45, -90, 9] ]) A # + colab={"base_uri": "https://localhost:8080/"} id="lAyKkFaX_WOr" outputId="765eb386-1c2c-4bea-d554-590c7ff77e62" AT1 = np.transpose(A) AT1 # + colab={"base_uri": "https://localhost:8080/"} id="4fz-QvMP_XPS" outputId="bc9b8486-6057-47c0-861f-ac0fe2abce15" AT2 = A.T AT2 # + colab={"base_uri": "https://localhost:8080/"} id="CWX8fKtw_VLU" outputId="b6df735c-f08d-4038-b5c5-ccd527834415" B = np.array([ [9,8,7,6], [99,88,77,66], ]) B.shape # + colab={"base_uri": "https://localhost:8080/"} id="StOkpJTT_cR6" outputId="f53628ab-6dd9-4748-fd70-63e18782aacc" np.transpose(B).shape # + colab={"base_uri": "https://localhost:8080/"} id="I6fMiuGY_sMe" outputId="e335626f-67c6-476e-cb08-f58ffa6953d7" B.T.shape # + colab={"base_uri": "https://localhost:8080/"} id="VibiKq8WANvO" outputId="d597c4f6-e10b-4441-c185-0afc5e0e0e36" BT = B.T BT # + [markdown] id="Jnjp_z_WAR7c" # #Dot Product/Inner Product # + [markdown] id="wS42sEUjAfgC" # The dot product from laboratory activity before, we will try to implement the same operation with matrices. In matrix dot product we are going to get the sum of products of the vectors by row-column pairs. 
So if we have two matrices $X$ and $Y$: # # $$X = \begin{bmatrix}x_{(0,0)}&x_{(0,1)}\\ x_{(1,0)}&x_{(1,1)}\end{bmatrix}, Y = \begin{bmatrix}y_{(0,0)}&y_{(0,1)}\\ y_{(1,0)}&y_{(1,1)}\end{bmatrix}$$ # # The dot product will then be computed as: # $$X \cdot Y= \begin{bmatrix} x_{(0,0)}*y_{(0,0)} + x_{(0,1)}*y_{(1,0)} & x_{(0,0)}*y_{(0,1)} + x_{(0,1)}*y_{(1,1)} \\ x_{(1,0)}*y_{(0,0)} + x_{(1,1)}*y_{(1,0)} & x_{(1,0)}*y_{(0,1)} + x_{(1,1)}*y_{(1,1)} # \end{bmatrix}$$ # # We assign values to $X$ and $Y$: # $$X = \begin{bmatrix}1&2\\ 0&1\end{bmatrix}, Y = \begin{bmatrix}-1&0\\ 2&2\end{bmatrix}$$ # + [markdown] id="0dJ-dK7oAkzj" # $$X \cdot Y= \begin{bmatrix} 1*-1 + 2*2 & 1*0 + 2*2 \\ 0*-1 + 1*2 & 0*0 + 1*2 \end{bmatrix} = \begin{bmatrix} 3 & 4 \\2 & 2 \end{bmatrix}$$ # This could be achieved programmatically using `np.dot()`, `np.matmul()` or the `@` operator. # + id="mekCh46bBFCr" X = np.array([ [6,3], [0,6] ]) Y = np.array([ [-5,0], [9,9] ]) # + colab={"base_uri": "https://localhost:8080/"} id="m6AA5itVBF9C" outputId="77cdbcea-1eee-4569-aa4c-9ba36fe0cfcc" np.array_equiv(X, Y) # + colab={"base_uri": "https://localhost:8080/"} id="gWaEQNFHBHnG" outputId="86e24696-63a5-43e8-b181-2da06c807fe7" np.dot(X,Y) # + colab={"base_uri": "https://localhost:8080/"} id="I5rD3AbqBMBk" outputId="584b8e01-ebc2-4fcc-da46-6bf62a4a4813" X.dot(Y) # + colab={"base_uri": "https://localhost:8080/"} id="h8KaKkXqBQUj" outputId="4404f702-98ee-40c3-b8a3-c9399dfcfac8" X @ Y # + colab={"base_uri": "https://localhost:8080/"} id="33fCMaoNBSMS" outputId="820a7042-f966-494c-fd00-4fcdca42585f" np.matmul(X,Y) # + id="xk6JdLBkBSQm" M = np.array([ [1,7,4], [8,7,3], [2,7,0] ]) N = np.array([ [-9,1,2], [5,3,5], [8,7,6] ]) # + colab={"base_uri": "https://localhost:8080/"} id="g86Mv1ezBa0H" outputId="a6172811-59b0-4a1e-9dba-b21084fc82c3" M @ N # + colab={"base_uri": "https://localhost:8080/"} id="tO8Atr7bBc2c" outputId="6ca796d8-7d7b-46a2-c8f2-a41a078dc92d" M.dot(N) # + colab={"base_uri": 
"https://localhost:8080/"} id="m2yY1qAmBeQ6" outputId="9d004cbd-bf6b-49ac-8a9f-16aa4508a98f" np.matmul(M, N) # + colab={"base_uri": "https://localhost:8080/"} id="rEK4YApZBgEv" outputId="77ab999d-9cc1-417d-8d8b-94626fb12c00" np.dot(M, N) # + [markdown] id="dp0j5kwbB5wE" # In matrix dot products there are additional rules compared with vector dot products. Since vector dot products were just in one dimension there are less restrictions. Since now we are dealing with Rank 2 vectors we need to consider some rules: # # ### Rule 1: The inner dimensions of the two matrices in question must be the same. # # Given a matrix $A$ with a shape of $(a,b)$ where $a$ and $b$ are any integers. If we want to do a dot product between $A$ and another matrix $B$, then matrix $B$ should have a shape of $(b,c)$ where $b$ and $c$ are any integers. So for given the following matrices: # # $$A = \begin{bmatrix}2&4\\5&-2\\6&-8\\0&1\end{bmatrix}, B = \begin{bmatrix}1&1\\3&3\\9&-9\\-1&-2\end{bmatrix}, C = \begin{bmatrix}0&1&1\\8&8&8\\1&1&2\end{bmatrix}$$ # # In this case $A$ has a shape of $(4,2)$, $B$ has a shape of $(4,2)$ and $C$ has a shape of $(3,3)$. So the only matrix pairs that is eligible to perform dot product is matrices $A \cdot C$, or $B \cdot C$. # + colab={"base_uri": "https://localhost:8080/"} id="gTKkhxkLB8X6" outputId="e65612fb-d857-46c2-dd22-7ba2b4a0c2b2" X = np.array([ [6, 99], [7, -7], [5, 8] ]) Y = np.array([ [2,2], [4,4], [-88,-83] ]) Z = np.array([ [0,7,7], [4,4,2] ]) print(X.shape) print(Y.shape) print(Z.shape) # + colab={"base_uri": "https://localhost:8080/"} id="sP4ik0PRCdDE" outputId="a3a0f985-7752-4711-df00-9fe488c1c7e6" X @ Z # + colab={"base_uri": "https://localhost:8080/"} id="SmgElzEdCglB" outputId="ef057d27-b264-4ca8-eee0-3f138752c179" Y @ Z # + [markdown] id="quSbswSQCrIf" # The shape of the dot product changed and its shape is not the same as any of the matrices we used. The shape of a dot product is actually derived from the shapes of the matrices used. 
So recall matrix $A$ with a shape of $(a,b)$ and matrix $B$ with a shape of $(b,c)$, $A \cdot B$ should have a shape $(a,c)$. # + colab={"base_uri": "https://localhost:8080/"} id="rACiFy8RCsYG" outputId="5d397f81-f2cd-4587-efde-c67e690ad92b" X @ Y.T # + colab={"base_uri": "https://localhost:8080/"} id="n1NPDS1QCzzw" outputId="1f588db6-e38a-4a6a-e2f8-5119a6a62289" O = np.array([ [8,55,4,0] ]) P = np.array([ [1,1,1,-1] ]) print(O.shape) print(P.shape) # + colab={"base_uri": "https://localhost:8080/"} id="E-ltSwt8D8aB" outputId="5633a0e0-d425-4749-c0e4-954bc4fb4f5d" P.T @ O # + id="vc7KXEHZD9DU" O @ P.T # + [markdown] id="k5V-ynMQEOVX" # And you can see that when you try to multiply A and B, it returns `ValueError` pertaining to matrix shape mismatch. # + [markdown] id="1jXEFJuDERnE" # ### Rule 2: Dot Product has special properties # # Dot products are prevalent in matrix algebra, this implies that it has several unique properties and it should be considered when formulation solutions: # 1. $A \cdot B \neq B \cdot A$ # 2. $A \cdot (B \cdot C) = (A \cdot B) \cdot C$ # 3. $A\cdot(B+C) = A\cdot B + A\cdot C$ # 4. $(B+C)\cdot A = B\cdot A + C\cdot A$ # 5. $A\cdot I = A$ # 6. 
$A\cdot \emptyset = \emptyset$ # + id="Ap5SHSyQEWC3" A = np.array([ [8,7,6], [6,7,8], [7,7,7] ]) B = np.array([ [4,4,4], [2,4,8], [2,4,8] ]) C = np.array([ [55,55,0], [0,55,55], [55,0,55] ]) # + colab={"base_uri": "https://localhost:8080/"} id="NZGdveagEWs2" outputId="8376239b-58af-4699-c2de-3f3406536e58" np.eye(3) # + colab={"base_uri": "https://localhost:8080/"} id="BnHEvIl-EYS8" outputId="8bbbe423-0a58-448c-bf2e-1d08004f6443" A.dot(np.eye(3)) # + colab={"base_uri": "https://localhost:8080/"} id="qMobu4RXEZs2" outputId="162ca43f-0dfc-4dbb-9ca2-7a7a668f9931" np.array_equal(A@B, B@A) # + colab={"base_uri": "https://localhost:8080/"} id="eKxIo9vWEch1" outputId="4605a4a2-6f5b-47bb-9aad-2b002b77a8a8" E = A @ (B @ C) E # + colab={"base_uri": "https://localhost:8080/"} id="FFAMmv8TEdKv" outputId="897abf05-73db-4883-cd1a-e4e331dcc153" F = (A @ B) @ C F # + colab={"base_uri": "https://localhost:8080/"} id="9uCcabG-EefA" outputId="28f46f13-2658-4686-f248-23151f1e515b" np.array_equal(E, X) # + id="0-bdSxdnEf_M" colab={"base_uri": "https://localhost:8080/"} outputId="6def3532-653a-4f96-e2b8-2ca2b6e81aa6" np.array_equiv(E, F) # + colab={"base_uri": "https://localhost:8080/"} id="kiXjwjwXFpW3" outputId="2a895082-28e1-4723-b16b-acc780d185a3" np.eye(9) # + colab={"base_uri": "https://localhost:8080/"} id="CpNfU79PFysy" outputId="bc70262d-5054-4205-810d-13001fd41955" A @ E # + colab={"base_uri": "https://localhost:8080/"} id="KjGVOG7GF0Ia" outputId="f30747f8-de57-4d4f-b7c0-0e1e7901911d" z_mat = np.zeros(A.shape) z_mat # + colab={"base_uri": "https://localhost:8080/"} id="o1Q0naSEF0lc" outputId="8bf0d505-1d2e-4a1c-f87e-957bb84a6ec0" a_dot_z = A.dot(np.zeros(A.shape)) a_dot_z # + colab={"base_uri": "https://localhost:8080/"} id="IGrrhYfhF27g" outputId="3658e87d-5b0a-494f-ef51-e18ab8591232" np.array_equal(a_dot_z,z_mat) # + colab={"base_uri": "https://localhost:8080/"} id="1WnvcHJhF3XA" outputId="c0532284-0c68-469b-b6eb-ac0026ae95c6" null_mat = np.empty(A.shape, dtype=float) null = 
np.array(null_mat,dtype=float) print(null) np.allclose(a_dot_z,null) # + [markdown] id="_G1k__KGF5NH" # #Determinant # + [markdown] id="eXVH1wv-F8YN" # A determinant is a scalar value derived from a square matrix. The determinant is a fundamental and important value used in matrix algebra. Although it will not be evident in this laboratory on how it can be used practically, but it will be reatly used in future lessons. # # The determinant of some matrix $A$ is denoted as $det(A)$ or $|A|$. So let's say $A$ is represented as: # $$A = \begin{bmatrix}a_{(0,0)}&a_{(0,1)}\\a_{(1,0)}&a_{(1,1)}\end{bmatrix}$$ # We can compute for the determinant as: # $$|A| = a_{(0,0)}*a_{(1,1)} - a_{(1,0)}*a_{(0,1)}$$ # Thus we have $A$ as: # $$A = \begin{bmatrix}1&4\\0&3\end{bmatrix}, |A| = 3$$ # # But you might wonder how about square matrices beyond the shape $(2,2)$? We can approach this problem by using several methods such as co-factor expansion and the minors method. This can be taught in the lecture of the laboratory but we can achieve the strenuous computation of high-dimensional matrices programmatically using Python. We can achieve this by using `np.linalg.det()`. # + colab={"base_uri": "https://localhost:8080/"} id="40UvHO3zGAdj" outputId="2990f93b-b0d1-4155-f78c-9ff378f21227" A = np.array([ [8,45], [0.0,99] ]) np.linalg.det(A) # + colab={"base_uri": "https://localhost:8080/"} id="c7NUVDTQGDia" outputId="2de58bc2-d75a-4ef9-a761-617216ce9d39" B = np.array([ [7, -999, 1], [77, -99 ,-1], [777, -9, 1] ]) np.linalg.det(B) # + colab={"base_uri": "https://localhost:8080/"} id="S9G-T6KaGFSu" outputId="ed04a6d4-4b3e-464c-defe-384871ed41d1" ## Now other mathematics classes would require you to solve this by hand, ## and that is great for practicing your memorization and coordination skills ## but in this class we aim for simplicity and speed so we'll use programming ## but it's completely fine if you want to try to solve this one by hand. 
B = np.array([ [9,-63,5,6], [0,3,71,34], [93,81,8,2], [95,2,6,48] ]) np.linalg.det(B) # + [markdown] id="MYIkzHVjGLR6" # #Inverse # + [markdown] id="XW5uJCRaGOd_" # The inverse of a matrix is another fundamental operation in matrix algebra. Determining the inverse of a matrix let us determine if its solvability and its characteristic as a system of linear equation — we'll expand on this in the nect module. Another use of the inverse matrix is solving the problem of divisibility between matrices. Although element-wise division exists but dividing the entire concept of matrices does not exists. Inverse matrices provides a related operation that could have the same concept of "dividing" matrices. # # Now to determine the inverse of a matrix we need to perform several steps. So let's say we have a matrix $M$: # $$M = \begin{bmatrix}1&7\\-3&5\end{bmatrix}$$ # First, we need to get the determinant of $M$. # $$|M| = (1)(5)-(-3)(7) = 26$$ # Next, we need to reform the matrix into the inverse form: # $$M^{-1} = \frac{1}{|M|} \begin{bmatrix} m_{(1,1)} & -m_{(0,1)} \\ -m_{(1,0)} & m_{(0,0)}\end{bmatrix}$$ # That will be: # $$M^{-1} = \frac{1}{26} \begin{bmatrix} 5 & -7 \\ 3 & 1\end{bmatrix} = \begin{bmatrix} \frac{5}{26} & \frac{-7}{26} \\ \frac{3}{26} & \frac{1}{26}\end{bmatrix}$$ # For higher-dimension matrices you might need to use co-factors, minors, adjugates, and other reduction techinques. To solve this programmatially we can use `np.linalg.inv()`. 
# + colab={"base_uri": "https://localhost:8080/"} id="BJYI4wotGWVd" outputId="42635c82-df91-4757-987c-ee3718fd930c" M = np.array([ [0,13], [-99, 80] ]) np.array(M @ np.linalg.inv(M), dtype=int) # + colab={"base_uri": "https://localhost:8080/"} id="pRaXXk2OGW6W" outputId="9cf44d7a-a5f8-453c-ebc8-1e2dc0c1d03c" P = np.array([ [9, 9, 0], [4, 2, -6], [8, 9, 7] ]) Q = np.linalg.inv(P) Q # + colab={"base_uri": "https://localhost:8080/"} id="htRK8quTGXBo" outputId="274ea5af-b213-40fb-e2c1-a2782c140c12" P @ Q # + colab={"base_uri": "https://localhost:8080/"} id="MLsHio7TGapg" outputId="fe24cf14-faa5-48a8-e4a2-bb2c8e465ad7" ## And now let's test your skills in solving a matrix with high dimensions: N = np.array([ [18,5,23,1,0,33,5], [0,45,0,11,2,4,2], [5,9,20,0,0,0,3], [1,6,4,4,8,43,1], [8,6,8,7,1,6,1], [-5,15,2,0,0,6,-30], [-2,-5,1,2,1,20,12], ]) N_inv = np.linalg.inv(N) np.array(N @ N_inv,dtype=int) # + [markdown] id="L3dtVy1eGkkr" # To validate the wether if the matric that you have solved is really the inverse, we follow this dot product property for a matrix $M$: # $$M\cdot M^{-1} = I$$ # + colab={"base_uri": "https://localhost:8080/"} id="Hg1U92tfGgBW" outputId="96e81406-361f-4168-c3ed-647569ce7290" squad = np.array([ [2.0, 1.0, 1.5], [1.25, 1.0, 1.5], [1.75, 1.5, 1.0] ]) weights = np.array([ [0.2, 0.2, 0.6] ]) p_grade = squad @ weights.T p_grade # + [markdown] id="jOSLhgVxGpQ0" # #ACTIVITY # + [markdown] id="j9SEtfb9GuGH" # ##Task 1 # + [markdown] id="6vNWMaLWGufq" # Prove and implement the remaining 6 matrix multiplication properties. You may create your own matrices in which their shapes should not be lower than $(3,3)$. # # In your methodology, create individual flowcharts for each property and discuss the property you would then present your proofs or validity of your implementation in the results section by comparing your result to present functions from NumPy. 
# + id="Y3nqgk7cGztm" import numpy as np import matplotlib.pyplot as plt # %matplotlib inline P = np.array ([ [9,7,5,3], [60,70,80,90], [0,0,0,0], [-2,4,-6,8] ]) Q = np.array ([ [1,-5,9,4], [4,1,5,9], [2,-3,1,7], [-4,2,9,1] ]) R = np.array ([ [55,65,70,75], [31,-13,81,-18], [3,9,9,12], [67,77,87,97] ]) # + id="2qa78WNO5yMD" S = Q.dot(P) T = R.dot(S) U = S.dot(R) V = Q.dot(R) W = P.dot(S) X = Q + R Y = S + P Z = T + U # + colab={"base_uri": "https://localhost:8080/"} id="gD4wma6Z5d1m" outputId="586ccb4b-be63-47f8-f3d4-648309b119fd" print('PROPERTY 1') {np.array_equiv(S,V)} print('AB=BA') print('Matrix AB:') print(np.matmul(P,Q)) print('Matrix BA:') print(np.matmul(Q,P)) print('APPROVED!') # + colab={"base_uri": "https://localhost:8080/"} id="0oaaHEJk5fw2" outputId="07717547-ba25-4aae-865a-aba4ed7f4081" print('PROPERTY 2') {np.array_equiv(Q.dot(P),P.dot(S))} print('A(BC)=(AB)C') print('Matrix A(BC):') print(np.matmul(Q,P)) print('Matrix (AB)C:') print(np.matmul(P,S)) print('APPROVED!') # + colab={"base_uri": "https://localhost:8080/"} id="cdrn2wTP5iFL" outputId="21e8ad20-078c-4bb5-d4f6-fe5f5fc2e2c2" print('PROPERTY 3') {np.array_equiv(S.dot(R),Q)} print('Matrix A(B+C)') print(np.matmul(S,R)) print('Matrix (AB)+(AC):') print(np.add(R,Q)) print('APPROVED!') # + colab={"base_uri": "https://localhost:8080/"} id="ovmB7zQt5j2c" outputId="afe9e119-78c6-4a23-933e-1a08b13f0cf6" print('PROPERTY 4') {np.array_equiv(P.dot(S),Z)} print('(B+C)A=(BA)+(CA)') print('Matrix (B+C)A:') print(np.matmul(P,S)) print('Matrix (BA)+(CA):') print(np.add(S,Z)) print('APPROVED!') # + colab={"base_uri": "https://localhost:8080/"} id="XZytLNx25l1q" outputId="d1e3efa3-344a-45ef-eca3-71d10603ab6c" print('PROPERTY 5') {np.array_equiv(Q.dot(1),V)} print('A(1)=A') print('Matrix A(1):') print(np.multiply(Q,1)) print('Matrix A:') print(Q) print('APPROVED!') # + colab={"base_uri": "https://localhost:8080/"} id="T7-Zizq65nm_" outputId="c18decbc-9996-47e9-a81b-d8d94534d9eb" print('PROPERTY 6') 
{np.array_equiv(P.dot(0),0)} print('A(0)=0') print('Matrix A(0):') print(np.multiply(P,0)) print('APPROVED!') # + [markdown] id="yvIOx7QUG2K-" # #CONCLUSION # + [markdown] id="mrojm56YG53u" # In the activity performed about Matrix Operations, the students were able to # understand and know all of the Matrix Operations, which are Transposition, Determinant, # Inverse, and the Dot Product/Inner Product, including its Rule 1 and Rule 2 that presents the # six multiplication properties. It can be inferred that the codes would not work if the proper # declaration for each matrix was not used. The students were also able to put all of their # learnings from using Google Colaboratory, Python programming, and solving of Matrix into # practice by creating their own sample matrices that show Matrix Operations, and completing # Task 1 which aims to make a matrices on the different multiplication properties. Thus, the # students clearly defined and explained the entire results made in Google Colaboratory, as well # as Task 1. Furthermore, after accomplishing Task 1, the students were possibly done six flow # charts that signify how they appropriately know the given task, Python programming, and the # six dot product properties. After meeting all of the objectives, the students were able to # discern and properly concluded the laboratory report. #
Assignment06.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="VwK5-9FIB-lu" # # Natural Language Processing # + [markdown] id="X1kiO9kACE6s" # ## Importing the libraries # + id="7QG7sxmoCIvN" colab={"base_uri": "https://localhost:8080/"} outputId="c0ea9e44-0c86-4f8d-cbbc-3525ad82885e" import numpy as np import matplotlib.pyplot as plt import pandas as pd import re import nltk nltk.download('stopwords') from nltk.corpus import stopwords from nltk.stem.porter import PorterStemmer # + [markdown] id="wTfaCIzdCLPA" # ## Importing the dataset # + id="UCK6vQ5QCQJe" dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3) # + [markdown] id="Qekztq71CixT" # ## Cleaning the texts # + id="8u_yXh9dCmEE" corpus = [] for i in range(0, 1000): review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i]) review = review.lower() review = review.split() ps = PorterStemmer() all_stopwords = stopwords.words('english') review = [ps.stem(word) for word in review if not word in set(all_stopwords)] review = ' '.join(review) corpus.append(review) # + id="KpGWdrzGoAsL" colab={"base_uri": "https://localhost:8080/"} outputId="638ebdca-b9de-48f2-b5b2-29ba3dd34bea" print(corpus) # + [markdown] id="CLqmAkANCp1-" # ## Creating the Bag of Words model # + id="qroF7XcSCvY3" from sklearn.feature_extraction.text import CountVectorizer cv = CountVectorizer(max_features = 1500) X = cv.fit_transform(corpus).toarray() y = dataset.iloc[:, -1].values # + [markdown] id="DH_VjgPzC2cd" # ## Splitting the dataset into the Training set and Test set # + id="qQXYM5VzDDDI" from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0) # + [markdown] id="VkIq23vEDIPt" # ## Training the Naive Bayes model on the Training set # + id="DS9oiDXXDRdI" 
class Predictor:
    """Wrap a fitted classifier and vectorizer with a crude negation flip.

    The underlying bag-of-words model ignores word order, so simple
    negations ("not good") are frequently misclassified.  As a heuristic,
    when the review contains the word "not" the predicted label is
    inverted.
    """

    def __init__(self, pred, transformer):
        # pred: object exposing .predict(dense_matrix) -> sequence of 0/1 labels
        # transformer: object exposing .transform([text]) whose result has .toarray()
        self.pred = pred
        self.transformer = transformer

    def predict(self, text):
        """Return the (possibly negation-flipped) 0/1 label for one review."""
        # Normalise contractions so "isn't" also triggers the negation rule.
        text = text.replace("n't", " not")
        label = self.pred.predict(self.transformer.transform([text]).toarray())[0]
        # Fix: match "not" as a whole word only.  The original substring
        # test ('"not" in text') also fired on words such as "nothing",
        # "note" or "notice", wrongly inverting those predictions.
        if "not" in text.split():
            return 1 - label
        return label
Machine-Learning/Notebook/Positivity.ipynb
def plot_histo_graphs(dataset, title):
    """Show a 20-bin histogram of graph sizes and print summary stats.

    ``dataset`` yields (graph, label) pairs; each graph exposes
    ``number_of_nodes()`` (DGL graph — assumed, confirm with caller).
    """
    # Collect the node count of every graph in the split.
    sizes = [sample[0].number_of_nodes() for sample in dataset]
    # (edge counts could be histogrammed instead via number_of_edges())
    plt.figure(1)
    plt.hist(sizes, bins=20)
    plt.title(title)
    plt.show()
    # Summarise via a tensor so min/max come back as integer scalars.
    size_tensor = torch.Tensor(sizes)
    print('nb/min/max :', len(size_tensor),
          size_tensor.min().long().item(),
          size_tensor.max().long().item())
(sec):',time.time() - start) # 38s # - # # Test load function DATASET_NAME = 'MNIST' dataset = LoadData(DATASET_NAME) # 54s trainset, valset, testset = dataset.train, dataset.val, dataset.test # + start = time.time() batch_size = 10 collate = SuperPixDataset.collate train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate) print('Time (sec):',time.time() - start) # 0.0003s # -
data/superpixels/prepare_superpixels_MNIST.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from bs4 import BeautifulSoup as bs import requests import pymongo from splinter import Browser from webdriver_manager.chrome import ChromeDriverManager import time # + executable_path = {'executable_path': ChromeDriverManager().install()} browser = Browser('chrome', **executable_path, headless=False) url = 'https://mars.nasa.gov/news/' browser.visit(url) html = browser.html soup = bs(html, 'html.parser') # + news_title = soup.article.find('div', class_='content_title').a.text news_p = soup.article.find('div', class_='article_teaser_body').text # for story in stories: # try: # title = story.find('a') # titles = title.text # print(titles) # except AttributeError as e: # print(e) print(news_title) print(news_p) # - jpl_url = 'https://www.jpl.nasa.gov/' browser.visit(jpl_url) html = browser.html soup = bs(html) # + browser.links.find_by_partial_text('Images').click() time.sleep(2) browser.links.find_by_partial_text('Image').click() time.sleep(1) browser.links.find_by_partial_text('Download JPG').click() featured_image = browser.url print(featured_image) # + sf_url= 'https://space-facts.com/mars/' browser.visit(sf_url) html = browser.html soup = bs(html) mars_table = pd.read_html(sf_url) # - mars_df = mars_table[0] mars_df.head(10) html_table = mars_df.to_html() html_table sf_url= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars' browser.visit(sf_url) html = browser.html soup = bs(html) # + images= soup.section.find_all('img', class_= 'thumb') img_urls= [] for i in range(len(images)): img_dict= {} browser.find_by_css('img.thumb')[i].click() img_dict['title'] = browser.find_by_tag('h2').text img_dict['img_url'] = browser.find_link_by_text("Sample")['href'] img_urls.append(img_dict) browser.back() 
# - img_urls
Missions_to_Mars/mission_to_mars.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: sbi [~/.conda/envs/sbi/]
#     language: python
#     name: conda_sbi
# ---

# # HOD priors
# Explore the priors for the HOD parameters. The best-fit HOD is from
# Reid+(2014); they only provide the marginalized 1-sigma uncertainties, so we
# have to do a bit of guess work to match their posterior.
#
# Crucially, we have to be careful about the resolution limit of the
# simulations. The halo mass limit is set by halos with >20 particles and
# varies as a function of cosmological parameters.

import os, time
import numpy as np
import scipy as sp
from simbig import halos as Halos
from simbig import galaxies as Galaxies
from simbig import forwardmodel as FM

# --- plotting ---
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False

# +
# The per-realization minimum halo mass was precomputed with the loop below
# and cached in logmres.npy:
# logMres = []
# for i in range(2000):
#     halos = Halos.Quijote_LHC_HR(i, z=0.5)
#     logMres.append(np.log10(np.min(np.array(halos['Mass']))))
# -

cosmos = np.array([Halos.Quijote_LHC_cosmo(i) for i in range(2000)])
logMres = np.load('logmres.npy')

bins = np.logspace(11.5, 12.5, 20)
plt.hist(10**logMres, bins=bins)
plt.xlabel(r'minimum $M_h$', fontsize=20)
plt.xscale('log')
plt.xlim(10**11.5, 10**12.5)

import corner as DFM
fig = DFM.corner(np.concatenate([cosmos, logMres[:,None]], axis=1))

# halo mass is driven entirely by $\Omega_m$

# +
def Ncen_Mh(tt, logMh):
    ''' Expected number of central galaxies at log halo mass ``logMh`` for
    HOD parameters ``tt`` (dict with keys 'logMmin', 'sigma_logM'). '''
    n_cen = 0.5 * (1. + sp.special.erf((logMh - tt['logMmin']) / tt['sigma_logM']))
    n_cen[~np.isfinite(n_cen)] = 0.  # zero out any non-finite entries
    return n_cen


def Nsat_Mh(tt, logMh):
    ''' Expected number of satellite galaxies at log halo mass ``logMh`` for
    HOD parameters ``tt`` (dict with keys 'logM0', 'logM1', 'alpha'). '''
    mass_ratio = (10**logMh - 10**tt['logM0']) / 10**tt['logM1']
    n_sat = Ncen_Mh(tt, logMh) * mass_ratio**tt['alpha']
    n_sat[~np.isfinite(n_sat)] = 0.  # masses below logM0 give NaN; zero them
    return n_sat


def Ngal_Mh(tt, logMh):
    ''' Total expected galaxy occupation: centrals plus satellites. '''
    return Ncen_Mh(tt, logMh) + Nsat_Mh(tt, logMh)
# -

# +
# for CMASS-LOWZ
# theta_fid = Galaxies.thetahod_lowz()

# for CMASS from Reid+(2014)
theta_fid = {'logMmin': 13.03, 'sigma_logM': 0.38, 'logM0': 13.27, 'logM1': 14.08, 'alpha': 0.76}
print(theta_fid)
# -

# +
def sample_prior(N):
    ''' Draw ``N`` HOD parameter vectors from an independent Gaussian prior
    centered on the Reid+(2014) best fit, widths set by their quoted
    marginalized uncertainties. Returns an (N, 5) array. '''
    center = np.array([13.031, 0.38, 13.27, 14.08, 0.76])
    width = np.array([0.029, 0.06, 0.13, 0.06, 0.18])
    return center + width * np.random.normal(size=(N, 5))


_hods = sample_prior(10000)
# -

# +
logMbin = np.linspace(11., 16., 100)  # logMh bins

# evaluate the occupation function for every prior sample
_keys = ['logMmin', 'sigma_logM', 'logM0', 'logM1', 'alpha']
Ngals = np.array([Ngal_Mh(dict(zip(_keys, _hod)), logMbin) for _hod in _hods])
# -

# +
fig = plt.figure(figsize=(10,6))
sub = fig.add_subplot(111)
sub.plot(10**logMbin, Ngal_Mh(theta_fid, logMbin), c='k', ls='-', label='Reid+(2014)')#label='Parejko+(2013)')
sub.plot(10**logMbin, Ncen_Mh(theta_fid, logMbin), c='k', ls='--')
sub.plot(10**logMbin, Nsat_Mh(theta_fid, logMbin), c='k', ls=':')
# 1%-99% band of the prior-sampled occupation functions
quant_lo, quant_hi = np.quantile(Ngals, [0.01, 0.99], axis=0)
sub.fill_between(10**logMbin, quant_lo, quant_hi,
                 color='C0', alpha=0.5, linewidth=0, label='prior')
sub.fill_between([10**np.min(logMres), 10**np.max(logMres)], [1e-3, 1e-3], [1e3, 1e3],
                 color='k', linewidth=0, alpha=0.25, label='minimum $M_h$')
sub.legend(loc='lower right', fontsize=20)
sub.set_xlabel('$M_h$', fontsize=25)
sub.set_xscale('log')
sub.set_xlim(1e11, 5e15)
sub.set_ylabel(r'$<N_{\rm gal}>$', fontsize=25)
sub.set_yscale('log')
sub.set_ylim(1e-2, 5e1)
# -

# It seems the Reid+(2014) HOD doesn't quite fit and is affected significantly
# by the halo mass limit.
nb/hod_priors.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Now You Code 1: Hello 2 Ways
#
# Write a Python program which prompts you to input your first name and then your last name. It should then print your name two ways First Last and Last, First. For example:
#
# ```
# What is your first name? Michael
# What is your last name? Fudge
# Hello, <NAME>
# Or should I say <NAME>
# ```
#
# ## Step 1: Problem Analysis
#
# Inputs: firstname, lastname
#
# Outputs: "Hello, firstname lastname." and "Or should I say lastname, firstname."
#
# Algorithm (Steps in Program):
#
# 1. Input first name. ex: "Joe"
# 2. Input last name. ex: "Recca"
# 3. Computer displays: "Hello, <NAME>. Or should I say <NAME>"

# Step 2: Write code here
# Prompt for both names, then greet in "First Last" and "Last, First" order.
# FIX: the prompts end with a trailing space and the swapped form joins with
# ", " (comma + space) so the output matches the example transcript above
# (previously it printed e.g. "Recca,Joe." with no space).
firstname = input("What is your first name? ")
lastname = input("What is your last name? ")
print("Hello,", firstname, lastname + ".")
print("Or should I say", lastname + ", " + firstname + ".")

# (same cell run a second time in the original notebook; kept for fidelity)
firstname = input("What is your first name? ")
lastname = input("What is your last name? ")
print("Hello,", firstname, lastname + ".")
print("Or should I say", lastname + ", " + firstname + ".")

# ## Step 3: Questions
#
# 1. What happens when don't follow the instructions and enter your first name as your last name? Does the code still run? Why?
# Yes, the code still runs because the computer was not told to differentiate between first and last names.
#
# 2. What type of error is it when the program runs but does not handle bad input?
# When the program cannot handle bad input, it is known as a logical error.
#
# 3. Is there anything you can do in code to correct this type of error? Why or why not?
# One way to prevent a logic error is to write clear and concise code that is easy for the computer to understand.

# ## Reminder of Evaluation Criteria
#
# 1. What the problem attempted (analysis, code, and answered questions) ?
# 2. What the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
#
content/lessons/02/Now-You-Code/NYC1-Hello-2-Ways.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#%% Imports
import numpy as np
import pandas as pd
from scipy.optimize import linear_sum_assignment
import cv2 as cv
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.spatial import distance_matrix

# %config InlineBackend.figure_format = 'svg'
sns.set()
# -

# %% Loading dataframe from new and old result
#df = pd.read_hdf('../data/testing_data/clement/old_combined.hdf', 'data')
df = pd.read_hdf('../tests/clement/df_batch_0.hdf', key='df')

# +
# %% Names are different apparently so we need to rename them, only used for old stuff
df = df.rename(columns={'X (in pixel)': 'x', 'Y (in pixel)': 'y'})  # renaming columns
df = df[['frame', 'ID', 'x', 'y']]
df = df.sort_values(by=['frame', 'ID'], ignore_index=True)
# -

n_flies = df.ID.unique().size

# +
# Frame-to-frame displacement of every fly: reshape positions to
# (frames, flies, 2) — valid because the frame is sorted by (frame, ID) —
# then take the Euclidean norm of consecutive differences.
# FIX: this previously referenced the undefined name `df_new` (leftover from
# the old-vs-new comparison) and raised a NameError; it now uses `df`.
change = np.sum(np.diff(df[['x', 'y']].to_numpy().reshape(-1, n_flies, 2), axis=0)**2, axis=2)**(1/2)
print(np.max(change))

plt.figure(figsize=(15, 5))
plt.subplot(121)
plt.hist(change.flatten(), bins=100, log=True, density=True)
plt.title('New')
plt.xlabel('Position change')
# -

# That's much too high, let's do the manual tracking.

frame = 5365

# Optimal matching between the detections of two consecutive frames.
dist = distance_matrix(df.query(f'frame == {frame}')[['x', 'y']].to_numpy(),
                       df.query(f'frame == {frame + 1}')[['x', 'y']].to_numpy())

linear_sum_assignment(dist)[1]

assignments = [linear_sum_assignment(
                   distance_matrix(df.query(f'frame == {frame}')[['x', 'y']].to_numpy(),
                                   df.query(f'frame == {frame + 1}')[['x', 'y']].to_numpy()))[1]
               for frame in np.arange(99, 1000)]

assignments


class Hungarian:
    """Identity matcher between consecutive frames.

    Solves the linear assignment problem on the pairwise distance matrix of
    two coordinate sets and returns, for each point in frame i, the index of
    its match in frame j.
    """

    def __call__(self, coordinates_i, coordinates_j):
        # Column indices of the optimal assignment are the new identities.
        identities_j = linear_sum_assignment(
            distance_matrix(coordinates_i, coordinates_j)
        )[1].squeeze()
        return identities_j
dev/manual_tracking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Manim v0.11.0 Release Tour # # This interactive worksheet contains an overview over the new features contained in the latest release of the community maintained version of Manim. # + from manim import * config.media_width = "80%" # - # ### [#2075](https://github.com/ManimCommunity/manim/pull/2075): New method: `Mobject.set_default` for changing default values # With the new `set_default` method it is easy to change default arguments for mobjects. For example, `Text.set_default(color=RED)` changes the default color of `Text` to red: # + # %%manim -qm -v WARNING ChangedDefaultDemo class ChangedDefaultDemo(Scene): def construct(self): Text.set_default(color=BLUE) t = Text("This is blue text, magic!") # You can also change multiple arguments at once: Circle.set_default(color=GREEN, fill_opacity=0.5) c = Circle() VGroup(t, c).arrange(DOWN) self.add(t, c) # Call the method without arguments to restore the default behavior! Text.set_default() Circle.set_default() # - # # ### [#2094](https://github.com/ManimCommunity/manim/pull/2094): Implicit function plotting # # There is now a new Mobject allowing to plot points that satisfy some equation. Here is an example for the curves defined by $(x^2 + y^2)^2 - 42 (x^2 - y^2) = 0$ (yellow) and $x^3 - x + 1 - y^2 = 0$ (red). # + # ImplicitFunction? 
# + # %%manim -qm -v WARNING ImplicitFunctionDemo import numpy as np class ImplicitFunctionDemo(Scene): def construct(self): plane = NumberPlane() curve = ImplicitFunction( lambda x, y: (x**2 + y**2)**2 - 42*(x**2 - y**2), color=YELLOW, max_quads=2000, ) self.add(plane) self.play(Create(curve)) self.wait() elliptic_curve = ImplicitFunction( lambda x, y: x**3 - x + 1 - y**2, color=RED, ) self.play(Create(elliptic_curve)) self.wait() # - # # ### [#1991](https://github.com/ManimCommunity/manim/pull/1991): Added support for boolean operations on `VMobject`s # # Manim can now compute the union, intersection, and difference of mobjects! # + # %%manim -qm -v WARNING BooleanOperationDemo class BooleanOperationDemo(Scene): def construct(self): VMobject.set_default(color=GREEN, fill_opacity=0.5) Circle.set_default(radius=2, fill_opacity=0.5) c1 = Circle(color=YELLOW).shift(LEFT) c2 = Circle(color=BLUE).shift(RIGHT) circles = VGroup(c1, c2) self.add(circles.scale(0.5)) # Note: changing the default color and fill opacity for VMobject also affects subclasses # like Union, Intersection, Difference, and Exclusion. grid = VGroup( Union(c1, c2), Intersection(c1, c2), Difference(c1, c2), Exclusion(c1, c2) ).arrange_in_grid(2, 2, buff=3) self.add(grid) VMobject.set_default() Circle.set_default() # - # ### [#2118](https://github.com/ManimCommunity/manim/pull/2118): Added 3D support for `ArrowVectorField` and `StreamLines` # When passing a `z_range` parameter to `ArrowVectorField` or `StreamLines`, the vector field will be rendered in three dimensions. **Note:** this example takes a bit longer to render. 
# + # %%manim -qm -v WARNING --disable_caching ThreeDArrowVectorField class ThreeDArrowVectorField(ThreeDScene): def construct(self): def func(p): return np.cross(p, np.array([0, 0, p[2]])) vector_field = ArrowVectorField( func, x_range=[-2.5, 2.5, 1], y_range=[-2.5, 2.5, 1], z_range=[-1.5, 1.5, 1], ) particle = Dot3D(OUT + RIGHT * 2) self.add(vector_field, particle) particle.add_updater(vector_field.get_nudge_updater(2)) self.move_camera(PI/3, -PI/4, run_time=3) stream_lines = StreamLines( func, x_range=[-2.5, 2.5, 1.5], y_range=[-2.5, 2.5, 1.5], z_range=[-1.5, 1.5, 1.5], ) self.play(FadeTransform(vector_field, stream_lines), run_time=0.5) self.wait() # - # To find out more about the exciting new features, fixed bugs, deprecated functions, and other improvements made in Manim v0.11.0, check out the full changelog at <https://docs.manim.community/en/stable/changelog/0.11.0-changelog.html>. Enjoy *manimating*!
v0.11.0 Release Tour.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/Educat8n/Deep-Learning-for-Computer-Vision-with-Python-and-Keras-TensorFlow-2.0/blob/main/Module1/TensorFlow_1px_vs_2px.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + cellView="form" id="jYitT5RcmKBh" #@title Run this Cell to Shift to TF1.x version # %%capture # %tensorflow_version 1.x ## Use TensorFlow 1.x version # + colab={"base_uri": "https://localhost:8080/"} id="aGEVn007XnY_" outputId="cfe0c349-b9c6-4843-c374-cb5cb924992d" import tensorflow as tf print(f"TensorFlow Version is: {tf.__version__}") # + [markdown] id="gKwfqQYeSkEU" # ### Eager Execution # + id="axbRPir3r0U1" def my_function(a,b): return tf.add(a,b) # + colab={"base_uri": "https://localhost:8080/"} id="DPHXqfwPk8qN" outputId="bab59aad-ceb5-47ab-bd89-3bc875e6dba8" if (tf.__version__).split('.')[0] == '1': ##Run the code if Tf version 1.x a = tf.placeholder(tf.int64) b = tf.placeholder(tf.int64) init = tf.global_variables_initializer() sum_ = my_function(a, b) with tf.Session() as sess: sess.run(init) output = sess.run(sum_, feed_dict={a:3, b:4}) print(f'TF1.x result {output}') else: ##Run the code if Tf version 2.x output = my_function(3, 4) print(f'TF2.x result {output}') # + [markdown] id="yKt-DfG_vad3" # ### Tf Function Decorator for speedup # # The code below is only for TF2.0 # + colab={"base_uri": "https://localhost:8080/"} id="WYq8kZIDxTiX" outputId="34ec874f-9c11-4a73-faf3-d97775067636" import tensorflow as tf print(f"TensorFlow Version is: {tf.__version__}") # + id="cV5_QLSMuzJf" def f(x): for i in range(1000): for j in range(1000): x += 1 return x # + colab={"base_uri": "https://localhost:8080/"} id="VmGL_kmPuQao" 
outputId="3dffccdb-9743-490b-9b28-318944f43e33" # %%timeit x = 0.0 x = f(x) # + id="yXUfHz4lxHQ-" @tf.function def f_deco(x): for i in range(1000): for j in range(1000): x += 1 return x # + colab={"base_uri": "https://localhost:8080/"} id="Je9YYYnWs_cG" outputId="3ec459c0-62e3-4e34-b2e8-c020b8bdb8c8" # %%timeit x = 0.0 x = f_deco(x) # + id="7QQAJnVHvhYc"
Module1/TensorFlow_1px_vs_2px.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- # + [markdown] colab_type="text" id="KQqCmLY16SYq" # Licensed under the Apache License, Version 2.0. # + colab={} colab_type="code" id="h8nRDYzUk_Q0" import tensorflow as tf import os from experimental.attentive_uncertainty import attention # local file importfrom experimental.attentive_uncertainty.contextual_bandits.pretrain import train # local file import # + colab={} colab_type="code" id="QkNF3TXDRsPP" savedir = '/tmp/wheel_bandit/models/multitask' num_target = 50 num_context = 512 data_hparams = tf.contrib.training.HParams(context_dim=2, num_actions=5, num_target=num_target, num_context=num_context) X_HIDDEN_SIZE = 100 x_encoder_sizes = [X_HIDDEN_SIZE]*2 HIDDEN_SIZE = 64 latent_units = 32 global_latent_net_sizes = [HIDDEN_SIZE]*2 + [2*latent_units] local_latent_net_sizes = [HIDDEN_SIZE]*3 + [2] x_y_encoder_sizes = [HIDDEN_SIZE]*3 heteroskedastic_net_sizes = None mean_att_type = attention.laplace_attention scale_att_type_1 = attention.laplace_attention scale_att_type_2 = attention.laplace_attention att_type = 'multihead' att_heads = 8 data_uncertainty = False # + [markdown] colab_type="text" id="q8gWVIF4AccB" # # Prior Predictive + Freeform # + colab={} colab_type="code" id="MTMrAQU0AYm6" uncertainty_type = 'attentive_freeform' local_variational = False model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, global_latent_net_sizes=global_latent_net_sizes, local_latent_net_sizes=local_latent_net_sizes, heteroskedastic_net_sizes=heteroskedastic_net_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, 
data_uncertainty=data_uncertainty, local_variational=local_variational) save_path = os.path.join(savedir, 'best_prior_freeform_mse_unclipped.ckpt') training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 310327, "status": "ok", "timestamp": 1567761729879, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="sIvtExqeAj8s" outputId="7495013a-1092-450a-9758-cb51fa19e734" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="lf2gejIrPEoy" # # Posterior predictive + freeform # + colab={} colab_type="code" id="GQeFc12GPMLS" uncertainty_type = 'attentive_freeform' local_variational = True model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, global_latent_net_sizes=global_latent_net_sizes, local_latent_net_sizes=local_latent_net_sizes, heteroskedastic_net_sizes=heteroskedastic_net_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, data_uncertainty=data_uncertainty, local_variational=local_variational) save_path = os.path.join(savedir, 'best_posterior_freeform_mse_unclipped.ckpt') training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 475166, "status": "ok", 
"timestamp": 1567762205410, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="lFj9-RUoPWKw" outputId="b19ab2f1-1b51-41a7-d4b7-3e137c5e11f2" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="tVRIew4cAvFi" # # Prior Predictive + GP # + colab={} colab_type="code" id="5MM3yu4nAyQk" uncertainty_type = 'attentive_gp' local_variational = False model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, global_latent_net_sizes=global_latent_net_sizes, local_latent_net_sizes=local_latent_net_sizes, heteroskedastic_net_sizes=heteroskedastic_net_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, data_uncertainty=data_uncertainty, local_variational=local_variational) save_path = os.path.join(savedir, 'best_prior_gp_mse_unclipped.ckpt') training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 1394266, "status": "ok", "timestamp": 1567763600014, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="w19ZziWCA3ny" outputId="5b2c1415-4b52-4761-add5-7e0887c5df9e" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="yHVPD2Xp67Ml" # # Posterior predictive + GP # + colab={} colab_type="code" id="KzyEuHrBiL9P" uncertainty_type = 'attentive_gp' local_variational 
= True model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, global_latent_net_sizes=global_latent_net_sizes, local_latent_net_sizes=local_latent_net_sizes, heteroskedastic_net_sizes=heteroskedastic_net_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, data_uncertainty=data_uncertainty, local_variational=local_variational) save_path = os.path.join(savedir, 'best_posterior_gp_mse_unclipped.ckpt') training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 3085087, "status": "ok", "timestamp": 1567766685435, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="rp_qzCf2az7E" outputId="0e0da25c-9b71-460a-8057-f4b8845d086f" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="Z9DEUFJy7yFy" # # Prior predictive + freeform # + colab={} colab_type="code" id="byOLl9e4p8vH" num_target = 50 num_context = 512 data_hparams = tf.contrib.training.HParams(context_dim=2, num_actions=5, num_target=num_target, num_context=num_context) X_HIDDEN_SIZE = 100 x_encoder_sizes = [X_HIDDEN_SIZE]*3 HIDDEN_SIZE = 64 latent_units = 32 freeform_decoder_sizes = [HIDDEN_SIZE]*3 + [2] global_decoder_sizes = [HIDDEN_SIZE]*2 + [2*latent_units] global2local_decoder_sizes = None x_y_encoder_sizes = [HIDDEN_SIZE]*3 heteroskedastic_sizes = None uncertainty_type = None mean_att_type = attention.laplace_attention scale_att_type_1 = 
attention.laplace_attention scale_att_type_2 = attention.laplace_attention att_type = 'multihead' att_heads = 8 data_uncertainty = False model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, freeform_decoder_sizes=freeform_decoder_sizes, global_decoder_sizes=global_decoder_sizes, global2local_decoder_sizes=global2local_decoder_sizes, heteroskedastic_sizes=heteroskedastic_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, meta_learn=False, data_uncertainty=data_uncertainty) save_path = os.path.join(savedir, 'best_prior_freeform_mse_unclipped.ckpt') pred_type = 'prior_predictive' training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, pred_type=pred_type, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 309423, "status": "ok", "timestamp": 1567710539075, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="6XzBpn1DO7nP" outputId="36dd2220-0539-4ef5-e672-1c59d86ebc3b" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="cpdrmdmv6JsJ" # # Prior predictive + gp # + colab={} colab_type="code" id="zJoYi6ep6PEg" num_target = 50 num_context = 512 data_hparams = tf.contrib.training.HParams(context_dim=2, num_actions=5, num_target=num_target, num_context=num_context) X_HIDDEN_SIZE = 100 x_encoder_sizes = [X_HIDDEN_SIZE]*3 HIDDEN_SIZE = 64 latent_units = 32 freeform_decoder_sizes = None global_decoder_sizes = [HIDDEN_SIZE]*2 + [2*latent_units] 
global2local_decoder_sizes = [HIDDEN_SIZE]*3 + [2] x_y_encoder_sizes = [HIDDEN_SIZE]*3 heteroskedastic_sizes = None uncertainty_type = 'attentive_gp' mean_att_type = attention.laplace_attention scale_att_type_1 = attention.laplace_attention scale_att_type_2 = attention.laplace_attention att_type = 'multihead' att_heads = 8 data_uncertainty = False model_hparams = tf.contrib.training.HParams(activation=tf.nn.relu, output_activation=tf.nn.relu, x_encoder_sizes=x_encoder_sizes, x_y_encoder_sizes=x_y_encoder_sizes, freeform_decoder_sizes=freeform_decoder_sizes, global_decoder_sizes=global_decoder_sizes, global2local_decoder_sizes=global2local_decoder_sizes, heteroskedastic_sizes=heteroskedastic_sizes, uncertainty_type=uncertainty_type, att_type=att_type, att_heads=att_heads, mean_att_type=mean_att_type, scale_att_type_1=scale_att_type_1, scale_att_type_2=scale_att_type_2, meta_learn=False, data_uncertainty=data_uncertainty) save_path = os.path.join(savedir, 'best_prior_gp_mse_unclipped.ckpt') pred_type = 'prior_predictive' training_hparams = tf.contrib.training.HParams(lr=0.01, optimizer=tf.train.RMSPropOptimizer, num_iterations=10000, batch_size=10, num_context=num_context, num_target=num_target, print_every=50, save_path=save_path, pred_type=pred_type, max_grad_norm=1000.0) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 1380487, "status": "ok", "timestamp": 1567711919896, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="jxSftGKd6nwH" outputId="977509d6-4b23-4223-f817-7ca48d0c563b" train(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="f6ulTUSa6yXV" # # Archive # + colab={} colab_type="code" id="2wl_uST_i6q5" def sample_training_wheel_bandit_data(num_total_states, num_actions, context_dim, delta, mean_v, std_v, mu_large, std_large): """Samples from Wheel bandit game (see 
https://arxiv.org/abs/1802.09127). Args: num_total_states: Number of points to sample, i.e. (context, action rewards). num_actions: Number of actions. context_dim: Number of dimensions in the context delta: Exploration parameter: high reward in one region if norm above delta. mean_v: Mean reward for each action if context norm is below delta. std_v: Gaussian reward std for each action if context norm is below delta. mu_large: Mean reward for optimal action if context norm is above delta. std_large: Reward std for optimal action if context norm is above delta. Returns: dataset: Sampled matrix with n rows: (context, one_hot_actions). opt_vals: Vector of expected optimal (reward, action) for each context. """ data = [] actions = [] rewards = [] # sample uniform contexts in unit ball while len(data) < num_total_states: raw_data = np.random.uniform(-1, 1, (int(num_total_states / 3), context_dim)) for i in range(raw_data.shape[0]): if np.linalg.norm(raw_data[i, :]) <= 1: data.append(raw_data[i, :]) states = np.stack(data)[:num_total_states, :] # sample rewards and random actions for i in range(num_total_states): r = [np.random.normal(mean_v[j], std_v[j]) for j in range(num_actions)] if np.linalg.norm(states[i, :]) >= delta: # large reward in the right region for the context r_big = np.random.normal(mu_large, std_large) if states[i, 0] > 0: if states[i, 1] > 0: r[0] = r_big else: r[1] = r_big else: if states[i, 1] > 0: r[2] = r_big else: r[3] = r_big one_hot_vector = np.zeros((5)) random_action = np.random.randint(num_actions) one_hot_vector[random_action]=1 actions.append(one_hot_vector) rewards.append(r[random_action]) rewards = np.expand_dims(np.array(rewards), -1) state_action_pairs = np.hstack([states, actions]) perm = np.random.permutation(len(rewards)) return state_action_pairs[perm, :], rewards[perm, :] def get_training_wheel_data(num_total_states, num_actions, context_dim, delta): mean_v = [1.0, 1.0, 1.0, 1.0, 1.2] std_v = [0.01, 0.01, 0.01, 0.01, 0.01] mu_large 
= 50 std_large = 0.01 state_action_pairs, rewards = sample_training_wheel_bandit_data(num_total_states, num_actions, context_dim, delta, mean_v, std_v, mu_large, std_large) return state_action_pairs, rewards # + colab={} colab_type="code" id="d_8d29TAyeLb" def procure_dataset(hparams, num_wheels, seed=0): np.random.seed(seed) data_type = 'wheel_2' all_state_action_pairs, all_rewards = [], [] for _ in range(num_wheels): delta = np.random.uniform() state_action_pairs, rewards = get_training_wheel_data( hparams.num_target + hparams.num_context, hparams.num_actions, hparams.context_dim, delta) all_state_action_pairs.append(state_action_pairs) all_rewards.append(rewards) all_state_action_pairs = np.stack(all_state_action_pairs) all_rewards = np.stack(all_rewards) return all_state_action_pairs, all_rewards @tf.function def step(model, data, optimizer_config, num_context): context_x, context_y, target_x, target_y, unseen_targets = data with tf.GradientTape() as tape: prior_prediction, posterior_prediction = model( context_x, context_y, target_x, target_y) unseen_targets = target_y[:, num_context:] unseen_predictions = posterior_prediction[:, num_context:] nll = utils.nll(unseen_targets, unseen_predictions) mse = utils.mse(unseen_targets, unseen_predictions) local_kl = tf.reduce_mean(model.losses[-1][:, num_context:]) global_kl = tf.reduce_mean(model.losses[-2]) # loss = nll + local_kl + global_kl loss = mse + local_kl + global_kl # loss = nll + global_kl # loss = mse + global_kl gradients = tape.gradient(loss, model.trainable_variables) optimizer_config.apply_gradients(zip(gradients, model.trainable_variables)) return nll, mse, local_kl, global_kl def training_loop(train_dataset, valid_dataset, model, hparams): optimizer_config = hparams.optimizer(hparams.lr) num_context = hparams.num_context best_mse = np.inf train_target_x, train_target_y = train_dataset def _get_splits(dataset, n_context, batch_size, points_perm=True): full_x, full_y = dataset dataset_perm = 
np.random.permutation(len(full_x))[:batch_size] if points_perm: datapoints_perm = np.random.permutation(full_x.shape[1]) else: datapoints_perm = np.arange(full_x.shape[1]) target_x = tf.to_float(full_x[dataset_perm[:, None], datapoints_perm]) target_y = tf.to_float(full_y[dataset_perm[:, None], datapoints_perm]) context_x = target_x[:, :n_context, :] context_y = target_y[:, :n_context, :] unseen_targets = target_y[:, n_context:] return context_x, context_y, target_x, target_y, unseen_targets for it in range(hparams.num_iterations): batch_train_data = _get_splits(train_dataset, num_context, hparams.batch_size, points_perm=True) nll, mse, local_z_kl, global_z_kl = step( model, batch_train_data, optimizer_config, num_context) if it % hparams.print_every == 0: batch_context_x, batch_context_y, batch_target_x, batch_target_y, batch_unseen_targets = _get_splits(valid_dataset, num_context, hparams.batch_size, points_perm=False) prior_prediction, posterior_prediction = model( batch_context_x, batch_context_y, batch_target_x, batch_target_y) valid_unseen_predictions = posterior_prediction[:, num_context:] # unseen_predictions = prior_prediction[:, num_context:] valid_nll = utils.nll(batch_unseen_targets, valid_unseen_predictions) valid_mse = utils.mse(batch_unseen_targets, valid_unseen_predictions) valid_local_kl = tf.reduce_mean(model.losses[-1][:, num_context:]) valid_global_kl = tf.reduce_mean(model.losses[-2]) print('it: {}, train nll: {}, mse: {}, local kl: {} global kl: {} ' 'valid nll: {}, mse: {}, local kl: {} global kl: {}' .format(it, nll, mse, local_z_kl, global_z_kl, valid_nll, valid_mse, valid_local_kl, valid_global_kl)) if valid_mse.numpy() < best_mse: print('Saving best model') best_mse = valid_mse.numpy() model.save_weights(hparams.save_path) print('Best MSE is', best_mse) def pretrain(data_hparams, model_hparams, training_hparams): all_state_action_pairs, all_rewards = procure_dataset(data_hparams, num_wheels=100, seed=0) train_dataset = 
(all_state_action_pairs, all_rewards) all_state_action_pairs, all_rewards = procure_dataset(data_hparams, num_wheels=10, seed=42) valid_dataset = (all_state_action_pairs, all_rewards) model = Regressor( input_dim=data_hparams.context_dim + data_hparams.num_actions, output_dim=1, x_encoder_sizes=model_hparams.x_encoder_sizes, x_y_encoder_sizes=model_hparams.x_y_encoder_sizes, freeform_decoder_sizes=model_hparams.freeform_decoder_sizes, global_decoder_sizes=model_hparams.global_decoder_sizes, global2local_decoder_sizes=model_hparams.global2local_decoder_sizes, heteroskedastic_sizes=model_hparams.heteroskedastic_sizes, att_type=model_hparams.att_type, att_heads=model_hparams.att_heads, uncertainty_type=model_hparams.uncertainty_type, mean_att_type=model_hparams.mean_att_type, scale_att_type_1=model_hparams.scale_att_type_1, scale_att_type_2=model_hparams.scale_att_type_2, activation=model_hparams.activation, output_activation=model_hparams.output_activation, meta_learn=model_hparams.meta_learn) training_loop(train_dataset, valid_dataset, model, training_hparams) # # check if weights are saved correctly # valid_context_x, valid_context_y, valid_target_x, valid_target_y, valid_unseen_targets = valid_data # model.load_weights(training_hparams.save_path) # prior_prediction, posterior_prediction = model( # valid_context_x, # valid_context_y, # valid_target_x, # valid_target_y) # valid_unseen_predictions = posterior_prediction[:, num_context:] # valid_nll = utils.nll(valid_unseen_targets, valid_unseen_predictions) # valid_mse = utils.mse(valid_unseen_targets, valid_unseen_predictions) # print('Verified best MSE is', valid_mse.numpy()) # + colab={} colab_type="code" id="DwJfWum0XuPS" pretrain(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="hn84Yt7mcGW_" # # prior predictive + mse # + colab={"height": 717} colab_type="code" executionInfo={"elapsed": 522082, "status": "ok", "timestamp": 1567369090554, "user": {"displayName": "<NAME>", 
"photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="7NZ26o7tcOlw" outputId="6162e0bb-cffb-4e59-a05a-eebe41a16101" pretrain(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="Q6P3ZTfhXuw-" # # prior predictive + nll # + colab={"height": 717} colab_type="code" executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1567371493767, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="ffIkuwn6X0ts" outputId="6d44ec53-ed08-483a-83c3-692fa49eb70b" pretrain(data_hparams, model_hparams, training_hparams) # + [markdown] colab_type="text" id="KNBoUZfN2Ove" # # Hide Run # # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 2632432, "status": "ok", "timestamp": 1567378651308, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="UZ7gkMsF225b" outputId="06fe7c50-b82c-45cc-b6e4-55f45c715a25" # + [markdown] colab_type="text" id="z5tqtlgXXf33" # # posterior predictive + mse # + colab={"height": 717} colab_type="code" executionInfo={"elapsed": 574430, "status": "ok", "timestamp": 1567372182910, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="cSZoTAQmoK-x" outputId="e3eca267-7dcb-4289-acdf-d46760ee24be" pretrain(data_hparams, model_hparams, training_hparams) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 588365, "status": "error", "timestamp": 1567367792510, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", 
"userId": "07983129814092358079"}, "user_tz": 420} id="3_j_dbbtWayG" outputId="c33c9505-8084-4e0e-f1eb-c832ba449a71" pretrain(data_hparams, model_hparams, training_hparams) # + colab={"height": 1000} colab_type="code" executionInfo={"elapsed": 770744, "status": "error", "timestamp": 1567366933745, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAa2HvmIe1GgREzEkl3p3FRszir9TfbhGC4qNCS=s64", "userId": "07983129814092358079"}, "user_tz": 420} id="ZNiEc3lXitGM" outputId="69357645-70e8-40a2-a358-99288c4c3e88" pretrain(data_hparams, model_hparams, training_hparams) # + colab={} colab_type="code" id="SOcqALkbs79i"
experimental/attentive_uncertainty/colabs/2019_08_26_snp_contextual_bandits_multi_task.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="N63nLhF9zczX" # This code needs a high RAM runtime. Otherwise, the session would crash. If you don't have a Colab Pro subscription, please switch to [Kaggle Kernels](https://www.kaggle.com/kernels). # - # ## Setup and imports # + colab={"base_uri": "https://localhost:8080/"} id="z_puUVads4jp" outputId="6232b5cd-e437-4291-c5c5-80c7723f10a2" # !pip install -U cython # !pip install -q git+https://github.com/lucasb-eyer/pydensecrf.git # !pip install -q ray # + id="EYnbDi4ZtII3" from pydensecrf.utils import unary_from_labels, create_pairwise_bilateral import pydensecrf.densecrf as dcrf from skimage.color import gray2rgb from skimage.color import rgb2gray from tqdm import tqdm_notebook import numpy as np import subprocess # + colab={"base_uri": "https://localhost:8080/"} id="JLLZmGDBvkaS" outputId="265b7579-4fec-48cc-df4d-0873d7750aed" import psutil num_cpus = psutil.cpu_count(logical=False) import ray ray.init(num_cpus=num_cpus) print(f"CPUs: {num_cpus}") # - # ## Load the submission file # Here, we load the load the submission file generated using the `Ensemble_Inference.ipynb` notebook. # + colab={"base_uri": "https://localhost:8080/"} id="qvkuik_ytQKc" outputId="7094b417-86db-4f73-aa57-ce2424660384" submissions = np.load("submission.npy") submissions.shape # - # ## Define CRF utility # # The function below is taken from [this Kaggle Kernel](https://www.kaggle.com/meaninglesslives/apply-crf). 
We tuned the below hyperparameters: # # * `sxy` # * `compat` # * `inference` steps # + id="O34Nk0vNwAjj" @ray.remote def custom_crf(mask_img, shape=(256, 256)): # Converting annotated image to RGB if it is Gray scale if(len(mask_img.shape)<3): mask_img = gray2rgb(mask_img) # Converting the annotations RGB color to single 32 bit integer annotated_label = mask_img[:,:,0] + (mask_img[:,:,1]<<8) + (mask_img[:,:,2]<<16) # Convert the 32bit integer color to 0,1, 2, ... labels. colors, labels = np.unique(annotated_label, return_inverse=True) n_labels = 2 # Setting up the CRF model d = dcrf.DenseCRF2D(shape[1], shape[0], n_labels) # Get unary potentials (neg log probability) U = unary_from_labels(labels, n_labels, gt_prob=0.7, zero_unsure=False) d.setUnaryEnergy(U) # This adds the color-independent term, features are the locations only. d.addPairwiseGaussian(sxy=(12, 12), compat=4, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC) # Run Inference for 20 steps Q = d.inference(20) # Find out the most probable class for each pixel. MAP = np.argmax(Q, axis=0) return MAP.reshape((shape[0], shape[1])) # - # In our experiments, we found out that the higher the values were for `sxy`, `compat`, and `inference` the better the results were. But of course, this is not practically feasible when working in a resource-constrained environment. So, it's better to settle with a score that keeps the trade-offs well balanced. 
# ## Apply CRF and prepare the submission file

# + colab={"base_uri": "https://localhost:8080/", "height": 117, "referenced_widgets": ["0e1d91d543e0447fac265e6520a76818", "9a97279553984be289dd86bcbd96fa58", "900f0b8ac6b84e7996d25f1b6978fd01", "6f669f07c3924ff6af5d2d443c7c0feb", "<KEY>", "5cf7777dff544a9c9e58642385a0eb49", "5846deda351e45008aed46f448ca2755", "97eac1974f6c478b91739beadc57b313"]} id="USIJLjOSwWub" outputId="934dd0d6-06e7-41ca-9ac1-7785398edd12"
# Ship each predicted mask to the Ray object store and fan the CRF
# post-processing out across the worker pool.
crf_ids = [custom_crf.remote(ray.put(mask)) for mask in tqdm_notebook(submissions)]

# Block until every remote CRF task has finished, then stack the results.
refined = ray.get(crf_ids)
crfs = np.array(refined).astype("uint8")
crfs.shape, crfs.dtype

# + colab={"base_uri": "https://localhost:8080/"} id="KdTR_Oc26rf7" outputId="827d2933-8f19-41b8-cc02-82f7ce1f781d"
# Persist the refined masks and zip the artifact for submission.
save_path = "submission_crf.npy"
np.save(save_path, crfs, fix_imports=True, allow_pickle=False)
subprocess.run(["zip", "submission_crf.zip", save_path])
notebooks/Apply_CRF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 7.3 `array` Attributes import numpy as np integers = np.array([[1, 2, 3], [4, 5, 6]]) integers floats = np.array([0.0, 0.1, 0.2, 0.3, 0.4]) floats # ### Determining an `array`’s Element Type integers.dtype floats.dtype # ### Determining an `array`’s Dimensions integers.ndim floats.ndim integers.shape floats.shape # ### Determining an `array`’s Number of Elements and Element Size integers.size integers.itemsize floats.size floats.itemsize # ### Iterating through a Multidimensional `array`’s Elements for row in integers: for column in row: print(column, end=' ') print() for i in integers.flat: print(i, end=' ') ########################################################################## # (C) Copyright 2019 by Deitel & Associates, Inc. and # # Pearson Education, Inc. All Rights Reserved. # # # # DISCLAIMER: The authors and publisher of this book have used their # # best efforts in preparing the book. These efforts include the # # development, research, and testing of the theories and programs # # to determine their effectiveness. The authors and publisher make # # no warranty of any kind, expressed or implied, with regard to these # # programs or to the documentation contained in these books. The authors # # and publisher shall not be liable in any event for incidental or # # consequential damages in connection with, or arising out of, the # # furnishing, performance, or use of these programs. # ##########################################################################
examples/ch07/snippets_ipynb/07_03.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import numpy as np import os import sys sys.path.append(os.path.abspath('../')) from src.data.utility import DataReader, BeerData import warnings warnings.simplefilter(action='ignore', category=FutureWarning) # %load_ext autoreload # %autoreload 2 # - # Instantiate the custom data reader class data_reader = DataReader() # Load Raw Train Data train_df = data_reader.read_data(BeerData.RAW) # ### 1. View Original Data train_df.info() print("Raw Data Dimension", train_df.shape) # ### 2. Exmine NULL values and Drop the rows with Null for brewery_name train_df.isna().sum() ### Exmine How many row has missing brewy names brew_name_null = train_df[train_df['brewery_name'].isnull()] brew_name_null.shape ## 15 rows missing for brewy-names at brewery_id 1193 x 9 and 27 X 6 ### Drop the rows if "brewy_name" is NULL ### Select only non-null value of brewry_name df_cleaned = train_df.copy() df_cleaned = df_cleaned[df_cleaned['brewery_name'].notnull()] df_cleaned.head() # ### 3. Select the features - Drop unpromising-looking features # + # Follow the commonsense col_to_drop = ['beer_beerid', 'brewery_id', "review_profilename", "beer_name", "review_time"] target_column = 'beer_style' df_cleaned = df_cleaned.drop(col_to_drop, axis = 1) #df_cleaned.head() df_cleaned.isna().sum() # - # ### 4. 
# Label Encoding to Target Column

# +
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Encode the beer_style target as integer class ids.
le = LabelEncoder()
df_cleaned['beer_style_encoded'] = le.fit_transform(df_cleaned['beer_style'])
# df_cleaned['brewery_name_encoded'] = le.fit_transform(df_cleaned['brewery_name'])
# df_cleaned.head()

# Drop the beer_style column now that the encoded version exists.
beer_style = df_cleaned.pop('beer_style')

df_cleaned['beer_style_encoded'].value_counts().reset_index()
#print(df_cleaned.isna().sum())
# -

# ### 5. Subset the dataset for experiment as the original dataset is huge in size ,1500K observations

# +
from src.data.sets import split_sets_random, save_sets, load_sets, subset_x_y

#target_column_new = 'beer_style_encoded'
features = ["review_overall", "review_aroma","review_appearance", "review_palate", "review_taste", "beer_abv" , "brewery_name"]

# NOTE(review): subset_x_y appears to return (y, X) in that order — confirm
# against src/data/sets before reusing elsewhere.
y_main, X_main = subset_x_y(df_cleaned[features], df_cleaned['beer_style_encoded'], 0, 200000)
# y_val, X_val = subset_x_y(df_cleaned[features], df_cleaned['beer_style_encoded'], 100000, 200000)
# y_test, X_test = subset_x_y(df_cleaned[features], df_cleaned['beer_style_encoded'], 200000, 300000)

# +
# Check the size of the Target Class Distribution in the subset Dataset
(unique, counts) = np.unique(y_main, return_counts=True)
frequencies = np.asarray((unique, counts)).T
#print(frequencies)
## Remark - the distribution is the same as the original
# -

# ### 6. Imputation of Null values in 'beer_abv' column

# +
#### Before Imputation
print("Null Check: Before Imputation\n", X_main.isna().sum())

# Mean-impute missing alcohol-by-volume values.
# NOTE(review): X_main was derived from df_cleaned; this assignment may
# trigger pandas' SettingWithCopyWarning — confirm intended.
X_main['beer_abv'] = X_main['beer_abv'].fillna(X_main['beer_abv'].mean())

#### After Imputation
print("\n Null Check: After Imputation\n", X_main.isna().sum())
# -

# ### 7.
# Scaling the numerical columns

# +
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()

# Select the numerical columns
num_cols = ['review_overall', 'review_aroma', 'review_appearance', 'review_palate', 'review_taste', 'beer_abv']

# Standardize the numeric review columns on a copy of X_main.
X_main_copy = X_main.copy()
#X_main_copy.head()
X_main_copy[num_cols] = sc.fit_transform(X_main_copy[num_cols])
X_main_scaled = X_main_copy.copy()

# +
###### AFTER SCALING #####
# Re-attach the encoded target for inspection alongside the scaled features.
X_main_scaled['beer_style'] = y_main
print("Null Check", X_main_scaled.isna().sum())
X_main_scaled.head()
# -

# ### 8. KElbow Visualiser to get the Cluster count

# +
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import KMeans

num_cols = ["review_overall", 'review_aroma', 'review_appearance', 'review_palate', 'review_taste', 'beer_abv']

# Elbow sweep over k=2..14 to choose a cluster count for the review features.
X_main_scaled_copy = X_main_scaled.copy()
model = KElbowVisualizer(KMeans(), k=(2,15))
model.fit(X_main_scaled_copy[num_cols])
n_cluster = model.elbow_value_
n_cluster
# -

# ### 9. Gaussian Mixture - Add new feature "cluster_kmean"

# +
from sklearn.mixture import GaussianMixture

# Fit a GMM with the elbow-selected component count; fixed seed for
# reproducibility.
gmm = GaussianMixture(n_components=n_cluster, random_state=8)
gmm.fit(X_main_scaled_copy[num_cols])
# -

# Assign each row its mixture-component id as a new feature.
# NOTE(review): the column is named 'cluster_kmean' but the labels come from
# the Gaussian mixture, not k-means — confirm the name is intentional.
cluster_prediction = gmm.predict(X_main_scaled_copy[num_cols])
X_main_scaled_copy['cluster_kmean'] = cluster_prediction

# Sanity check: predict the cluster of the first observation.
obs = pd.DataFrame(X_main_scaled_copy.iloc[0][num_cols]).transpose()
gmm.predict(obs)

X_main_scaled_copy.head()

# ### 10.
# One Hot Encoding the feature - "brewery_name"

# +
from sklearn.preprocessing import OneHotEncoder

ohe = OneHotEncoder(sparse=False)

## Re-assign brewery_name into the dataframe to do OHE, because it was
## removed during the scaling and k-means clustering
X_main_scaled_kmean = X_main_scaled_copy.copy()
X_main_scaled_kmean['brewery_name'] = X_main['brewery_name']

# One-hot encode brewery_name into a dense frame with named columns.
X_main_cat_enc = pd.DataFrame(ohe.fit_transform(X_main_scaled_kmean[['brewery_name']]))
X_main_cat_enc.columns = ohe.get_feature_names(['brewery_name'])

# Drop the original column brewery_name as the encoded columns are in place.
X_main_scaled_kmean.drop('brewery_name', axis = 1, inplace=True)
X_main_scaled_encoded = pd.concat([X_main_scaled_kmean, X_main_cat_enc], axis=1)

## Re-add the encoded target ahead of splitting.
# NOTE(review): this column is named 'beerstyle_encoded' while earlier cells
# use 'beer_style_encoded' — confirm the different spelling is deliberate.
X_main_scaled_encoded['beerstyle_encoded'] = y_main
X_main_scaled_encoded.head()
# -

# ### 11. Split the encoded and scaled 200k dataset into Train-Val-Test sets

X_train, y_train, X_val, y_val, X_test, y_test = split_sets_random(X_main_scaled_encoded, "beerstyle_encoded", test_ratio=0.2, to_numpy=True)

# +
# print(X_train.shape)
# print(X_val.shape)
# print(X_test.shape)
# -

# ### 12. Baseline Model

# +
from src.models.null import NullModel

# Null-model baseline (src/models/null) to compare the neural net against.
base_model = NullModel(target_type="classification")
y_base = base_model.fit_predict(y_train)

from src.models.performance import print_reg_perf
print_reg_perf(y_base, y_train, set_name='Training')
# -

# ### 13. Load Pytorch Dataset

# +
from src.models.pytorch import PytorchDataset

# Wrap the numpy splits in torch-compatible Dataset objects.
train_dataset = PytorchDataset(X=X_train, y=y_train)
val_dataset = PytorchDataset(X=X_val, y=y_val)
test_dataset = PytorchDataset(X=X_test, y=y_test)
# -

# ### 14.
# Build Neural Net Model

# +
import torch as torch
import torch.nn as nn
from src.models.pytorch import PytorchMultiClass
import torch.nn.functional as F

# %load_ext autoreload
# %autoreload 2

# Input width is the number of encoded feature columns.
model = PytorchMultiClass(X_train.shape[1])
# -

from src.models.pytorch import get_device, predict

# Place the model on the device chosen by the project helper.
device = get_device()
model.to(device)

criterion = nn.CrossEntropyLoss()
# NOTE(review): lr=0.1 is unusually high for Adam — confirm intentional.
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# +
from src.models.pytorch import train_classification, test_classification

N_EPOCHS = 10
BATCH_SIZE = 100

# Epoch loop: train on the training split, evaluate on the validation split.
for epoch in range(N_EPOCHS):
    train_loss, train_acc = train_classification(train_dataset, model=model, criterion=criterion, optimizer=optimizer, batch_size=BATCH_SIZE, device=device)
    valid_loss, valid_acc = test_classification(val_dataset, model=model, criterion=criterion, batch_size=BATCH_SIZE, device=device)
    print(f'Epoch: {epoch}')
    print(f'\t(train)\t|\tLoss: {train_loss:.4f}\t|\tAcc: {train_acc * 100:.1f}%')
    print(f'\t(valid)\t|\tLoss: {valid_loss:.4f}\t|\tAcc: {valid_acc * 100:.1f}%')
# -

# Final held-out evaluation on the test split.
test_loss, test_acc = test_classification(test_dataset, model=model, criterion=criterion, batch_size=BATCH_SIZE, device=device)
print(f'\tLoss: {test_loss:4f}\t | \tAccuracy:{test_acc:.1f}')

# Persist the whole model object (not just the state dict).
torch.save(model, "../models/pytorch_multi_beer_type_prediction_nn_kmeans.pt")

# +
# Reload the saved artifact to verify it round-trips.
import torch as torch
import os
import sys
sys.path.append(os.path.abspath('../'))
model = torch.load('../models/pytorch_multi_beer_type_prediction_nn_kmeans.pt', encoding='ascii')
model

# +
from torch import Tensor
from src.models.pytorch import predict

# features=['brewery_name','review_aroma','review_appearance', 'review_palate', 'review_taste']
# query_data = pd.DataFrame([{5,7,8,3,5,4}], columns = features )

# NOTE(review): these query columns do not match the training features
# (no review_overall/beer_abv/cluster/one-hot brewery columns, and the values
# are unscaled) — confirm predict() performs the required transformation.
query_data =pd.DataFrame( {'brewery_name': [6, 6,8,9 ], 'review_aroma': [22000,25000,27000,35000], 'review_appearance': [7,8,4,3], 'review_palate': [3,4,2,5], 'review_taste': [1,3,5,6] })
#query_data_tensor = torch.tensor(query_data.to_numpy())

prediction = predict(query_data, model)
# #obs = pd.DataFrame(X_main_scaled_copy.iloc[0][num_cols]).transpose()
# prediction
notebooks_old/7_Encoding_Scaling_withoutReviewTime_sampleSize200k-Kmeans-5clusters.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Overview # # * 똑똑한 사람과 천성이 학습자들의 특성중에 하나는 묻는걸 두려워하지 않는다는 것과 답에 귀를 기울인다는 것이다. # * 여기서 짧고 간단한 질문들을 하려sek. # * 질문들에 직접적이고 짧은 답을 찾는 것은 쉽지 않다. # * WBS 안쓴다고 문제되겠나? # * WBS를 다 안쓴다면 나는 써야하는가? # * WBS를 어떻게 적용할 건가? # * WBS는 어떤 업계에서 사용할 수 있나? # * 어떤 Project에서 WBS를 써야하는가? # * 이모든 질문에 생각한 후에 자신에 가치 있는지 입증하기 위해 WBS를 사용할지 결정해라. # * WBS를 어떻게 하면 적절하게 사용할지? 어떻게 하면 Project의 범위를 잘 관리할지에 고민해라. # * 이번장은 이런 질문에 답을 얻어 보자. # # What if you don't use the WBS? # * WBS를 사용하지 않을 시, Project의 범위와 Project의 다른 영역을 관리하는 방법에 대한 개선의 기회를 잃게 되는 것이다. # * WBS없는 프로젝트 관리는 종착지에서 찾게될 목표물에 대한 구상없이 모르는 장소를 여행하는 것과 같다. # * 다음은 가치있는 WBS를 사용하지 않아서 발생할 수 있는 것들이다. # # * 당신이 놓친 작업들을 발굴하는데에서 프로젝트 진척이 시작되니 프로젝트를 계획하는데 소비한 시간은 길어질 것이다. 이게 전형적이다. # * 당신의 프로젝트 계획의 품질은 조잡하거나 가치있는 WBS를 사용할때를 따라가지 못할 수 있다. # * 이해관계자는 프로젝트 작업에 대한 이해가 그리 좋지 않아서 이해관계자의 기대는 관리하기 어렵게 될 것이다. 이는 다국어 프로젝트나 세계화 프로젝트시에는 아주 심각하다. # * 번위 변경 요청을 검토/평가/승인/리젝으로 프로젝트의 베이스라인을 반복적으로 수정하게 되어 리스크를 감수하게 된다. # * 놓친 작업으로 예산을 넘기기 일수다. 추가적인 금전을 요구하게 된다. 시간을 까먹게 된다. # * 승인된 프로젝트 범위 전체를 계획한것이 아닌걸 깨닫곤 또는 승인된 범위 외적인 범위를 계획하곤 힘들어진다. # * 작업에 차이가 나는 부분에 작업 컴포넌트를 달성할 책임자와 책무자가 깔끔하지 않다. # * 주기적인 재계획으로 납기를 넘길 수 있다. # * 프로젝트가 완벽한 계획을 수립했더라도 범위가 엉성하게 정의되면 커스터머가 요구하는 걸 배포할 수 없다. # * 숨차다, 좋은 프로젝트 범위 정의를 가지고 WBS를 사용해야 하는 이유는 더 많다. # * 전에 WBS를 사용한적이 없다면 학습곡선이 존재할 거다. 허나 단시간에 성공적으로 할 수 있다. # # Can you apply the WBS to any project? # * WBS는 수년가 있었고 대부분의 프로젝트에 좋은 툴로 나온다. # * DoD에서 제시한 국방 표준의 WBS를 볼 수 있다. # * WBS는 산업계 전반에서 사용이 가능하다. IT, 헬스, 책 등등. # * 내 커스터머는 텔레커뮤니케이션과 항공, 건설, 타 업계에서 사용하고 있다. # # > 다른 프로젝트나 다른 업계에서 사용되는 WBS는 모두 동일하다 단지 컨텐츠만 다를 뿐. # # When do you create and use the WBS? # * WBS를 만들기 위해선, project scope statement이 필요할 테다. # * WBS는 PSS와 일관되야 한다. # * PMBOK에 따르면 요구사항 문서 또한 필요하다. # * WBS와 Req.를 병렬로 정의할 수 있고 범위 베이스라인을 수립할 때까지 WBS를 정련한다. 
# * 프로젝트에 대해서 많이 알수록 현실성있는 계획을 수립할 수 있고 계획과 실행의 질이 높다. # * 저자의 경우 **Project Charter**와 **Scope statement**를 받자마자 바로 실행한다. # # > 실무 팁 # 프로젝트 시작하자마자 WBS를 작성해라. 요거이 핵심요소이다. # * 프로젝트 전반에서 WBS를 사용한다. # * Create it during the project initiation, # * Refine it during the project planning, # * Use it to monitor and control during the project execution, and # * Verify that work from internal and external sources is complete during the project closing. # # Who should create the WBS? # * Project manager는 WBS 개발에 핵심 인력이고 이해 관계자는 참여자이다 # * Project manager는 project team과 적당한 이해 관계자와 일한다. # * 프로젝트에 의존적으로 전문가가 포함될 수 있다. # * WBS를 작업을 수행할 개개인과 함께 생성하는 건 중요하다. # # > 실무 팁 # WBS는 유사한 작업을 수행한 경험이 있는 사람이나 작업을 수행하기 위해 투입되는 사람과 작업되어야 한다. # * PM으로써 WBS를 iterative하게 가져간단다. # * 생성 과정에 비즈 분석가도 한몫한다. # * Program manager와 PMO manager도 핵심 주자다. # # How do you create the WBS? # * 적당한 WBS를 생성하는건 어렵지 않다. # * 간단한 툴이나 개념에서 도움을 얻을 수 있다. # * 프로젝트 크기와 복잡도에 따라서 수시간에서 수일을 잡아 먹을 수 있다. # * ch6은 효과적인 WBS를 생성하는 입증된 단계를 설명한다. # # How should you update the WBS? # * 승인나기전에 언제나 수정할 수 있다. # * 일단 승인나면 변경 제어 공정을 따를 필요가 있다. # * WBS를 갱신할 때, 일관성 차원에서 다른 프로젝트 문서도 같이 리뷰되어야 한다. # > 실무 팁 # WBS 갱신은 일정과 리스크, 조달과 같은 문서에 영향을 주지 않는다는 점을 기억하자. # * Project문서를 갱신할 때 WBS에 영향을 줄 수도 있다. # * WBS는 때론 가용한 상태로 전환되는 신규 정보에 기반하여 정련된다. # * Scope이 변경될 일이 없으면 정형의 변경 공정을 탈 필요는 없다. # <img src='./figures/f4.1.jpg'> # > 실무 팁 # 추적과 기록의 목적은 WBS를 수정할 때마다 파일의 버전을 생성한다. # # What is the PMO's role in the WBS? # * The PMO can provide training about the WBS so project managers become more effective in its use. # * The PMO staff can coach project managers on how to best use, apply, represent, and communicate the WBS. # * The PMO can create WBS guidelines and templates to guide the process to create and update the WBS and to ensure consistency among different projects. # * The PMO can research what the best software is for the organization to use when creating, updating, and communicating the WBS. 
It can procure the software and train the project managers and team in its use. # * The PMO can work with project managers to ensure that the WBS is linked to the schedule and other project documents in order to ensure appropriate planning and higher success chances. # * The PMO can ensure that project managers use the appropriate WBS templates and guidelines to facilitate the scope comparison of different projects belonging to the same program or portfolio. # * WBS 사용을 평가하기 시작하면서 Project manager가 겪는 많은 질문에 답을 했다. # * 이 답변에 대한 배경 정보는 WBS의 이해를 돕고 WBS의 장접에 대한 확신을 줄게다. # # References # 1. [U.S.] Department of Defense. 1993. Department of Defense Handbook: Work Breakdown Structures for Defense Material Items, MIL-STD-881B. Washington, D.C.: Department of Defense. # 1. PMBOK® Guide—Fifth Edition. 125. # <img src='./figures/thank-you.gif'> # <img src='./figures/QA.jpg'> # # @sol2man2
ch04.What Are the Key Questions to Understanding the WBS?.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Configure Jupyter so figures appear in the notebook # %matplotlib inline # Configure Jupyter to display the assigned value after an assignment # %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * from pandas import read_html # - # # How does the growth rate of the invasive tribble fish in Lake Evolution, an isolated lake, affect the population of the native cute fish over time? # Lake Evolution has held a steady population of cute fish in equilibrium for as long as anyone can remember. However, with the recent spread of the invasive, quick reproducing tribble fish, many locals are worried about how their introduction to Lake Evolution might affect the native population. Tribble fish have been shown to have a widely varying growth rate based on many factors. So, how would their growth rate affect the cute fish population if they were to be introduced to Evolution Lake? # # To explore this we will create a model of the tribble fish’s population growth based on the set growth rate. Next, we will calculate the tribble fish’s consumption of Lake Evolution’s resources. Finally, we will model the cute fish’s population based on the resources left available to them. # # We will run the model a series of times with varying tribble growth rates to show how the cute population might be affected based on how quickly the tribble reproduce. # # system = System(cute_cap = 1375, tribble_cap = 11000, cute_birth_rate = 0.05, tribble_initial_pop=2) lake_sim = State(AR = 100, cute_death_rate=0.05, tribble_annual_growth = 0.01) # Above are the variables and objects we established to set up Evolution Lake. 
#
# #### System variable explanations:
# ```
# Cute_cap represents the carrying capacity for just the cute fish based on the ratio of body mass to volume of water
# Tribble_cap represents the carrying capacity for just the tribble fish based on the ratio of body mass to volume of water
# Tribble_initial_pop represents the tribble's population, it starts at 2 because that's what's needed for a population to start
# ```
#
# #### State object explanations:
# ```
#
# AR represents the amount of resources available to the cute fish in the lake. This number is in percentage (it doesn't
# mean there are 100 pieces of food in the lake)
# ```
#
# ## **The numbers for cute fish and tribble fish are based on salmon and sunfish statistics, respectively*
#

def run_simulation(lake_sim, tribble_growth_rate, system, number):
    """Simulate 300 years of tribble and cute fish populations.

    lake_sim: State object, mutated in place with each year's rates.
    tribble_growth_rate: initial annual growth fraction for the tribbles
        (this is the value varied by the sweep).
    system: System with carrying capacities, birth rate and initial pops.
    number: 2 -> plot both populations; anything else -> return the cute
        fish TimeSeries for external plotting.
    """
    tribble_results = TimeSeries()  # yearly tribble population
    tribble_results[0] = system.tribble_initial_pop  # invasion starts with the seed population
    AR_results = TimeSeries()  # yearly available resources (%) for the cute fish
    cute_results = TimeSeries()  # yearly cute fish population
    cute_results[0] = system.cute_cap  # cute fish start at capacity, just before the invasion
    for t in range(300):  # 300 represents the 300 years this will simulate/predict
        # Logistic growth: the rate tapers as the tribbles near capacity.
        lake_sim.tribble_annual_growth = tribble_results[t]*tribble_growth_rate* (1- (tribble_results[t]/system.tribble_cap))
        tribble_results[t+1] = tribble_results[t] + lake_sim.tribble_annual_growth  # add annual growth to the previous year's population
        # Resources fall linearly with the tribble share (down to 87.5% at tribble capacity).
        AR_results[t] = (1- (tribble_results[t]/system.tribble_cap)*0.125)*100
        # Death rate rises above the 5% base as resources drop below 100%.
        lake_sim.cute_death_rate = 0.05 + (1- AR_results[t]/100)* 0.95
        # Net yearly change = births minus deaths.
        cute_results[t+1] = cute_results[t] + ((system.cute_birth_rate*cute_results[t]) - (lake_sim.cute_death_rate*cute_results[t]))
    if number == 2:
        plot_both_pops(tribble_results, cute_results)  # option to plot a single run directly
    else:
        return cute_results

def plot_both_pops(tribble_results, cute_results):
    """Plot both populations for comparison at a set tribble growth rate."""
    plot(cute_results, label = 'Cute Population')
    plot(tribble_results, label='Tribble Population')
    # NOTE(review): "Trible" typo in the displayed title (runtime string,
    # left unchanged here).
    decorate(xlabel = 'Years', ylabel = 'Pop (in millions)', title = 'Cute Fish and Trible Fish Population vs Time')
    legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()

run_simulation(lake_sim, 0.05, system, 2)  # runs the simulation once with tribble growth rate at 0.05

# ### Snapshot of the Simulation, showing the populations' relationship
#
# The above graph shows the result of run_simulation with a plot of both fish populations. You can see how the increase of the tribble fish caused a rapid decline in the cute fish's population. Notice how the cute fish go extinct before the tribble fish reach their carrying capacity. This shows how the diminished resources affect the cute fish fatally even before they are completely depleted.
#
# The graph shows how once the cute fish population begins to crash even the minority still receiving available resources can't survive very long. At a crucial point the native population will not be able to sustain itself and begins a quick decline.
def growth_rate_sweep(): growth_array = linspace(0.01,0.1,10) #this creates the array of tribble growth rates for t in growth_array: #the model runs 10 times with each varying growth rate plot(run_simulation(lake_sim, t, system,0), label = "%0.2f"%t) #the cute fish population is plotted for each growth rate decorate(xlabel = 'Years', ylabel = 'Pop (in millions)', title = 'Cute Fish Population vs Time') legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.) plt.show() growth_rate_sweep() # # Interpretation: # This figure shows the cute fish's population based on the tribble fish's varying growth rates. The growth rate is the initial annual percentage growth of the tribble fish population. As the population increases the growth rate declines correspondingly to represent the carrying capacity of the pond, making the population curve quadratic. The dependent cute fish population thus takes a similar shape. # # As can be seen in the final figure, the cute fish's population declines ever sooner as the tribble's growth rate increases. We were surprised to see a geometric relationship between the timing of the different population curves decline because the tribble fish growth rate changes at a constant linear rate. As the growth rate decreases the cute fish population crash happens at what appears to be exponentially later times. # # The rate at which the cute fish go extinct is also correspondingly lower based on the diminished tribble growth rate as expected. We didn't, however, expect the tribble fish population to take so long to start, making large scale impact on the cute fish. Even in the most invasive situation, the cute fish's population doesn't strongly decline until after year 50. # # We failed to consider a couple of aspects in our model. Firstly, we did not account for the cute fish's ability to adapt to the diminished resources and competition. An improved model could account for this. 
# Secondly, we assumed that the tribble fish had no predators and that the cute fish's predators would not adapt their diet as their normal food supply was depleted. Furthermore, we ignored other changes that might occur due to the introduction of a new species in a closed ecosystem. There are innumerable factors that may affect either population unpredictably and lead to a massive trophic cascade.
#
# The model did inform us that, given enough time, the tribble fish will completely wipe out the cute fish. However, lower tribble growth rates mean the cute fish population survives exponentially longer. This is very important, as it shows the most effective way to handle an invasive species.
#
# As soon as tribble fish are introduced to Evolution Lake, they start to spread, but their population doesn't spike for at least five decades. During that time the population of the invasive species can be suppressed or even fished to extinction. This lowers their growth rate, giving the native species a longer time to adapt survival strategies. Once the invasive population reaches a certain size, though, the native species will be set on a quick path to extinction.
code/ProjectKTandJeremy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <!-- This course and all its materials can be found at https://github.com/msramalho/Teach-Me-Quantum where they are ket in the most recent version, subject to open source contributions from the community --> # # <h1 align="center">Teach Me Quantum</h1> # # A university-level course on **Quantum Computing** and **Quantum Information Science** that incorporates [IBM Q Experience](https://quantumexperience.ng.bluemix.net/qx/experience) and [Qiskit](https://www.qiskit.org/). # This course is adequate for general audiences without prior knowledge on Quantum Mechanics and Quantum Computing (see [prior knowledge](#prior-knowledge)), has an estimated average duration of **10 weeks at 3h/week** (see [duration](#duration)) and is meant to be the entrypoint into the **Quantum World**. # # <p align="center"><img width="300px" src="https://i.imgur.com/39Mv9Ra.gif"></p> # # This course is **Open-source** and appropriate for both _autodidacticism_ as well as _classroom teaching_ by educators, professors and lecturers in their own classes. Given the dynamic nature of the topic, any open-source contributions and future improvements are welcome. 
# # ## Course Overview # # * 📁 [Week_0-Hello_Quantum_World](Week_0-Hello_Quantum_World/README.ipynb) # * 📖 [Slides](Week_0-Hello_Quantum_World/slides.pdf) # * 📁 [Week_1-Quantum_Tools](Week_1-Quantum_Tools/README.ipynb) # * 📖 [Slides](Week_1-Quantum_Tools/slides.pdf) # * 📁 [Exercises](Week_1-Quantum_Tools/exercises/README.ipynb) # * 📁 [Week_2-Quantum_Information_Science](Week_2-Quantum_Information_Science/README.ipynb) # * 📖 [Slides](Week_2-Quantum_Information_Science/slides.pdf) # * 📁 [Exercises](Week_2-Quantum_Information_Science/exercises/README.ipynb) # * 📁 [Week_3-Quantum_Gates](Week_3-Quantum_Gates/README.ipynb) # * 📖 [Slides](Week_3-Quantum_Gates/slides.pdf) # * 📁 [Exercises](Week_3-Quantum_Gates/exercises/README.ipynb) # * 📁 [Week_4-Quantum_Facts](Week_4-Quantum_Facts/README.ipynb) # * 📖 [Slides](Week_4-Quantum_Facts/slides.pdf) # * 📁 [Exercises](Week_4-Quantum_Facts/exercises/README.ipynb) # * 📁 [Week_5-Quantum_Algorithms](Week_5-Quantum_Algorithms/README.ipynb) (Deutsch's_algorithm) # * 📖 [Slides](Week_5-Quantum_Algorithms/slides.pdf) # * 📁 [Exercises](Week_5-Quantum_Algorithms/exercises/README.ipynb) # * 📁 [Week_6-Quantum_Search](Week_6-Quantum_Search/README.ipynb) (Grover's_algorithm) # * 📖 [Slides](Week_6-Quantum_Search/slides.pdf) # * 📁 [Exercises](Week_6-Quantum_Search/exercises/README.ipynb) # * 📁 [Week_7-Quantum_Factorization](Week_7-Quantum_Factorization/README.ipynb) (Shor's_algorithm) # * 📖 [Slides](Week_7-Quantum_Factorization/slides.pdf) # * 📁 [Exercises](Week_7-Quantum_Factorization/exercises/README.ipynb) # * 📁 [Week_8-High_Level_Quantum_Programming](Week_8-High_Level_Quantum_Programming/README.ipynb) (qiskit-aqua) # * 📖 [Slides](Week_8-High_Level_Quantum_Programming/slides.pdf) # * 📁 [Exercises](Week_8-High_Level_Quantum_Programming/exercises/README.ipynb) # * 📁 [Week_9-State_of_the_Quantum_Art](Week_9-State_of_the_Quantum_Art/README.ipynb) # * 📖 [Slides](Week_9-State_of_the_Quantum_Art/slides.pdf) # # ## Prior Knowledge # Students of this 
# course are expected to be familiar with (this can be done while going through the course):
# * [Python](https://www.python.org/) language
# * [Jupyter](http://jupyter.org/) Notebook environment
# * Some linear algebra: inner and outer products, eigenvalues, norms, transpose, adjoints (complex conjugates), tensor product, ...
#
# ## Learning Goals
# After completing this course, students should be able to:
# * Understand the basics of Quantum Mechanics
# * Know how a computing model can be built around Quantum Mechanics
# * Understand the advantages, disadvantages and implications of Quantum Computing
# * Understand Quantum Information Science and how it contrasts with classical information theory
# * Leverage QISKit towards research and development in the _Quantum World_
# * Understand the empirical differences between Quantum Simulators and real Quantum Devices (such as those available in IBMQ)
# * Design, interpret and deploy quantum circuits (simulators and real processors)
# * Know and describe a few of the most common quantum algorithms: Deutsch, Grover, Shor
# * Be able to quickly understand new quantum algorithms based on the same principles: Simon, ...
# * Be able to quickly understand new principles of quantum computing: Adiabatic, ...
# * Understand the impact that the advance of quantum computers can have on the world as we know it
# * Understand high-level applications of near-term quantum algorithms, namely how to use qiskit-aqua for solving real world problems
# * Move on to deeper waters, by exploring according to their heart's desire!
#
# ## Duration
# * Estimated average duration of **10 weeks at 3h/week**
# * Duration is flexible depending on the level of depth a teacher imposes on each week.
# * Usually 1h theory + 2h practice, except for:
#   * week 0 (1h + 0h = 1h)
#   * week 1 (1h + 1h = 2h)
#   * week 9 (2h + 0h = 2h)
# * Estimated total time: **25h to 30h**.
# # # ## Customizing Slides # #### Reusing Slides # The materials in this course can be adapted to specific classes, contexts, schools,... to the desire of the educators. # # The `author`, `date` and `instute` properties of each presentation is defined in the respective `macros.sty` file (this file is an exact copy for each week). If you want to update global settings for all weeks (like the author, update links colors, update code snippets display, ...) you can use the [`sh replicate_macros.sh`](utils/replicate_macros.sh) (linux) | [`replicate_macros.bat`](utils/replicate_macros.bat) (windows) to replicate the changes from a copy of the file for every week's folder (the source file must be in the [utils](utils/) folder, there is already an updated version of [macros.sty](utils/macros.sty) in there). # # The constraint for using this materials is to replace the `\author[]{}` command by the following command: `\author[LASTNAME]{FIRSTNAME LASTNAME,\\ \scriptsize{based on slides by \textbf{<NAME>}}}` with the update author info. # # #### Animating Slides # Each `.pdf` file for slides is static, but if you want to include animations you can do so by replacing the `document` class in the first line of the `main.tex` files by `\documentclass[handout]{beamer}`) and following [these instructions](https://tex.stackexchange.com/a/177060/126771). # # # #### Adding Notes to Slides # This can also be accomplished by appending the following lines before `\begin{document}`: # ```tex # \usepackage{pgfpages} # \setbeameroption{show notes} # \setbeameroption{show notes on second screen=right} # ``` # # #### Compiling LaTeX files # To achieve this use any LaTeX compiler of your choice, if you have [pdflatex](https://www.tug.org/applications/pdftex/) you can simply do `pdflatex main.tex` on each week's latex folder. 
# # #### Presenting Slides with Notes and/or Animations
# Presenting the slides with **notes** or with **animations** only works with an external program; I advise [dannyedel/dspdfviewer](https://github.com/dannyedel/dspdfviewer/releases), which also has dual-screen and timer functionality.
#
# ---
#
# ## Additional notes
# Each week's slides have a particular theme; that is to help students distinguish between them and strengthen the learning process by fostering association.
#
# ## Final remarks
# This was an excellent opportunity to both teach and learn. I have sought to come up with a methodology reflecting how I would have wanted my own quantum education to be; hopefully this will help others with similar learning tastes. If not, oh well, shift happens...
#
# <p align="center"><img src="http://assets.amuniversal.com/7c4d9f70a05b012f2fe600163e41dd5b"></p>
#
community/awards/teach_me_quantum_2018/TeachMeQ/README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project 4: Multi-factor Model # ## Instructions # Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity. # # ## Packages # When you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code. # # The other packages that we're importing are `project_helper` and `project_tests`. These are custom packages built to help you solve the problems. The `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems. # # ### Install Packages # + # Before you are able to use the packages installed on the jupyter notebook, # you need to install jupyter notebook at Terminal specifically for your virtualenv # (if you are using virtualenv), i.e. 
$ python -m pip install jupyter import sys # # !conda install -c quantopian zipline # must perform zipline installation by conda first # # !conda install pandas-datareader==0.2.1 # followed by this # # !{sys.executable} -m pip install -r requirements.txt # # !brew install graphviz # # !conda install -c conda-forge pydotplus # - # ### Load Packages # + import cvxpy as cvx import numpy as np import pandas as pd import time import project_tests import project_helper import matplotlib.pyplot as plt # %matplotlib inline plt.style.use('ggplot') plt.rcParams['figure.figsize'] = (14, 8) # - # # ## Data Bundle # We'll be using Zipline to handle our data. We've created a end of day data bundle for this project. Run the cell below to register this data bundle in zipline. # + import os import project_helper from zipline.data import bundles os.environ['ZIPLINE_ROOT'] = '/Users/leehongkai/.zipline' print(project_helper.EOD_BUNDLE_NAME) ingest_func = bundles.csvdir.csvdir_equities(['daily'], project_helper.EOD_BUNDLE_NAME) bundles.register(project_helper.EOD_BUNDLE_NAME, ingest_func) print('Data Registered') # - # ## Build Pipeline Engine # We'll be using Zipline's pipeline package to access our data for this project. To use it, we must build a pipeline engine. Run the cell below to build the engine. # + from zipline.pipeline import Pipeline from zipline.pipeline.factors import AverageDollarVolume from zipline.utils.calendars import get_calendar universe = AverageDollarVolume(window_length=120).top(450) trading_calendar = get_calendar('NYSE') bundle_data = bundles.load(project_helper.EOD_BUNDLE_NAME) engine = project_helper.build_pipeline_engine(bundle_data, trading_calendar) # - # ### View Data # With the pipeline engine built, let's get the stocks at the end of the period in the universe we're using. We'll use these tickers to generate the returns data for the our risk model. 
# + universe_end_date = pd.Timestamp('2019-04-04', tz='UTC') universe_tickers = engine\ .run_pipeline( Pipeline(screen=universe), universe_end_date, universe_end_date)\ .index.get_level_values(1)\ .values.tolist() universe_tickers # - # ## Get Returns # Not that we have our pipeline built, let's access the returns data. We'll start by building a data portal. # + from zipline.data.data_portal import DataPortal data_portal = DataPortal( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=bundle_data.equity_daily_bar_reader.first_trading_day, equity_minute_reader=None, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader) # - # To make the code easier to read, we've built the helper function `get_pricing` to get the pricing from the data portal. def get_pricing(data_portal, trading_calendar, assets, start_date, end_date, field='close'): end_dt = pd.Timestamp(end_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') start_dt = pd.Timestamp(start_date.strftime('%Y-%m-%d'), tz='UTC', offset='C') end_loc = trading_calendar.closes.index.get_loc(end_dt) start_loc = trading_calendar.closes.index.get_loc(start_dt) return data_portal.get_history_window( assets=assets, end_dt=end_dt, bar_count=end_loc - start_loc, frequency='1d', field=field, data_frequency='daily') # ### View Data # Let's get returns data for our risk model using the `get_pricing` function. For this model, we'll be looking back to 5 years of data. # + five_year_returns = \ get_pricing( data_portal, trading_calendar, universe_tickers, universe_end_date - pd.DateOffset(years=5), universe_end_date)\ .pct_change()[1:].fillna(0) five_year_returns # - # # Statistical Risk Model # It's time to build the risk model. You'll be creating a statistical risk model using PCA. So, the first thing is building the PCA model. 
# ## Fit PCA # Implement `fit_pca` to fit a PCA model to the returns data # + from sklearn.decomposition import PCA def fit_pca(returns, num_factor_exposures, svd_solver): """ Fit PCA model with returns. Parameters ---------- returns : DataFrame Returns for each ticker and date num_factor_exposures : int Number of factors for PCA svd_solver: str The solver to use for the PCA model Returns ------- pca : PCA Model fit to returns """ #TODO: Implement function pca = PCA(n_components = num_factor_exposures, svd_solver = svd_solver) return pca.fit(returns) # project_tests.test_fit_pca(fit_pca) # - # ### View Data # Let's see what the model looks like. First, we'll look at the PCA components. # + num_factor_exposures = 20 pca = fit_pca(five_year_returns, num_factor_exposures, 'full') pca.components_ # - # Let's also look at the PCA's percent of variance explained by each factor plt.bar(np.arange(num_factor_exposures), pca.explained_variance_ratio_) # You will see that the first factor dominates. The precise definition of each factor in a latent model is unknown, however we can guess at the likely interpretation. # ## Factor Betas # Implement `factor_betas` to get the factor betas from the PCA model. # + def factor_betas(pca, factor_beta_indices, factor_beta_columns): """ Get the factor betas from the PCA model. Parameters ---------- pca : PCA Model fit to returns factor_beta_indices : 1 dimensional Ndarray Factor beta indices factor_beta_columns : 1 dimensional Ndarray Factor beta columns Returns ------- factor_betas : DataFrame Factor betas """ assert len(factor_beta_indices.shape) == 1 assert len(factor_beta_columns.shape) == 1 # print(pd.DataFrame(pca.components_.T, factor_beta_indices, factor_beta_columns)) #TODO: Implement function return pd.DataFrame(pca.components_.T, factor_beta_indices, factor_beta_columns) project_tests.test_factor_betas(factor_betas) # - # ### View Data # Let's view the factor betas from this model. 
# + risk_model = {} risk_model['factor_betas'] = factor_betas(pca, five_year_returns.columns.values, np.arange(num_factor_exposures)) risk_model['factor_betas'] # - # ## Factor Returns # Implement `factor_returns` to get the factor returns from the PCA model using the returns data. # + def factor_returns(pca, returns, factor_return_indices, factor_return_columns): """ Get the factor returns from the PCA model. Parameters ---------- pca : PCA Model fit to returns returns : DataFrame Returns for each ticker and date factor_return_indices : 1 dimensional Ndarray Factor return indices factor_return_columns : 1 dimensional Ndarray Factor return columns Returns ------- factor_returns : DataFrame Factor returns """ assert len(factor_return_indices.shape) == 1 assert len(factor_return_columns.shape) == 1 #TODO: Implement function # print(pd.DataFrame(pca.transform(returns), factor_return_indices, factor_return_columns)) return pd.DataFrame(pca.transform(returns), factor_return_indices, factor_return_columns) project_tests.test_factor_returns(factor_returns) # - # ### View Data # Let's see what these factor returns looks like over time. # + risk_model['factor_returns'] = factor_returns( pca, five_year_returns, five_year_returns.index, np.arange(num_factor_exposures)) # risk_model['factor_returns'].shape risk_model['factor_returns'].cumsum().plot(legend=None) # - # ## Factor Covariance Matrix # Implement `factor_cov_matrix` to get the factor covariance matrix. 
# + def factor_cov_matrix(factor_returns, ann_factor): """ Get the factor covariance matrix Parameters ---------- factor_returns : DataFrame Factor returns ann_factor : int Annualization factor Returns ------- factor_cov_matrix : 2 dimensional Ndarray Factor covariance matrix """ #TODO: Implement function return np.diag(factor_returns.var(axis=0, ddof=1))*ann_factor project_tests.test_factor_cov_matrix(factor_cov_matrix) # - # ### View Data # + ann_factor = 252 risk_model['factor_cov_matrix'] = factor_cov_matrix(risk_model['factor_returns'], ann_factor) risk_model['factor_cov_matrix'] # - # ## Idiosyncratic Variance Matrix # Implement `idiosyncratic_var_matrix` to get the idiosyncratic variance matrix. # + def idiosyncratic_var_matrix(returns, factor_returns, factor_betas, ann_factor): """ Get the idiosyncratic variance matrix Parameters ---------- returns : DataFrame Returns for each ticker and date factor_returns : DataFrame Factor returns factor_betas : DataFrame Factor betas ann_factor : int Annualization factor Returns ------- idiosyncratic_var_matrix : DataFrame Idiosyncratic variance matrix """ #TODO: Implement function common_returns = pd.DataFrame(np.dot(factor_returns, factor_betas.T), returns.index, returns.columns) s_returns = returns - common_returns return pd.DataFrame(np.diag(np.var(s_returns))*ann_factor, returns.columns, returns.columns) project_tests.test_idiosyncratic_var_matrix(idiosyncratic_var_matrix) # - # ### View Data # + risk_model['idiosyncratic_var_matrix'] = idiosyncratic_var_matrix(five_year_returns, risk_model['factor_returns'], risk_model['factor_betas'], ann_factor) risk_model['idiosyncratic_var_matrix'] # - # ## Idiosyncratic Variance Vector # Implement `idiosyncratic_var_vector` to get the idiosyncratic variance Vector. 
# + def idiosyncratic_var_vector(returns, idiosyncratic_var_matrix): """ Get the idiosyncratic variance vector Parameters ---------- returns : DataFrame Returns for each ticker and date idiosyncratic_var_matrix : DataFrame Idiosyncratic variance matrix Returns ------- idiosyncratic_var_vector : DataFrame Idiosyncratic variance Vector """ #TODO: Implement function return pd.DataFrame(np.diag(idiosyncratic_var_matrix), returns.columns) project_tests.test_idiosyncratic_var_vector(idiosyncratic_var_vector) # - # ### View Data # + risk_model['idiosyncratic_var_vector'] = idiosyncratic_var_vector(five_year_returns, risk_model['idiosyncratic_var_matrix']) risk_model['idiosyncratic_var_vector'] # - # ## Predict using the Risk Model # Using the data we calculated in the risk model, implement `predict_portfolio_risk` to predict the portfolio risk using the formula $ \sqrt{X^{T}(BFB^{T} + S)X} $ where: # - $ X $ is the portfolio weights # - $ B $ is the factor betas # - $ F $ is the factor covariance matrix # - $ S $ is the idiosyncratic variance matrix # + def predict_portfolio_risk(factor_betas, factor_cov_matrix, idiosyncratic_var_matrix, weights): """ Get the predicted portfolio risk Formula for predicted portfolio risk is sqrt(X.T(BFB.T + S)X) where: X is the portfolio weights B is the factor betas F is the factor covariance matrix S is the idiosyncratic variance matrix Parameters ---------- factor_betas : DataFrame Factor betas factor_cov_matrix : 2 dimensional Ndarray Factor covariance matrix idiosyncratic_var_matrix : DataFrame Idiosyncratic variance matrix weights : DataFrame Portfolio weights Returns ------- predicted_portfolio_risk : float Predicted portfolio risk """ assert len(factor_cov_matrix.shape) == 2 #TODO: Implement function asset_variance = np.dot(np.dot(factor_betas, factor_cov_matrix),factor_betas.T) + idiosyncratic_var_matrix portfolio_variance = np.dot(np.dot(weights.T, asset_variance), weights) return np.sqrt(portfolio_variance[0][0]) 
project_tests.test_predict_portfolio_risk(predict_portfolio_risk) # - # ### View Data # Let's see what the portfolio risk would be if we had even weights across all stocks. # + all_weights = pd.DataFrame(np.repeat(1/len(universe_tickers), len(universe_tickers)), universe_tickers) predict_portfolio_risk( risk_model['factor_betas'], risk_model['factor_cov_matrix'], risk_model['idiosyncratic_var_matrix'], all_weights) # - # # Create Alpha Factors # With the profile risk calculated, it's time to start working on the alpha factors. In this project, we'll create the following factors: # - Momentum 1 Year Factor # - Mean Reversion 5 Day Sector Neutral Factor # - Mean Reversion 5 Day Sector Neutral Smoothed Factor # - Overnight Sentiment Factor # - Overnight Sentiment Smoothed Factor # # ## Momentum 1 Year Factor # Each factor will have a hypothesis that goes with it. For this factor, it is "Higher past 12-month (252 days) returns are proportional to future return." Using that hypothesis, we've generated this code: # + from zipline.pipeline.factors import Returns def momentum_1yr(window_length, universe, sector): return Returns(window_length=window_length, mask=universe) \ .demean(groupby=sector) \ .rank() \ .zscore() # - # ## Mean Reversion 5 Day Sector Neutral Factor # Now it's time for you to implement `mean_reversion_5day_sector_neutral` using the hypothesis "Short-term outperformers(underperformers) compared to their sector will revert." Use the returns data from `universe`, demean using the sector data to partition, rank, then converted to a zscore. 
# + def mean_reversion_5day_sector_neutral(window_length, universe, sector): """ Generate the mean reversion 5 day sector neutral factor Parameters ---------- window_length : int Returns window length universe : Zipline Filter Universe of stocks filter sector : Zipline Classifier Sector classifier Returns ------- factor : Zipline Factor Mean reversion 5 day sector neutral factor """ #TODO: Implement function return -Returns(window_length=window_length, mask=universe)\ .demean(groupby=sector)\ .rank()\ .zscore() project_tests.test_mean_reversion_5day_sector_neutral(mean_reversion_5day_sector_neutral) # - # ### View Data # Let's see what some of the factor data looks like. For calculating factors, we'll be looking back 2 years. # # **Note:** _Going back 2 years falls on a day when the market is closed. Pipeline package doesn't handle start or end dates that don't fall on days when the market is open. To fix this, we went back 2 extra days to fall on the next day when the market is open._ # + factor_start_date = universe_end_date - pd.DateOffset(years=2, days=2) sector = project_helper.Sector() window_length = 5 pipeline = Pipeline(screen=universe) pipeline.add( mean_reversion_5day_sector_neutral(window_length, universe, sector), 'Mean_Reversion_5Day_Sector_Neutral') engine.run_pipeline(pipeline, factor_start_date, universe_end_date) # - # ## Mean Reversion 5 Day Sector Neutral Smoothed Factor # Taking the output of the previous factor, let's create a smoothed version. Implement `mean_reversion_5day_sector_neutral_smoothed` to generate a mean reversion 5 fay sector neutral smoothed factor. Call the `mean_reversion_5day_sector_neutral` function to get the unsmoothed factor, then use `SimpleMovingAverage` function to smooth it. You'll have to apply rank and zscore again. 
# +
from zipline.pipeline.factors import SimpleMovingAverage


def mean_reversion_5day_sector_neutral_smoothed(window_length, universe, sector):
    """Generate the mean reversion 5 day sector neutral smoothed factor.

    Parameters
    ----------
    window_length : int
        Returns window length
    universe : Zipline Filter
        Universe of stocks filter
    sector : Zipline Classifier
        Sector classifier

    Returns
    -------
    factor : Zipline Factor
        Mean reversion 5 day sector neutral smoothed factor
    """
    # Smooth the raw factor with an SMA over the same window, then re-rank and re-zscore.
    unsmoothed = mean_reversion_5day_sector_neutral(window_length, universe, sector)
    return SimpleMovingAverage(inputs=[unsmoothed], window_length=window_length) \
        .rank() \
        .zscore()


project_tests.test_mean_reversion_5day_sector_neutral_smoothed(mean_reversion_5day_sector_neutral_smoothed)
# -

# ### View Data
# Let's see what some of the smoothed data looks like.

pipeline = Pipeline(screen=universe)
pipeline.add(
    mean_reversion_5day_sector_neutral_smoothed(5, universe, sector),
    'Mean_Reversion_5Day_Sector_Neutral_Smoothed')
engine.run_pipeline(pipeline, factor_start_date, universe_end_date)

# ## Overnight Sentiment Factor
# For this factor, we're using the hypothesis from the paper [_Overnight Returns and Firm-Specific Investor Sentiment_](https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2554010).

# +
from zipline.pipeline.data import USEquityPricing


class CTO(Returns):
    """
    Computes the overnight return, per hypothesis from
    https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2554010
    """
    inputs = [USEquityPricing.open, USEquityPricing.close]

    def compute(self, today, assets, out, opens, closes):
        """
        The opens and closes matrix is 2 rows x N assets, with the most recent at the bottom.
        As such, opens[-1] is the most recent open, and closes[0] is the earlier close
        """
        out[:] = (opens[-1] - closes[0]) / closes[0]


class TrailingOvernightReturns(Returns):
    """
    Sum of trailing 1m O/N returns
    """
    window_safe = True

    def compute(self, today, asset_ids, out, cto):
        # nansum so missing overnight returns don't poison the trailing sum.
        out[:] = np.nansum(cto, axis=0)


def overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe):
    """Ranked, z-scored trailing sum of close-to-open (overnight) returns."""
    cto_out = CTO(mask=universe, window_length=cto_window_length)
    return TrailingOvernightReturns(inputs=[cto_out], window_length=trail_overnight_returns_window_length) \
        .rank() \
        .zscore()
# -

# ## Overnight Sentiment Smoothed Factor
# Just like the factor you implemented, we'll also smooth this factor.


def overnight_sentiment_smoothed(cto_window_length, trail_overnight_returns_window_length, universe):
    """SMA-smoothed overnight sentiment factor, re-ranked and re-zscored."""
    unsmoothed_factor = overnight_sentiment(cto_window_length, trail_overnight_returns_window_length, universe)
    return SimpleMovingAverage(inputs=[unsmoothed_factor], window_length=trail_overnight_returns_window_length) \
        .rank() \
        .zscore()


# ## Combine the Factors to a single Pipeline
# With all the factor implementations done, let's add them to a pipeline.
# + universe = AverageDollarVolume(window_length=120).top(500) sector = project_helper.Sector() pipeline = Pipeline(screen=universe) pipeline.add( momentum_1yr(252, universe, sector), 'Momentum_1YR') pipeline.add( mean_reversion_5day_sector_neutral(5, universe, sector), 'Mean_Reversion_5Day_Sector_Neutral') pipeline.add( mean_reversion_5day_sector_neutral_smoothed(5, universe, sector), 'Mean_Reversion_5Day_Sector_Neutral_Smoothed') pipeline.add( overnight_sentiment(2, 5, universe), 'Overnight_Sentiment') pipeline.add( overnight_sentiment_smoothed(2, 5, universe), 'Overnight_Sentiment_Smoothed') all_factors = engine.run_pipeline(pipeline, factor_start_date, universe_end_date) all_factors.head() # - # # Evaluate Alpha Factors # *Note:* _We're evaluating the alpha factors using delay of 1_ # ## Get Pricing Data # + import alphalens as al assets = all_factors.index.levels[1].values.tolist() pricing = get_pricing( data_portal, trading_calendar, assets, factor_start_date, universe_end_date) # - np.__version__ # ## Format alpha factors and pricing for Alphalens # In order to use a lot of the alphalens functions, we need to aligned the indices and convert the time to unix timestamp. In this next cell, we'll do just that. # + clean_factor_data = { factor: al.utils.get_clean_factor_and_forward_returns(factor=factor_data, prices=pricing, periods=[1]) for factor, factor_data in all_factors.iteritems()} unixt_factor_data = { factor: factor_data.set_index(pd.MultiIndex.from_tuples( [(x.timestamp(), y) for x, y in factor_data.index.values], names=['date', 'asset'])) for factor, factor_data in clean_factor_data.items()} # - # ## Quantile Analysis # ### Factor Returns # Let's view the factor returns over time. We should be seeing it generally move up and to the right. 
# + ls_factor_returns = pd.DataFrame() for factor, factor_data in clean_factor_data.items(): ls_factor_returns[factor] = al.performance.factor_returns(factor_data).iloc[:, 0] (1+ls_factor_returns).cumprod().plot() # - # ### Basis Points Per Day per Quantile # It is not enough to look just at the factor weighted return. A good alpha is also monotonic in quantiles. Let's looks the basis points for the factor returns. # + qr_factor_returns = pd.DataFrame() for factor, factor_data in unixt_factor_data.items(): qr_factor_returns[factor] = al.performance.mean_return_by_quantile(factor_data)[0].iloc[:, 0] (10000*qr_factor_returns).plot.bar( subplots=True, sharey=True, layout=(4,2), figsize=(14, 14), legend=False) # - # What do you observe? # # - None of these alphas are **strictly monotonic**; this should lead you to question why this is? Further research and refinement of the alphas needs to be done. What is it about these alphas that leads to the highest ranking stocks in all alphas except MR 5D smoothed to *not* perform the best. # - The majority of the return is coming from the **short side** in all these alphas. The negative return in quintile 1 is very large in all alphas. This could also a cause for concern becuase when you short stocks, you need to locate the short; shorts can be expensive or not available at all. # - If you look at the magnitude of the return spread (i.e., Q1 minus Q5), we are working with daily returns in the 0.03%, i.e., **3 basis points**, neighborhood *before all transaction costs, shorting costs, etc.*. Assuming 252 days in a year, that's 7.56% return annualized. Transaction costs may cut this in half. As such, it should be clear that these alphas can only survive in an institutional setting and that leverage will likely need to be applied in order to achieve an attractive return. # # ## Turnover Analysis # # Without doing a full and formal backtest, we can analyze how stable the alphas are over time. 
Stability in this sense means that from period to period, the alpha ranks do not change much. Since trading is costly, we always prefer, all other things being equal, that the ranks do not change significantly per period. We can measure this with the **factor rank autocorrelation (FRA)**. # # [alphalens.performance.factor_rank_autocorrelation](https://quantopian.github.io/alphalens/alphalens.html?highlight=factor_rank_autocorrelation#alphalens.performance.factor_rank_autocorrelation) # + ls_FRA = pd.DataFrame() for factor, factor_data in unixt_factor_data.items(): ls_FRA[factor] = al.performance.factor_rank_autocorrelation(factor_data) ls_FRA.plot(title="Factor Rank Autocorrelation") # - # ## Sharpe Ratio of the Alphas # # The last analysis we'll do on the factors will be sharpe ratio. Implement `sharpe_ratio` to calculate the sharpe ratio of factor returns. # + def sharpe_ratio(factor_returns, annualization_factor): """ Get the sharpe ratio for each factor for the entire period Parameters ---------- factor_returns : DataFrame Factor returns for each factor and date annualization_factor: float Annualization Factor Returns ------- sharpe_ratio : Pandas Series of floats Sharpe ratio """ #TODO: Implement function return annualization_factor*factor_returns.mean()/factor_returns.std() project_tests.test_sharpe_ratio(sharpe_ratio) # - # ### View Data # Let's see what the sharpe ratio for the factors are. Generally, a Sharpe Ratio of near 1.0 or higher is an acceptable single alpha for this universe. daily_annualization_factor = np.sqrt(252) sharpe_ratio(ls_factor_returns, daily_annualization_factor).round(2) # ## Question: What do you think would happen if we smooth the momentum factor? Would the performance increase, decrease, or no major change? Why? # #TODO: Put Answer In this Cell_ # # **Answer:** # # Based on the FRA plots, 1-YR Momentum factor was close to 1 at all time (generally stable). 
In fact it gave the most stable correlations within the study period, compared to the rest of the factors.
#
# As smoothing functions to reduce impact of noise (fluctuation of the factor return values in this case) in the factors, the performance of momentum factor in terms of the stability of the factor return within the study period (i.e. factor return autocorrelation) is less likely going to benefit from the smoothing approach.
#
# ## The Combined Alpha Vector
#
# To use these alphas in a portfolio, we need to combine them somehow so we get a single score per stock. This is an area where machine learning can be very helpful. In this module, however, we will take the simplest approach of combination: simply averaging the scores from each alpha.

# Positional selection of three of the factor columns added to the pipeline.
# NOTE(review): the selection is by index, not by name -- confirm [1, 2, 4]
# still match the intended factors if pipeline columns are ever reordered.
selected_factors = all_factors.columns[[1, 2, 4]]
print('Selected Factors: {}'.format(', '.join(selected_factors)))

# Equal-weight combination: the combined alpha is the mean of the selected factors.
all_factors['alpha_vector'] = all_factors[selected_factors].mean(axis=1)
alphas = all_factors[['alpha_vector']]
# Keep only the latest date's cross-section (last level-0 index value) for the
# single-period optimization below.
alpha_vector = alphas.loc[all_factors.index.get_level_values(0)[-1]]
alpha_vector.head()

# # Optimal Portfolio Constrained by Risk Model
# You have an alpha model and a risk model. Let's find a portfolio that trades as close as possible to the alpha model but limiting risk as measured by the risk model. You'll be building the optimizer for this portfolio. To help you out, we have provided you with an abstract class called `AbstractOptimalHoldings`.
# + from abc import ABC, abstractmethod class AbstractOptimalHoldings(ABC): @abstractmethod def _get_obj(self, weights, alpha_vector): """ Get the objective function Parameters ---------- weights : CVXPY Variable Portfolio weights alpha_vector : DataFrame Alpha vector Returns ------- objective : CVXPY Objective Objective function """ raise NotImplementedError() @abstractmethod def _get_constraints(self, weights, factor_betas, risk): """ Get the constraints Parameters ---------- weights : CVXPY Variable Portfolio weights factor_betas : 2 dimensional Ndarray Factor betas risk: CVXPY Atom Predicted variance of the portfolio returns Returns ------- constraints : List of CVXPY Constraint Constraints """ raise NotImplementedError() def _get_risk(self, weights, factor_betas, alpha_vector_index, factor_cov_matrix, idiosyncratic_var_vector): f = factor_betas.loc[alpha_vector_index].values.T * weights X = factor_cov_matrix S = np.diag(idiosyncratic_var_vector.loc[alpha_vector_index].values.flatten()) return cvx.quad_form(f, X) + cvx.quad_form(weights, S) def find(self, alpha_vector, factor_betas, factor_cov_matrix, idiosyncratic_var_vector): weights = cvx.Variable(len(alpha_vector)) risk = self._get_risk(weights, factor_betas, alpha_vector.index, factor_cov_matrix, idiosyncratic_var_vector) obj = self._get_obj(weights, alpha_vector) constraints = self._get_constraints(weights, factor_betas.loc[alpha_vector.index].values, risk) prob = cvx.Problem(obj, constraints) prob.solve(max_iters=500) optimal_weights = np.asarray(weights.value).flatten() return pd.DataFrame(data=optimal_weights, index=alpha_vector.index) # - # ## Objective and Constraints # Using this class as a base class, you'll implement the `OptimalHoldings` class. There's two functions that need to be implemented in this class, the `_get_obj` and `_get_constraints` functions. 
#
# The `_get_obj` function should return a CVXPY objective function that maximizes $ \alpha^T * x \\ $, where $ x $ is the portfolio weights and $ \alpha $ is the alpha vector.
#
# The `_get_constraints` function should return a list of the following constraints:
# - $ r \leq risk_{\text{cap}}^2 \\ $
# - $ B^T * x \preceq factor_{\text{max}} \\ $
# - $ B^T * x \succeq factor_{\text{min}} \\ $
# - $ x^T\mathbb{1} = 0 \\ $
# - $ \|x\|_1 \leq 1 \\ $
# - $ x \succeq weights_{\text{min}} \\ $
# - $ x \preceq weights_{\text{max}} $
#
# Where $ x $ is the portfolio weights, $ B $ is the factor betas, and $ r $ is the portfolio risk
#
# The first constraint is that the predicted risk be less than some maximum limit. The second and third constraints are on the maximum and minimum portfolio factor exposures. The fourth constraint is the "market neutral constraint": the sum of the weights must be zero. The fifth constraint is the leverage constraint: the sum of the absolute values of the weights must be less than or equal to 1.0. The last two are minimum and maximum limits on individual holdings.
# + class OptimalHoldings(AbstractOptimalHoldings): def _get_obj(self, weights, alpha_vector): """ Get the objective function Parameters ---------- weights : CVXPY Variable Portfolio weights alpha_vector : DataFrame Alpha vector Returns ------- objective : CVXPY Objective Objective function """ assert(len(alpha_vector.columns) == 1) #TODO: Implement function return cvx.Maximize(np.array(alpha_vector.T)*weights) def _get_constraints(self, weights, factor_betas, risk): """ Get the constraints Parameters ---------- weights : CVXPY Variable Portfolio weights factor_betas : 2 dimensional Ndarray Factor betas risk: CVXPY Atom Predicted variance of the portfolio returns Returns ------- constraints : List of CVXPY Constraint Constraints """ assert(len(factor_betas.shape) == 2) #TODO: Implement function return [risk<=self.risk_cap**2.0, \ np.array(factor_betas.T)*weights<=self.factor_max, \ np.array(factor_betas.T)*weights>=self.factor_min, \ sum(weights)==0.0, \ sum(cvx.abs(weights))<=1.0, \ weights>=self.weights_min, \ weights<=self.weights_max ] def __init__(self, risk_cap=0.05, factor_max=10.0, factor_min=-10.0, weights_max=0.55, weights_min=-0.55): self.risk_cap=risk_cap self.factor_max=factor_max self.factor_min=factor_min self.weights_max=weights_max self.weights_min=weights_min project_tests.test_optimal_holdings_get_obj(OptimalHoldings) project_tests.test_optimal_holdings_get_constraints(OptimalHoldings) # - # ### View Data # With the `OptimalHoldings` class implemented, let's see the weights it generates. # + optimal_weights = OptimalHoldings().find(alpha_vector, risk_model['factor_betas'], risk_model['factor_cov_matrix'], risk_model['idiosyncratic_var_vector']) optimal_weights.plot.bar(legend=None, title='Portfolio % Holdings by Stock') x_axis = plt.axes().get_xaxis() x_axis.set_visible(False) # - # Yikes. It put most of the weight in a few stocks. 
project_helper.get_factor_exposures(risk_model['factor_betas'], optimal_weights).plot.bar( title='Portfolio Net Factor Exposures', legend=False) # ## Optimize with a Regularization Parameter # In order to enforce diversification, we'll use regularization in the objective function. We'll create a new class called `OptimalHoldingsRegualization` which gets its constraints from the `OptimalHoldings` class. In this new class, implement the `_get_obj` function to return a CVXPY objective function that maximize $ \alpha^T * x + \lambda\|x\|_2\\ $, where $ x $ is the portfolio weights, $ \alpha $ is the alpha vector, and $ \lambda $ is the regularization parameter. # # **Note:** * $ \lambda $ is located in `self.lambda_reg`. * # + class OptimalHoldingsRegualization(OptimalHoldings): def _get_obj(self, weights, alpha_vector): """ Get the objective function Parameters ---------- weights : CVXPY Variable Portfolio weights alpha_vector : DataFrame Alpha vector Returns ------- objective : CVXPY Objective Objective function """ assert(len(alpha_vector.columns) == 1) #TODO: Implement function return cvx.Maximize(np.array(alpha_vector.T)*weights - \ self.lambda_reg * cvx.norm(weights, 2) ) def __init__(self, lambda_reg=0.5, risk_cap=0.05, factor_max=10.0, factor_min=-10.0, weights_max=0.55, weights_min=-0.55): self.lambda_reg = lambda_reg self.risk_cap=risk_cap self.factor_max=factor_max self.factor_min=factor_min self.weights_max=weights_max self.weights_min=weights_min project_tests.test_optimal_holdings_regualization_get_obj(OptimalHoldingsRegualization) # - # ### View Data # + optimal_weights_1 = OptimalHoldingsRegualization(lambda_reg=5.0).find(alpha_vector, risk_model['factor_betas'], risk_model['factor_cov_matrix'], risk_model['idiosyncratic_var_vector']) optimal_weights_1.plot.bar(legend=None, title='Portfolio % Holdings by Stock') x_axis = plt.axes().get_xaxis() x_axis.set_visible(False) # - # Nice. Well diversified. 
project_helper.get_factor_exposures(risk_model['factor_betas'], optimal_weights_1).plot.bar( title='Portfolio Net Factor Exposures', legend=False) # ## Optimize with a Strict Factor Constraints and Target Weighting # Another common formulation is to take a predefined target weighting, $x^*$ (e.g., a quantile portfolio), and solve to get as close to that portfolio while respecting portfolio-level constraints. For this next class, `OptimalHoldingsStrictFactor`, you'll implement the `_get_obj` function to minimize on on $ \|x - x^*\|_2 $, where $ x $ is the portfolio weights $ x^* $ is the target weighting. # + class OptimalHoldingsStrictFactor(OptimalHoldings): def _get_obj(self, weights, alpha_vector): """ Get the objective function Parameters ---------- weights : CVXPY Variable Portfolio weights alpha_vector : DataFrame Alpha vector Returns ------- objective : CVXPY Objective Objective function """ assert(len(alpha_vector.columns) == 1) #TODO: Implement function alpha_vector1 = np.array(alpha_vector) #Lesson 29-8 x_star = (alpha_vector1 - alpha_vector1.mean())/sum(abs(alpha_vector1)) return cvx.Minimize(cvx.norm(weights-x_star.reshape(len(x_star),),2, )) project_tests.test_optimal_holdings_strict_factor_get_obj(OptimalHoldingsStrictFactor) # - # ### View Data # + optimal_weights_2 = OptimalHoldingsStrictFactor( weights_max=0.02, weights_min=-0.02, risk_cap=0.0015, factor_max=0.015, factor_min=-0.015).find(alpha_vector, risk_model['factor_betas'], risk_model['factor_cov_matrix'], risk_model['idiosyncratic_var_vector']) optimal_weights_2.plot.bar(legend=None, title='Portfolio % Holdings by Stock') x_axis = plt.axes().get_xaxis() x_axis.set_visible(False) # - project_helper.get_factor_exposures(risk_model['factor_betas'], optimal_weights_2).plot.bar( title='Portfolio Net Factor Exposures', legend=False) # ## Submission # Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. 
One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
Alpha_Research_and_Factor_Modeling/project_4_production.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.13 64-bit (''cocorepr36'': conda)' # name: python3 # --- # # Crop tree representation # > Methods to work with `crop_tree` representation: load/dump from/to a directory. # + # default_exp crop_tree # - # %load_ext autoreload # %autoreload 2 # + # export import json import shutil import logging from collections import defaultdict from dataclasses import dataclass, Field from typing import * from pathlib import Path from multiprocessing import Pool, Lock from cocorepr.utils import sort_dict, measure_time, read_image, write_image, cut_bbox from cocorepr.coco import * # - logger = logging.getLogger() # hide logging.basicConfig(level=logging.DEBUG) # + # export def load_crop_tree( source_dir: Union[str, Path], base_coco: CocoDataset, *, kind: str = "object_detection", ) -> CocoDataset: """ Load modified set of crops from `{path}/crops` and use it to filter out the annotations in `base_coco`. 
""" dataset_class = get_dataset_class(kind) source_dir = Path(source_dir) logger.info(f"Loading crop_tree from dir: {source_dir}") if not source_dir.is_dir(): raise ValueError(f"Source dir not found: {source_dir}") crops_dir = source_dir / 'crops' if not crops_dir.exists(): raise ValueError(f'Source crops dir not found: {crops_dir}') catid2cat = {cat.id: cat for cat in base_coco.categories} imgid2img = {img.id: img for img in base_coco.images} annid2ann = {ann.id: ann for ann in base_coco.annotations} annid2imgid = {ann.id: ann.image_id for ann in base_coco.annotations} res_cats = {} res_imgs = {} res_anns = {} with measure_time() as timer1: for count1, ann_dir in enumerate(crops_dir.iterdir(), 1): cat_id = str(ann_dir.name.split('--')[-1]) cat = catid2cat[cat_id] with measure_time() as timer2: for count2, ann_file in enumerate(ann_dir.glob('*.png'), 1): ann_id = str(ann_file.stem) ann = annid2ann[ann_id] img_id = annid2imgid[ann_id] img = imgid2img[img_id] res_cats[cat.id] = cat res_imgs[img.id] = img res_anns[ann.id] = ann logger.debug(f'- loaded {count2} crops from {ann_dir}: elapsed {timer2.elapsed}') logger.info(f'Loaded from {count1} crop directories: elapsed {timer1.elapsed}') with measure_time() as timer: D = { **base_coco.to_dict(), 'images': list(res_imgs.values()), 'annotations': list(res_anns.values()), 'categories': list(res_cats.values()), } logger.info(f'Dataset dict constructed: elapsed {timer.elapsed}') with measure_time() as timer: coco = dataset_class.from_dict(D) logger.info(f'Dataset object constructed: elapsed {timer.elapsed}: {coco.to_full_str()}') return coco # + # hide import tempfile from cocorepr.json_tree import load_json_tree, dump_json_tree from pathlib import Path SRC_COCO = '../examples/coco_chunk/json_tree/' SRC_BLOB = '../examples/coco_chunk/crop_tree/' expected_crop_ids = sorted([str(p.stem) for cat_p in (Path(SRC_BLOB)/'crops').iterdir() for p in cat_p.iterdir()]) display(expected_crop_ids) DST = tempfile.mktemp() coco_json_tree 
= load_json_tree(SRC_COCO) display(coco_json_tree) d = load_crop_tree(SRC_BLOB, coco_json_tree) actual_crop_ids = sorted([ann.id for ann in d.annotations]) display(actual_crop_ids) assert actual_crop_ids == expected_crop_ids, actual_crop_ids # + # # export # def _cut_to_chunks(L: List[Any], n) -> List[List[Any]]: # assert n > 0 # return [ # L[i: i+n] + [None]*(n - len(L[i: i+n])) # for i in range(0, len(L), n) # ] # + # res = _cut_to_chunks([1,2,3,4,5,6,7], 4) # assert res == [[1, 2, 3, 4], [5, 6, 7, None]] # res = _cut_to_chunks([1,2,3,4,5,6,7], 3) # assert res == [[1, 2, 3], [4, 5, 6], [7, None, None]], res # res = _cut_to_chunks([1,2,3,4,5,6,7], 2) # assert res == [[1, 2], [3, 4], [5, 6], [7, None]], res # res = _cut_to_chunks([1,2,3,4,5,6,7], 1) # assert res == [[1], [2], [3], [4], [5], [6], [7]], res # res = _cut_to_chunks([], 2) # assert res == [] # + # export def _delete_extra_files(coco, target_dir, images_dir, crops_dir, catid2cat): logger.info(f'Collecting extra files to clean in root {target_dir}') to_remove = [] a = {images_dir} b = {images_dir/img.get_file_name() for img in coco.images} c = {crops_dir} d = {crops_dir/catid2cat[cat.id].get_dir_name() for cat in coco.categories} e = {crops_dir/catid2cat[ann.category_id].get_dir_name()/ann.get_file_name() for ann in coco.annotations} all_files = a | b | c | d | e for p in target_dir.glob('**/*'): if p not in all_files: to_remove.append(p) to_remove = sorted(to_remove) removed_str = '\n'.join(map(str, to_remove)) if removed_str: removed_str = '\n' + removed_str logger.info(f'Removing {len(to_remove)} files and dirs:{removed_str}') # reversed so that files get deleted before their dirs for p in reversed(to_remove): try: if p.is_file(): p.unlink() else: shutil.rmtree(str(p)) except BaseException as e: fod = 'file' if p.is_file() else 'dir' logger.warning(f'Could not delete {fod} {p} (ignoring!): {e}') continue logger.info(f'[.] 
Removed {len(to_remove)} files and dirs.') def _process_image(img, anns, images_dir, crops_dir, catid2cat, anns_failed_file): file_name = img.get_file_name() image_file = images_dir / file_name image = None for ann in anns: cat = catid2cat[ann.category_id] cat_dir = crops_dir / cat.get_dir_name() cat_dir.mkdir(exist_ok=True) ann_file = cat_dir / ann.get_file_name() if ann_file.is_file(): continue if image is None: image = read_image(image_file, download_url=img.coco_url) try: box = cut_bbox(image, ann.bbox) write_image(box, ann_file) except ValueError as e: logger.error(f"{e}. Img({img.coco_url}), BBox({ann.bbox})") with anns_failed_file.open('a') as f: f.write(json.dumps(ann.to_dict(), ensure_ascii=False) + '\n') def _process_image_list(args): if not args: return _process_image(*args) def dump_crop_tree( coco: CocoDataset, target_dir: Union[str, Path], *, kind: str = 'object_detection', skip_nulls: bool = True, overwrite: bool = False, indent: Optional[int] = 4, num_processes: int = 1, ) -> None: try: from tqdm.auto import tqdm except ImportError: logger.warning("Could not import tqdm, please run 'pip install tqdm'") def tqdm(it, *args, **kwargs): yield from it dataset_class = get_dataset_class(kind) if skip_nulls: to_dict_function = dataset_class.to_dict_skip_nulls else: to_dict_function = dataset_class.to_dict target_dir = Path(target_dir) logger.info(f"Dumping crop_tree to dir: {target_dir}") if overwrite: if target_dir.is_dir(): logger.warning(f'Destination and will be overwritten: {target_dir}') elif target_dir.is_dir(): raise ValueError(f"Destination json tree dir already exists: {target_dir}") #if overwrite and target_dir.is_dir(): # logger.info(f'Deleting old target directory {target_dir}') # shutil.rmtree(str(target_dir)) target_dir.mkdir(parents=True, exist_ok=True) catid2cat = {cat.id: cat for cat in coco.categories} imgid2img = {img.id: img for img in coco.images} imgid2anns = defaultdict(list) for ann in coco.annotations: 
imgid2anns[ann.image_id].append(ann) images_dir = target_dir / 'images' images_dir.mkdir(exist_ok=True) crops_dir = target_dir / 'crops' crops_dir.mkdir(exist_ok=True) anns_failed = [] anns_failed_file = crops_dir / 'crops_failed.ndjson' if overwrite and crops_dir.is_dir(): _delete_extra_files(coco, target_dir, images_dir, crops_dir, catid2cat) with measure_time() as timer: pairs = [ (imgid2img[imgid], anns, images_dir, crops_dir, catid2cat, anns_failed_file) for (imgid, anns) in imgid2anns.items() ] #chunks = _cut_to_chunks(pairs, num_processes) with Pool(num_processes) as pool: #for chunk in tqdm(chunks): list(tqdm(pool.imap(_process_image_list, pairs), total=len(pairs), desc='Processing images')) #process_map(_process_image_list, chunks, total=len(imgid2anns), desc='Processing images', max_workers=num_processes) logger.info(f'Crops written to {crops_dir}: elapsed {timer.elapsed}') if anns_failed: logger.warning(f'Failed to process {len(anns_failed)} crops, see file {anns_failed_file}') # + # hide import tempfile from cocorepr.json_tree import load_json_tree SRC = '../examples/coco_chunk/json_tree/' DST = tempfile.mktemp() d = load_json_tree(SRC) dump_crop_tree(d, DST, num_processes=2) actual = ! ls {DST}/crops actual = set(actual) expected = set(c.get_dir_name() for c in d.categories) assert expected == actual, (expected, actual) # some_crops = !ls expected # + crop_files = ! ls {DST}/crops some_crops = ! ls {DST}/crops/{crop_files[0]} # ! echo {some_crops} deleted_crop = some_crops[0] # ! rm {DST}/crops/{crop_files[0]}/{deleted_crop} # ! ls {DST}/crops/{crop_files[0]} d2 = load_crop_tree(DST, d) assert deleted_crop not in d2.to_json(), (deleted_crop, d2.to_json()) # -
nbs/04_crop_tree.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import openpyxl

# Location of the workbook to read.
path = "Example for data processing (3).xlsx"

# Open the workbook and take the active worksheet.
wb_obj = openpyxl.load_workbook(path)
sheet_obj = wb_obj.active

count = 0   # number of non-empty cells found in column 4, starting at row 10
count1 = 0  # running sum of those cell values
m_row = sheet_obj.max_row

# Walk column 4 from row 10 down and stop at the first empty cell.
# The empty-cell test must run BEFORE the accumulation: the original code
# added cell_obj.value to count1 first, so the terminating empty cell raised
# `TypeError: unsupported operand type(s) ... 'NoneType'` instead of breaking.
for i in range(10, m_row + 1):
    cell_obj = sheet_obj.cell(row=i, column=4)
    if cell_obj.value is None:
        break
    count = count + 1
    count1 = count1 + cell_obj.value
    print(cell_obj.value)

print(m_row)
print(count)
# The original second pass over the cells has been removed: it had a syntax
# error (missing ':' after the `for`), re-added values into the already
# populated count1 (double counting), and used `count` -- an item count -- as
# if it were the last row number. The single loop above already yields the
# final sum.
print(count1)
# -
panda/.ipynb_checkpoints/Untitled14-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Proyecto Integrador # Este informe/proyecto es una continuación de proyecto 3 de Series de tiempo del sprint 3 del programa intensivo de Data Science. Se puede descargar el proyecto [aquí](https://github.com/juanchi10/Proyecto-Intregador/blob/main/DS_Proyecto_03%20-%20Juan_Badino.ipynb) o se puede clonar todo el repositorio completo vía [HTTPs](https://github.com/juanchi10/Proyecto-Intregador.git). [SSH](<EMAIL>:juanchi10/Proyecto-Intregador.git) o descargar tipo [ZIP](https://github.com/juanchi10/Proyecto-Intregador/archive/refs/heads/main.zip). # Desde la administración de la __Ciudad de Buenos Aires__ buscamos digitalizar todos los peajes de la Capital Federal, con el objetivo aumentar la fluidez de tránsito, y a su vez permitir mejoras en la calidad del servicio. De esa manera, este análisis lo hacemos para poder tomar mayor información de flujo vehicular en relación a el modelo de pago con __efectivo__ del peaje __Illia__ en estos años y planificar a futuro. Sabemos que en Argentina la mitad de la población no está bancarizada y la mitad de la economía se maneja en negro. # # Por lo tanto, el cambio de modelo de pago efectivo no puede darse de un día para el otro sino que debería ir bajando en el tiempo para incorporar el método de Telepase o TAG digital. Con esta predicción acompañada de la tendencia que ya vimos en el análisis anterior vamos a poder tomar mejores ideas para generar este cambio y no hacerlo a ciegas. Podríamos entender en qué momento de los siguientes años se eliminará por completo el método de pago en efectivo. Al mismo tiempo, sabiendo cuando va a ser ese momento estimado podemos implementar medidas para acelerar o retrasar dicho momento dependiendo qué se busca desde la administración de la __Ciudad de Buenos Aires__. 
# # Para eso, vamos a a usar los modelos del Proyecto 3 como Benchmark para luego generar un nuevo modelo más avanzado mutlivariable con Long Short-Term Memory Network (LSTM) en Keras. # # + [markdown] heading_collapsed="true" # ### Imports # - import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.style as style import datetime from math import sqrt from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.metrics import mean_squared_error from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM # + [markdown] heading_collapsed="true" # ### Modelo LSTM con Keras # + [markdown] heading_collapsed="true" # #### Introducción # - # Basándonos en la sugerencia de __<NAME>__ en las correcciones del proyecto 3 vamos a usar la base propuesta por __<NAME>__ el 14 de agosto de 2017 en [Deep Learning Learning for Time Series](https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/). Para el próximo paso de puede descargar el dataset aquí. [aquí](https://github.com/juanchi10/Proyecto-Intregador/blob/main/dataset_dia.csv). Son datos preprocesados del proyecto 3 del Peaje Illia, método efectivo. # # Al mismo tiempo, para profundizar en el tema __<NAME>__ ha creado un [guía](https://machinelearningmastery.com/lstms-with-python/) muy interesante. df = pd.read_csv('/Users/juanchi/Documents/Data Science/Curso/Proyecto 3/dataset_dia.csv', header=0, index_col=0) df.shape # La [función siguiente](https://machinelearningmastery.com/convert-time-series-supervised-learning-problem-python/) nos permite convertir nuestra serie de datos en un problema de aprendizaje supervisado. `data` es la serie, `n_in` es la cantidad de datos anteriores queremos usar para predecir un nuevo valor y `n_out` cuánto para adelante vamos a predecir. 
#
# En nuestro caso vamos a trabajar con One-Step Prediction, usando __120__ datos anteriores porque vimos que era un número adecuado según nuestro dataset según el análisis de correlación.
#
# Por lo tanto, esta función creará nuevas columnas con __var1 (t-1)__ y __var2 (t-1)__ que es `cantidad_pasos`y `dias` de un día anterior hasta __var1 (t-120)__ y __var12 (t-120)__, siendo __var1 (t)__ la variable a predecir.

def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a (multivariate) time series as a supervised-learning table.

    Each input variable j produces lag columns ``var{j}(t-i)`` for
    i = n_in .. 1 and forecast columns ``var{j}(t)`` .. ``var{j}(t+n_out-1)``.
    Rows containing the NaNs introduced by the shifting are removed when
    ``dropnan`` is True.
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    frame = pd.DataFrame(data)
    cols, names = [], []
    # input sequence: columns shifted forward in time (t-n_in, ..., t-1)
    for lag in range(n_in, 0, -1):
        cols.append(frame.shift(lag))
        names += ['var%d(t-%d)' % (var + 1, lag) for var in range(n_vars)]
    # forecast sequence: columns shifted backward (t, t+1, ..., t+n_out-1)
    for step in range(0, n_out):
        cols.append(frame.shift(-step))
        if step == 0:
            names += ['var%d(t)' % (var + 1) for var in range(n_vars)]
        else:
            names += ['var%d(t+%d)' % (var + 1, step) for var in range(n_vars)]
    # glue all shifted copies together side by side and label the columns
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # discard the incomplete rows created by the shifts
    if dropnan:
        agg.dropna(inplace=True)
    return agg

# Como marcamos en los comentarios finales del proyecto, la intención era crear un modelo más avanzado con otras variables que afectan a
# `cantidad_pasos`. Para eso queremos entender cómo influye la variable día de la semana. Sabemos que los días domingos es el pico inferior de pases diarios y el viernes el pico mayor. Para eso, vamos a encoderar las variables para que sean núméricas. Vamos a usar un __LabelEncoder__ y luego vamos a escalar todo el dataset para normalizarlo. En esta parte se podría probar hacer un __get_dummies__ o un __oneHot_Enconder__ también.

# + [markdown] heading_collapsed="true"
# #### Preparación de los datos

# +
#LabelEncoder de la columna que tiene los días.
values = df.values encoder = LabelEncoder() values[:,1] = encoder.fit_transform(values[:,1]) values = values.astype('float32') # Escalamos todo el dataset scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(values) # Llamamos a la función que convierte la serie para aprendizaje supervisado lags = 120 reframed = series_to_supervised(scaled, lags , 1) # Dejamos de lado la columna que no tiene sentido predecir que es el día. reframed.drop(reframed.columns[[lags * df.shape[1] + 1]], axis=1, inplace=True) print(reframed.shape) # - # Hasta aquí nos queda un dataset de __975 filas con 241 columnas__. ¿A qué responde esto? Tenemos __3 años o 1095 días de datos de 2017, 2018, 2019__. Al usar 120 datos anteriores solamente podemos usar __975 datos para predecir__. Al mismo tiempo, tenemos 240 variables predictoras en columnas y 1 variable objetivo. # + [markdown] heading_collapsed="true" # #### Train y Test # + # Dividir el dataset en TRAIN y TEST values = reframed.values n_train_dias = int(reframed.shape[0]*0.7) #Usamos el estándar de 70/30 train = values[:n_train_dias, :] test = values[n_train_dias:, :] print(n_train_dias) # Dividimos en entradas y salidas train_X, train_y = train[:, :-1], train[:, -1] test_X, test_y = test[:, :-1], test[:, -1] # Se modifica el formato de entrada para que sea 3D y tenga los componentes necesarios de la red [samples, timesteps, features]. train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1])) test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) print(train_X.shape, train_y.shape, test_X.shape, test_y.shape) # + [markdown] heading_collapsed="true" # #### Armado y entrenamiento del modelo # - # Creamos la red que vamos a usar con 50 neuronas en una primera capa y 1 de resultado esperado, luego vamos a optimizar esos parámetros.* En este caso, __Jason__ nos propone usar MAE como métrica de error y la versión de Adam de descenso por gradiente para calcular el costo. 
También vamos a usar 120 epoch que es un hiperparámetro que define el número de veces que el algoritmo funcionará frente al dataset. # # *Luego de optimizar a mano, logramos que 90 Epoch nos diera el mejor resultado. # + model = Sequential() model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2]))) model.add(Dense(1)) model.compile(loss='mae', optimizer='adam') # Entrenamos la Red history = model.fit(train_X, train_y, epochs=90, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False) # Ploteamos el historial plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.legend() plt.show() # + [markdown] heading_collapsed="true" # #### Predicción # + # Predicción test_X yhat = model.predict(test_X) test_X = test_X.reshape((test_X.shape[0], test_X.shape[2])) # Invertimos la escala para pronosticar inv_yhat = np.concatenate((yhat, test_X[:, lags * df.shape[1] - 1:]), axis=1) inv_yhat = scaler.inverse_transform(inv_yhat) inv_yhat = inv_yhat[:,0] # Invertimos la escala por la actual test_y = test_y.reshape((len(test_y), 1)) inv_y = np.concatenate((test_y, test_X[:, lags * df.shape[1] - 1:]), axis=1) inv_y = scaler.inverse_transform(inv_y) inv_y = inv_y[:,0] # Predicción train_X yhat_train = model.predict(train_X) train_X = train_X.reshape((train_X.shape[0], train_X.shape[2])) # Invertimos la escala para pronosticar inv_yhat_train = np.concatenate((yhat_train, train_X[:, lags * df.shape[1] - 1:]), axis=1) inv_yhat_train = scaler.inverse_transform(inv_yhat_train) inv_yhat_train = inv_yhat_train[:,0] # Invertimos la escala por la actual train_y = train_y.reshape((len(train_y), 1)) inv_y_train = np.concatenate((train_y, train_X[:, lags * df.shape[1] - 1:]), axis=1) inv_y_train = scaler.inverse_transform(inv_y_train) inv_y_train = inv_y_train[:,0] # - # Cálculo del error en RMSE rmse = sqrt(mean_squared_error(inv_y, inv_yhat)) rmse_train = sqrt(mean_squared_error(inv_y_train, inv_yhat_train)) 
print('Train RMSE: %.3f' % rmse_train) print('Test RMSE: %.3f' % rmse) # Vemos que nuestro modelo de LSMT con KERAS es muy similar a nuestro modelo con una sola variable que hicimos en el proyecto 3 con un __Random Forest__ que tenía un error en Test en test de 4815 pero está cerca. Sin embargo, la aplicabilidad de este modelo nos permite incorporar más variables y realizar pruebas con mayor facilidad que un modelo de una sola variable. # + [markdown] heading_collapsed="true" # #### Visualización # - # Vamos a ver cómo se ve nuestro gráfico en el desempeño de __train_X__ y __test_X__. # + df_dia_pred = np.around(inv_yhat, decimals=0, out=None) df_dia_pred = pd.DataFrame(data=df_dia_pred, columns=['cantidad_pasos']) df_dia_pred_train = np.around(inv_yhat_train, decimals=0, out=None) df_dia_pred_train = pd.DataFrame(data=df_dia_pred_train, columns=['cantidad_pasos']) # - df_dia = pd.read_csv('/Users/juanchi/Documents/Data Science/Curso/Proyecto 3/dataset_dia.csv', sep =',') df_dia = df.set_index(pd.DatetimeIndex(df_dia['fecha_hora'])) df_dia.drop(columns=['dias'], inplace = True) plt.figure(figsize=(16,8)) plt.plot(df_dia.index, df_dia.cantidad_pasos,lw = 0.5, zorder=0, alpha=0.5, label = 'Original') plt.plot(df_dia.index[df.shape[0] - test_X.shape[0]:], df_dia_pred.cantidad_pasos, alpha=0.7, label = 'Test Set') plt.plot(df_dia.index[lags-1:df.shape[0] - test_X.shape[0] - 1], df_dia_pred_train.cantidad_pasos, alpha=0.7, label = 'Train Set') plt.legend() plt.title('Cantidad de vehículos que pasan por Peaje Illia CABA') plt.xlabel('Fecha') plt.ylabel('Cantidad de Pasos') plt.show() # ### Pregunta a responder # Habíamos planteado que queríamos entender cuándo en la ciudad se dejaría de usar el método de pago __EFECTIVO__. En el proyecto 3 descubrimos que hay una clara tendencia a la baja del uso de este método en todos los peajes de acceso. 
Los factores se deben a la digitalización del servicio con Telepase, alentado por el Gobierno de la Ciudad de Buenos Aires y la transformación digital en el mundo de la finanzas (Fintechs). Sin embargo, queremos entender cúando será el momento donde orgánicamente se va a lograr eliminar este método de pago para entender si se pueden aplicar iniciativas que acelere la tendencia o esperar que el tiempo haga lo suyo. # # ¿CUÁNDO SE DEJARÁ DE USAR EL MÉTODO DE PAGO __EFECTIVO__?* # # *Es importante aclarar que la búsqueda de la respuesta no contempla que factores externos que pueden acelerar o relentizar este proceso. Ej: Pandemia. # # Para esto vamos a tomar el modelo entrenado LSTM y vamos a pronosticar los años 2020, 2021. # + [markdown] heading_collapsed="true" # #### Creación de año siguiente en Pandas # + # Creamos el dataframe para el año 2020 en los que vamos a llenar cuando nuestra predicción este hecha. def nuevo_año (fecha, periodo, formato): dataframe = pd.date_range(fecha, periods=periodo, freq=formato, normalize=True) dataframe = pd.DataFrame(data=dataframe,columns=['fecha_hora']) dataframe['cantidad_pasos'] = 0 dataframe = dataframe.set_index(pd.DatetimeIndex(dataframe['fecha_hora'])) dataframe.drop(columns=['fecha_hora'], inplace = True) dataframe['dias'] = dataframe.index.day_name() return dataframe df2020 = nuevo_año('2020-01-01', 366, 'D') # Año Bisiesto df2 = df.copy() # - df2.iloc[1094] # + [markdown] heading_collapsed="true" # #### Predicción 2020 con LSTM Multivariate # - # En el siguiente código, con el modelo ya entrenado con los datos de __2017__, __2018__ y __2019__, sumando un día a la vez del __2020__ vamos generando la predicción. Esa predicción la sumamos al dataset original, y volvemos loopear ahora con un día más que incluye dentro de su predicción el día anterior que hemos predicho. La predicción la vamos a hacer para los 366 días del 2020 que fue un año bisiesto. 
# + n = np.arange(366) results = [] for i in n: if df2.iloc[1094 + i, 0] >= 0: df2 = df2.append(df2020.iloc[0 + i]) #print(df2.iloc[1094 + i]) values = df2.values encoder = LabelEncoder() values[:,1] = encoder.fit_transform(values[:,1]) values = values.astype('float32') scaler = MinMaxScaler(feature_range=(0, 1)) scaled = scaler.fit_transform(values) lags = 120 reframed = series_to_supervised(scaled, lags , 1) reframed.drop(reframed.columns[[lags * df2.shape[1] + 1]], axis=1, inplace=True) values = reframed.values test = values test_X, test_y = test[:, :-1], test[:, -1] test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1])) yhat = model.predict(test_X[975 + i :]) new = test_X[975 + i:].reshape((test_X[975 + i:].shape[0], test_X[975 + i:].shape[2])) inv_yhat = np.concatenate((yhat, new[:,lags * df2.shape[1] - 1:]), axis=1) inv_yhat = scaler.inverse_transform(inv_yhat) inv_yhat = inv_yhat[:,0] df2.iloc[1095 + i, 0] = np.around(inv_yhat[0]) #results.append(inv_yhat[0]) #print(inv_yhat) else: break # - df2 = df2.set_index(pd.to_datetime(df2.index,format="%Y-%m-%d")) plt.figure(figsize=(16,8)) plt.plot(df2.index, df2.cantidad_pasos,lw = 0.5, zorder=0, alpha=0.5, label = 'Original') plt.plot(df2.index[1095:], df2.cantidad_pasos[1095:], zorder=0, alpha=0.7, label = 'Predicción 2020') plt.legend() plt.title('Cantidad de vehículos que pasan por Peaje Illia CABA') plt.xlabel('Fecha') plt.ylabel('Cantidad de Pasos') plt.show() # Nuestro modelo de __LSTM con Keras con multiples variables (Mutlivariate)__ no es un modelo que esta performando como esperábamos y claramente la suma del `días` como variabe extra no genera mejoras sustenciales que justifiquen el costo computacional y complejidad de modelo. Este modelo no nos permite responder nuestra pregunta inicial de cuando se dejará de usar el método efectivo en el peaje Illia. Sin embargo, queríamos aplicar un modelo LSTM al problema para entender si funcionaba mejor. 
# # Quedaría hacer otro tipo de pruebas que podemos realizar con el objetivo de mejorar el modelo. Probar el mismo modelo pero solo con la variable de `cantidad_pasos` por ejemplo, optimizar mejor los hiperparámetros, usar __get_dummies__ o __oneHotEncoder__ y quitar la estacionalidad de la serie. # #### Predicción 2020 con RandomForest Univariate # + [markdown] heading_collapsed="true" # ##### Modelo del proyecto 3 # + df3 = df2.cantidad_pasos[:1095] # Definimos X e y look_back = 120 N = df3.shape[0] X = np.zeros((N - look_back - 1,look_back)) y = np.zeros(N - look_back - 1) print(X.shape, y.shape) for i in range(X.shape[0]): X[i,:] = df3.iloc[i:i+look_back] y[i] = df3.iloc[i+look_back] N_train = int(y.shape[0]*0.7) # cantidad de instancias que vamos a usar para entrenar. N_test = int(y.shape[0]*0.3) #cantidad de puntos que vamos a usar para evaluar. N_total = N_train + N_test length_total = N_train + N_test + look_back X_train = X[:N_train,:] y_train = y[:N_train] print(X_train.shape, y_train.shape) X_test = X[N_train:N_train+N_test,:] y_test = y[N_train:N_train+N_test] print(X_test.shape, y_test.shape) from sklearn.metrics import mean_squared_error from sklearn.ensemble import RandomForestRegressor rf = RandomForestRegressor(n_estimators=100, random_state=42, n_jobs = 4).fit(X_train,y_train) y_pred = rf.predict(X) y_train_pred = rf.predict(X_train) y_test_pred = rf.predict(X_test) # - # ##### Proyección 2020 # + df2020_2 = df2020.copy() df2020_2 = df2020.drop(['dias'], axis=1) df2020_2 df2020_3 = df2020_2.copy() n = np.arange(366) for a in n: df3 = df3.append(df2020_2.iloc[0 + a], ignore_index=True) #print(df3.shape) look_back = 120 N = df3.shape[0] #1096 X = np.zeros((N - look_back,look_back)) #1095 - 120 - 1 = 975 for i in range(X.shape[0]): X[i,:] = df3.iloc[i:i+look_back] X_test = X[:,:] y_pred = rf.predict(X_test[975+a:,:]) df3.iloc[1095 + a] = np.around(y_pred[0]) #print(X_test.shape) #print(y_pred) # - df4 = pd.DataFrame(data=df3) df5 = 
pd.date_range('2017-01-01', periods=1461, freq='D') df5 = pd.DataFrame(data=df5,columns=['fecha_hora']) df5['cantidad_pasos'] = df4 df5 = df5.set_index(pd.DatetimeIndex(df5['fecha_hora'])) df5.drop(columns=['fecha_hora'], inplace = True) # + from sklearn.linear_model import LinearRegression df5['fecha_numeros']=df5.index.map(datetime.datetime.toordinal) lr = LinearRegression() lr.fit(df5.fecha_numeros.values.reshape(-1, 1), df5.cantidad_pasos) # - plt.figure(figsize=(16,8)) plt.plot(df5.index[:1095], df5.cantidad_pasos[:1095],lw = 0.5, zorder=0, alpha=0.5, label = 'Original') plt.plot(df5.index[1095:], df5.cantidad_pasos[1095:], zorder=0, alpha=0.7, label = 'Predicción 2020') plt.plot(df5.index, lr.predict(df5.fecha_numeros.values.reshape(-1, 1)), lw = 1, label = 'Tendencia con ajuste Lineal - EFECTIVO') plt.legend() plt.title('Cantidad de vehículos que pasan por Peaje Illia CABA') plt.xlabel('Fecha') plt.ylabel('Cantidad de Pasos') plt.show() print('Promedio de pases 2017 =', df5.cantidad_pasos[:364].mean()) print('Promedio de pases 2018 =', df5.cantidad_pasos[365:729].mean()) print('Promedio de pases 2019 =', df5.cantidad_pasos[730:1094].mean()) print('Promedio de pases 2020 =', df5.cantidad_pasos[1095:].mean()) # Este último modelo de __RandomForest__ con una sola variable responde mejor a lo que búscabamos. Si bien, no predice correctamente los picos semanales de pases sigue la tendencia negativa que vimos en el proyecto anterior. Notamos también, que el modelo baja su rendimiento una vez pasado los 120 lags donde empieza a predecir 100% con los datos predichos. # # Como conclusión para este problema podemos decir que desde __2017 a 2020_ según nuestro análisis hubo un baja de `cantidad_pasos` de __32%__ en un lapso de 4 años calendario. # # En relación a año contra año: # De 2017 al 2018 hubo __-10%__. # De 2018 a 2019 hubo __-13%__. # De 2019 a 2020 deberíamos esperar una baja del __-14%__. 
# # + z = 0.86 x = df5.cantidad_pasos[1095:].mean() l = 0 for m in n: if x >= 5000: x = x * z l = l + 1 else: break print(l) # - # Si no hubiera ningún otro factor externo que se sume a los que ya están en juego podríamos suponer que __12 años__ la cantidad de pases en el peaje Illia va estar por debajo de los __5.000__ diarios. Es importante decir, que en 2020 el __Gobierno de la Ciudad de Buenos Aires__ [dispuso como obligatorio el Telepase como método de pago para todos sus peajes](https://www.buenosaires.gob.ar/jefaturadegabinete/movilidad/noticias/por-el-alto-numero-de-adhesiones-diarias-habra-tiempo-hasta-el). Seguramente este nuevo factor, sumado a la explosión de la pandemia, ha aumentado notablemente la baja de uso de efectivo.
Proyecto Integrador2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import nltk # Commented out IPython magic to ensure Python compatibility. # # %cd /content/ease/ease ## IMPORTS ## from essay_set import EssaySet from feature_extractor import FeatureExtractor from predictor_set import PredictorSet from predictor_extractor import PredictorExtractor from sklearn.svm import SVR import pickle import pandas as pd import csv import numpy as np from sklearn.metrics import confusion_matrix from sklearn.model_selection import learning_curve,GridSearchCV #from sklearn import svm, grid_search import nltk #nltk.download('punkt') #nltk.download('averaged_perceptron_tagger') # The following 3 functions have been taken from <NAME>'s github repository # https://github.com/benhamner/Metrics def Cmatrix(rater_a, rater_b, min_rating=None, max_rating=None): """ Returns the confusion matrix between rater's ratings """ assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(rater_a + rater_b) if max_rating is None: max_rating = max(rater_a + rater_b) num_ratings = int(max_rating - min_rating + 1) conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)] for a, b in zip(rater_a, rater_b): conf_mat[a - min_rating][b - min_rating] += 1 return conf_mat def histogram(ratings, min_rating=None, max_rating=None): """ Returns the counts of each type of rating that a rater made """ if min_rating is None: min_rating = min(ratings) if max_rating is None: max_rating = max(ratings) num_ratings = int(max_rating - min_rating + 1) hist_ratings = [0 for x in range(num_ratings)] for r in ratings: hist_ratings[r - min_rating] += 1 return hist_ratings def quadratic_weighted_kappa(y, y_pred): """ Calculates the quadratic weighted kappa axquadratic_weighted_kappa calculates the quadratic weighted kappa value, which is 
a measure of inter-rater agreement between two raters that provide discrete numeric ratings. Potential values range from -1 (representing complete disagreement) to 1 (representing complete agreement). A kappa value of 0 is expected if all agreement is due to chance. quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b each correspond to a list of integer ratings. These lists must have the same length. The ratings should be integers, and it is assumed that they contain the complete range of possible ratings. quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating is the minimum possible rating, and max_rating is the maximum possible rating """ rater_a = y rater_b = y_pred min_rating=None max_rating=None rater_a = np.array(rater_a, dtype=int) rater_b = np.array(rater_b, dtype=int) assert(len(rater_a) == len(rater_b)) if min_rating is None: min_rating = min(min(rater_a), min(rater_b)) if max_rating is None: max_rating = max(max(rater_a), max(rater_b)) conf_mat = Cmatrix(rater_a, rater_b, min_rating, max_rating) num_ratings = len(conf_mat) num_scored_items = float(len(rater_a)) hist_rater_a = histogram(rater_a, min_rating, max_rating) hist_rater_b = histogram(rater_b, min_rating, max_rating) numerator = 0.0 denominator = 0.0 for i in range(num_ratings): for j in range(num_ratings): expected_count = (hist_rater_a[i] * hist_rater_b[j] / num_scored_items) d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0) numerator += d * conf_mat[i][j] / num_scored_items denominator += d * expected_count / num_scored_items return (1.0 - numerator / denominator) # + ## TRAINING FEATURES ## train_set = pd.read_csv('/home/mehar/github/ease/aes_data/essay1/fold_0/train.txt', sep='\t') x = train_set.to_numpy() tester = x.tolist() print(len(tester)) essaylist = [] scorelist = [] for i in range(0, len(tester)): z = tester[i] y = z[0].split(', ', 1) #print(y) scorelist.append(float(y[0])) essaylist.append(y[1]) train = EssaySet() print("Done1") for i in range(0, 
len(essaylist)): train.add_essay(essaylist[i], scorelist[i]) print("Done2") features=FeatureExtractor() features.initialize_dictionaries(train) X = features.gen_feats(train) print("features train", X) print("Done3") # + ## TESTING FEATURES ## test_set = pd.read_csv('uf_beg_bound_1.csv', sep=',') # test_set = pd.read_csv('test.txt', sep='\t', encoding = 'utf=8') x = test_set.to_numpy() tester = x.tolist() test_scorelist = [] test_essaylist = [] for i in range(0, len(tester)): z = tester[i] y = z[0].split(', ', 1) test_scorelist.append(float(y[0])) test_essaylist.append(y[1]) count = 0 test = EssaySet(essaytype="test") for i in range(0, len(test_essaylist)): test.add_essay(test_essaylist[i], test_scorelist[i]) Y = features.gen_feats(test) print("features test", Y) print("Done4") ## SCALING scaled_train = [] for i in range(0, len(scorelist)): scaled_train.append(float((np.clip((scorelist[i]), a_min=2, a_max=12)/12))) print("start training and prediciton") # + from sklearn.svm import SVR Cs = [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100] # Cs = [0.00001] # gammas = [1] gammas = [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100] param_grid = {'C': Cs, 'gamma' : gammas} clf = GridSearchCV(SVR(kernel='rbf'), param_grid, cv =5) clf.fit(X, scaled_train) print(clf.best_params_) # clf = SVR(C=10, gamma = 0.001, kernel='rbf') # clf.fit(X, scaled_train) final = clf.predict(Y) # print(final) # INVERSE_SCALING finals = np.rint(np.clip(final,a_min=0,a_max=1)*12) ## QWK Score print(quadratic_weighted_kappa(test_scorelist,finals)) ## test_scorelist babble_score gpt_score # -
Model2-EASE/ease/.ipynb_checkpoints/Untitled3-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.5 64-bit (''danlp'': conda)' # name: python38564bitdanlpcondab70fe6e4720d408985175c9cab14fc3e # --- # # # Text classification of speeches from the Danish Parliament # # In this notebook, we show how to perform text classification. # As a concrete example, we train a text classifier to predict the party in which belongs a speech from the Danish Parliament. # # Steps in this tutorial: # # 1. Download the data # 2. Extract the texts from the XML files # 3. Preprocess the data (cleaning) # 4. Prepare the data for training and evaluation # 5. Train a model with fastText # 6. Test and evaluate the performance # # You can restart the tutorial at each step if you have previously saved the models/data. # To reinitialize the notebook with required libraries and variables, run the following cell. # # + # Run this cell for restarting from any step import os import glob from zipfile import ZipFile import xmltodict import timestring import json import fasttext # path to the (default) data folder # where we save/load data and models for this notebook DATA_DIR = "FT-data-DSpace" # paths to the training and test files train_path = os.path.join(DATA_DIR, "train.txt") test_path = os.path.join(DATA_DIR, "test.txt") # - # ## Step 1. Download the Data # # The data we use comes from the `The Danish Parliament Corpus 2009 - 2017, v1` (Hansen, <NAME>, 2018, CLARIN-DK-UCPH Centre Repository). # # The corpus contains transcripts of parliamentary speeches. It consists of 10 XML files (one for each year). XML tags include meetings, item title and number, speeches, name and party of speakers, date, time, etc. # # # 1. Download the data at http://hdl.handle.net/20.500.12115/8 # 2. Unzip the folder : `unzip FT-data-DSpace.zip` # 3. 
Enter the folder and unzip the files : `cd FT-data-DSpace && unzip '*.zip'` # # ## Step 2. Extract the texts # # For building and testing a model, we need labelled (classified) texts. # Texts are transcripts of speeches; labels/classes are the parties they belong to. # We start by extracting these data from the XML files. # For each parliament year, we want to extract all the speeches that are attached to a party (some speeches have no party attached, we don't keep them in our data) and store them as a list of triplets (year, party, speech). # In the xml file, speeches are stored under the `<EdixiData><Møder><Møde><Dagsordenpunkt><Tale>` tags. The recorded year, name of the party and transcript of the speech are respecctively stored with the `<Starttid>`, `<Parti>` and `<Tekst>` tags. # The structure of an XML file is as follows: # # ~~~ xml # <EdixiData> # <Møder> # <Samling>...</Samling> # <Møde> # <MeetingID>...</MeetingID> # <Location>...</Location> # <DateOfSitting>...</DateOfSitting> # <Mødenummer>...</Mødenummer> # <Dagsordenpunkt> # <Punktnummer>...</Punktnummer> # <Mødetitle>...</Mødetitle> # <Sagstype>...</Sagstype> # <Tale> # <Starttid>...</Starttid> # <Sluttid>...</Sluttid> # <Navn>...</Navn> # <Rolle>...</Rolle> # <Tekst>...</Tekst> # </Tale> # <Tale> # ... # </Tale> # </Dagsordenpunkt> # </Møde> # ... # </Møder> # </EdixiData> # ~~~ # # # We first define a function for extracting this data from one xml file. 
# def speeches_from_xml(xml_file): from collections import OrderedDict speeches = [] # converting xml structure to dict xml_data = xmltodict.parse(xml_file.read()) xml_data = xml_data['EdixiData']['Møder'] for moder in xml_data: if not isinstance(moder, OrderedDict): continue if not 'Møde' in moder.keys(): continue for meeting in moder['Møde']: if not isinstance(meeting, OrderedDict): continue for dagsordenpunkt in meeting['Dagsordenpunkt']: if not isinstance(dagsordenpunkt, OrderedDict): continue if not 'Tale' in dagsordenpunkt: continue for tale in dagsordenpunkt['Tale']: if not isinstance(tale, OrderedDict): continue if not 'Parti' in tale or not isinstance(tale['Parti'], str): continue if not 'Starttid' in tale or not isinstance(tale['Starttid'], str): continue if not 'Tekst' in tale or not isinstance(tale['Tekst'], str): continue # we only save the year, not the exact date of the speech year = str(timestring.Date(tale['Starttid']).year) party = tale['Parti'] text = tale['Tekst'] if len(text)<1: continue speeches.append({'year': year, 'party': party, 'text': text}) return speeches # We run it on all the XML files, extracting the speeches from 2009 to 2017. 
# + tags=[] speeches = [] for xml_path in glob.glob(os.path.join(DATA_DIR, "EdixiXMLExport_*.zip")): filename = os.path.splitext(os.path.basename(xml_path))[0] print("Extract texts from ", filename) with ZipFile(xml_path) as xml_zip: with xml_zip.open(filename+'.xml') as xml_file: speeches += speeches_from_xml(xml_file) # + tags=[] print("Example\n-------") print(speeches[0]) print() # listing the years years = sorted(list(set([s['year'] for s in speeches]))) for year in years: print(len([_ for s in speeches if s['year']==year]), " speeches in ", year) print() # listing the parties parties = sorted(list(set([s['party'] for s in speeches]))) for party in parties: print(len([_ for s in speeches if s['party']==party]), " speeches from ", party) # - # We save the data so we can restart the notebook from the preprocessing step. with open(os.path.join(DATA_DIR, "speeches.json"), 'w') as f: f.write(json.dumps(speeches, indent=4)) # ## Step 3. Preprocess (clean) the data # For preprocessing the texts, we use the Danish SpaCy model. # Using this model, we can tokenize and tag the sentences with part-of-speech. # # First, we load the spacy model. # + tags=[] from danlp.models import load_spacy_model nlp = load_spacy_model() # - # We load the data. # + tags=[] speeches = [] with open(os.path.join(DATA_DIR, "speeches.json")) as f: speeches = json.loads(f.read()) # - # And preprocess (clean) the texts by: # - removing punctuation and symbols # - removing stop words and numbers # - lowercasing the tokens # # (This process might take several minutes) # # The purpose of this step is to reduce the vocabulary in order to speed up the training process. It is possible to skip some of the cleaning steps in order to improve the quality of the prediction (e.g., lowercasing might reduce the benefits of using word embeddings). 
# + tags=["outputPrepend"] from stop_words import get_stop_words da_stopwords = get_stop_words('da') import lemmy.pipe lemmatizer = lemmy.load('da') for speech in speeches: text = speech['text'] doc = nlp(text) pruned = [] lemmas = [] for tok in doc: if tok.tag_ in ["PUNCT", "SYM"]: continue if tok.is_stop or tok.is_digit: continue pruned.append(tok.lower_) lemmas.append(lemmatizer.lemmatize(tok.tag_, tok.lower_)[0]) speech['preprocessed'] = " ".join(pruned) speech['lemmas'] = " ".join(lemmas) speeches[0] # - with open(os.path.join(DATA_DIR, "speeches_pp.json"), 'w') as f: f.write(json.dumps(speeches, indent=4)) # ## Step 4. Prepare the data for training and testing We re-load the preprocessed data. speeches = [] with open(os.path.join(DATA_DIR, "speeches_pp.json")) as f: speeches = json.loads(f.read()) # We split into train and test data. # We will build a model from speeches from 2009 to 2014 and evaluate its performance on the 2015 speeches. # + tags=[] train_data = [(sp['party'],sp['preprocessed']) for sp in speeches if not sp['year'] == '2015'] test_data = [(sp['party'],sp['preprocessed']) for sp in speeches if sp['year'] == '2015'] print("Training data : ", len(train_data), "speeches") print("Test data : ", len(test_data), "speeches") # - # We save the training and test data in a format accepted by fastText : # ``` # __label__class1 text1 # __label__class2 text2 # ... # __label__classN textN # ``` # + tags=[] with open(train_path, 'w') as f: for (p,t) in train_data: f.write("__label__"+p+" "+t+"\n") with open(test_path, 'w') as f: for (p,t) in test_data: f.write("__label__"+p+" "+t+"\n") # - # ## Step 5. Learn a model with fastText # We load the common crawl word embeddings ("`cc.da.wv`") from fastText using Gensim. # If you prefer to use other embeddings from our library, you can have a look at our [page](https://github.com/alexandrainst/danlp/blob/master/docs/models/embeddings.md). 
# # + tags=[] from danlp.models.embeddings import load_wv_with_gensim wv = load_wv_with_gensim("cc.da.wv") # - # We save the embeddings in a format that is accepted by fastText (i.e. .vec). wv.save_word2vec_format(os.path.join(DATA_DIR, "cc-wv.vec"), binary=False) # We train the model with fastText. We can fine-tune the hyperparameters, e.g.: # - the number of epochs (recommended : from 1 to 50) # - the learning rate (recommended : from 0.1 to 1.0) # - the N-grams length (recommended : from 1 to 5) # + tags=[] import time start_time = time.time() model = fasttext.train_supervised(input=train_path, epoch=20, lr=0.2, dim=100, wordNgrams=2, pretrainedVectors=os.path.join(DATA_DIR, "cc-wv.vec")) print("time :", time.time()-start_time) print("score : ", model.test(test_path)) # - model.save_model(os.path.join(DATA_DIR, "model.bin")) # ## Step 6. Test and Evaluate the performance # Load the model (and test on the test data). # + tags=[] model = fasttext.load_model(os.path.join(DATA_DIR, "model.bin")) model.test(test_path) # - # To make a prediction with the model, you can use the following code (you can replace the text with any (preprocessed) text) : # + tags=[] text = "håber enighed gennemføre saglig seriøs høring hele lovforslaget dets aspekter tror element frank aaen nævner vedkommende volde umiddelbart største problemer" predicted_party = model.predict(text) predicted_party = predicted_party[0][0].split("__")[-1] print(predicted_party) # - # We calculate the accuracy per label to see how the model performs for each party and the micro average accuracy. 
# + tags=[] import numpy as np from sklearn.metrics import confusion_matrix golds = [] preds = [] with open(test_path) as f: for line in f: line = line.strip() line = line.split(" ", maxsplit=1) text = line[1] gold_party = line[0].split("__")[-1] pred_party = model.predict(text)[0][0].split("__")[-1] golds.append(gold_party) preds.append(pred_party) golds = np.array(golds) preds = np.array(preds) labels = np.unique(golds) cf = confusion_matrix(golds, preds, labels=labels) print("Accuracy per party") for i, label in enumerate(labels): tp = cf[i][i] tt = np.sum(golds == label) print(label, "\t{:.1%}\t({}/{})".format(tp/tt, tp, tt)) print("\nGlobal accuracy") tp = np.sum(preds==golds) tt = len(golds) print("{:.1%}\t({}/{})".format(tp/tt, tp, tt))
examples/tutorials/example_text_classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ashahdeen/WEEK-1-PREP-ASSIGNMENT/blob/main/Copy_of_Python_Programming_Data_Types.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="g4atfIoY1BjY" # # Python Programming: Data Types # + [markdown] id="Fti179OmKM7c" # ## 1.1 Introduction # + [markdown] id="_Kl5unZ9KPIY" # There are many different data types in Python. These data types can also be referred to as types of objects. We can start with the most common object types: strings, integers and floats. Anytime you write words (text) in Python, you're using character strings (strings for short). The most common numbers, on the other hand, are integers (e.g. -1, 0, 100) and floats, which represent real numbers (e.g. 3.14, -42.0). 
# + [markdown] id="F8zU7otKNuU3" # The following are examples of values with different data types; # + id="aLU0ZCM4KKMZ" colab={"base_uri": "https://localhost:8080/"} outputId="447f664a-29fe-4ecf-aec7-2c114efc277e" # Example 1 # Integer Data Type # 55 # + id="dm_ofqvwPQuP" colab={"base_uri": "https://localhost:8080/"} outputId="3d19e022-727c-404a-8c14-e1d54f9b7923" # Example 2 # Float Data Type # 3.142 # + id="Vjla31TDQg4R" # Example 3 # String Data Type # "Moringa Data Science" # + [markdown] id="uqmC6vfr24_f" # ### <font color="green">1.1 Challenges </font> # # + id="IpyXzkyYRnJ1" colab={"base_uri": "https://localhost:8080/"} outputId="b52701e8-c55d-45de-aa23-8ee2e9315c64" # Challenge 1 # Write a value with an Integer data type # 35 # + id="fTkeiMm6Rrhc" colab={"base_uri": "https://localhost:8080/"} outputId="81fd58fc-b185-4eae-acc4-745cce12fc86" # Challenge 2 # Write any value with a Float Data Type # 45.78 # + id="-yxfZtWqRuoA" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="25ba4786-1657-4488-81e3-f2828a00ded3" # Challenge 3 # Write any value with a String Data Type # '<NAME>' # + [markdown] id="UxdDFPQKz3NS" # ## 1.2 Finding Out Types # + [markdown] id="sgBUpOEqRe8d" # We can get Python to tell us the type of an expression by using the built-in type() function. We'll notice that Python refers to integers as int, floats as float, and character strings as str. 
# + id="GQZq_V9NS09i" # Example 1 # Finding out the type of 55 # type(55) # + id="mKoaAXlNS8pt" # Example 2 # Finding out the type of 3.142 # type(3.142) # + id="55FxpuwtS8So" # Example 3 # Finding out the type of "Moringa Data Science" # type("Moringa Data Science") # + [markdown] id="gqLkQCti2_as" # ### <font color="green">1.2 Challenges</font> # + id="9aeJPHk5RJW2" colab={"base_uri": "https://localhost:8080/"} outputId="1cc473e0-85ba-49af-d160-4a3742c8ad4d" # Challenge 4 # Now lets find out the type of our value in Challenge #1 # type(35) # + id="xpxRzyfHT_VC" colab={"base_uri": "https://localhost:8080/"} outputId="091aa0c6-9d63-47f9-bfee-0217575e6697" # Challenge 5 # What about the type of our value in Challenge #2 # type(45.78) # + id="JSTdIHXdUErB" colab={"base_uri": "https://localhost:8080/"} outputId="fe0e3820-4a2e-410b-b883-2b252a270c48" # Challenge 6 # What about the type of our value in Challenge #3 # type('asha deen') # + id="3A6nP_-XpcTA" colab={"base_uri": "https://localhost:8080/"} outputId="789f44ed-f5dd-45d3-82e9-3becbe490fa9" # Challenge 7 # What is the data type of the result of: 16 / 4? # type(16/4) # + id="_MC1ALIKpnyA" colab={"base_uri": "https://localhost:8080/"} outputId="439edaae-7d32-4fbc-b0e3-e13d31c1fb9d" # Challenge 8 # What is the type of the result of: 6 // 2? (Note the double slash //.) # type(6//2) # + id="dDF33m3swR-Z" colab={"base_uri": "https://localhost:8080/"} outputId="06c4a33f-4618-4ea3-dcba-052a2d5ec876" # Challenge 9 # What do you think is the output of type(a)? Run this cell and check # a = "Learning Data Science is Easy" a = 3 type('a') # + id="2GaZ0Eggw7Iu" colab={"base_uri": "https://localhost:8080/"} outputId="f4fa4c1e-5af2-4194-e8dd-38ba36ced15d" # Challenge 10 # What do you think is the output of this cell? # Think about it then run it. 
# type(3.2) == float # + [markdown] id="-BkSbASJUdKQ" # ## 1.3 Integers # + [markdown] id="JyxHhUBWUrbg" # Integers can be negative or positive numbers: # + id="BMEZZkF-Gv4s" colab={"base_uri": "https://localhost:8080/"} outputId="9a747244-9d77-4254-a266-7f51d031825b" # Example 1 # Again, we can verify whether this is the case by using the type() function # type(-300) # + [markdown] id="Z6xx7moLqa2B" # ### <font color = "green">1.3 Challenges</font> # + id="dcOMS1Ywn2MG" colab={"base_uri": "https://localhost:8080/"} outputId="1d15beb4-cefb-417d-9b8e-b8f5b342c98b" # Challenge 1 # What happens when we verify a positive number i.e 43 # type(43) # + id="H0Ws1Ez1HqoT" colab={"base_uri": "https://localhost:8080/"} outputId="5144b931-5c23-43c1-cf03-ae2b74f0c158" # Challenge 2 # Let's verify zero below # type(0) # + [markdown] id="a_8-Y8pFUfZ4" # ## 1.4 Floats # + [markdown] id="KK5QqqUFIorK" # Floats represent real numbers; they also include "numbers with decimals". We can now test some examples with the type() function # + id="qLrhOz6eIpZ7" colab={"base_uri": "https://localhost:8080/"} outputId="fde9ed9e-c1d4-455b-b141-855ec44a48ac" # Example 1 # Let's verify 1.0 # type(1.0) # + [markdown] id="Uc1Dx2gDqlQb" # ###<font color="green"> 1.4 Challenges</font> # + id="AkXuR5UXoa4u" colab={"base_uri": "https://localhost:8080/"} outputId="30f3f6c9-985d-45f2-ec63-309201715686" # Challenge 1 # Let's now verify 0.7 # type(0.7) # + id="W-dT0-3WWx7z" colab={"base_uri": "https://localhost:8080/"} outputId="453b9e94-362b-49a6-9ba1-2cd3361219ee" # Challenge 2 # Verifying the type of 0.53 # type(0.53) # + [markdown] id="zH4fP7JMUqPG" # ## 1.5 Boolean # + [markdown] id="Bfvyjf_YmT-w" # Boolean is another important type in Python. 
An object of type Boolean can take on one of two values: True or False: # # # + id="pLiD-enCmSyO" colab={"base_uri": "https://localhost:8080/"} outputId="685a855a-42fa-4e0f-97ec-3bc7f66ead82" # Example 1 # Let's run this cell and see what happens # True # + id="irtYN0ZZmv5Z" colab={"base_uri": "https://localhost:8080/"} outputId="73e80196-8178-4db7-efe0-dd9615288d89" # Example 2 # Let's also run this cell and see what happens # False # + [markdown] id="u3G52jpj2veu" # ### <font color="green">1.5 Challenges</font> # + id="graRe9NCmioG" colab={"base_uri": "https://localhost:8080/"} outputId="ffcca9fe-b535-428b-ab14-57edaa395496" # Challenge 1 # What happens when we run 'true' in this cell # True # + id="MuyXiaUPmz9U" colab={"base_uri": "https://localhost:8080/"} outputId="0f20004a-9f18-4937-c7fa-a3c73c05d764" # Challenge 2 # What about what happens when we run 'false' in this cell # False # + id="10qmsXWgm5Hk" colab={"base_uri": "https://localhost:8080/"} outputId="ab7161dc-6604-4e00-fef1-6cd862d66c1e" # Challenge 3 # Let's also find out type of False in this cell # type(False) # + [markdown] id="A5_RAp55UkF6" # ## 1.6 Conversion of Types # + [markdown] id="Ajd9s536Ul66" # We can change the type of the object in Python; this is called typecasting. For example, we can convert an integer into a float (e.g. 2 to 2.0). # + id="yJCPR5BEXp3G" colab={"base_uri": "https://localhost:8080/"} outputId="473a8f1d-7934-4383-963a-f7af2bc3bbc4" # Example 1 # Let's first verify that this is an integer by running this cell. # type(2) # + [markdown] id="s547kw_RlMAo" # ### 1.6a. Converting integers to floats # # + id="4mP_uTVYkoKn" colab={"base_uri": "https://localhost:8080/"} outputId="8252435a-f417-4ef8-eb08-2076e32e76bc" # Example 1 # Let's now convert integer 2 to a float and check its type # type(float(2)) # + [markdown] id="YWhO9m4mlAvg" # When we convert an integer into a float, we don't really change the value (i.e., the significand) of the number. 
However, if we cast a float into an integer, we could potentially lose some information. For example, if we cast the float 1.1 to integer we will get 1 and lose the decimal information (i.e., 0.1): # + id="ghNEvJErk4Vu" colab={"base_uri": "https://localhost:8080/"} outputId="984f2b8d-2dbc-41f8-e704-fabe520df739" # Example 2 # Casting 1.1 to integer will result in loss of information # int(1.1) # + [markdown] id="lgNykfkVlPAq" # ### 1.6b. Converting from strings to integers or floats # # + [markdown] id="rkSTFCOelV-y" # Sometimes, we can have a string that contains a number within it. If this is the case, we can cast that string that represents a number into an integer using int(): # # # + id="qK1OrMJYlSwk" colab={"base_uri": "https://localhost:8080/"} outputId="2ae6e2cb-0c75-4d33-c359-15c1a5c3cf3e" # Example 1 # Let's convert a string into an integer # int('1') # + id="9ixyl-vAljSG" # Example 2 # But if we try to do so with a string that is not a perfect match for a number, we will get an error. # int('1 or 2 people') # + id="1hR-LBq-lvCw" colab={"base_uri": "https://localhost:8080/"} outputId="81cb0e8b-74e8-4eb7-d154-21965653cc97" # Example 3 # We can also convert strings containing floating point numbers into float objects: # float('1.2') # + [markdown] id="-HTYwkHal6e7" # ### 1.6c. Converting numbers to strings # # + [markdown] id="gLlTlzYml8Fu" # If we can convert strings to numbers, it is only natural to assume that we can convert numbers to strings, right? # # # + id="XjEcrrz-mByz" # Example 1 # Let's Convert an integer to a string # str(1) # + [markdown] id="KyVOjrcMr4wI" # #### <font color="green">1.6c Challenges</font> # + id="Rk4YzQRGmH87" # Challenge 1 # And there is no reason why we shouldn't be able to make floats into strings as well: # Converting a float 1.2 to a string # OUR CODE GOES HERE # + id="fnw2YsM8vswc" # Challenge 2 # How do you convert a digit 6/4 into a string? # OUR CODE GOES HERE # + [markdown] id="eib75N_EnVqp" # ### 1.6d. 
Boolean Casting # We can cast boolean objects to other data types. If we cast a boolean with a value of True to an integer or float we will get a one. If we cast a boolean with a value of False to an integer or float we will get a zero. Similarly, if we cast a 1 to a Boolean, you get a True. And if we cast a 0 to a Boolean we will get a False. Let's give it a try: # # # + id="ggrd56z2rd5u" colab={"base_uri": "https://localhost:8080/"} outputId="37b4bb88-df27-42de-9c7d-10ec90abe0b7" # Example 1 # Let's cast 1 to a Boolean bool(1) # + [markdown] id="1x6Hvs-jrru7" # #### <font color="green">1.6d Challenges</font> # + id="JjfxTMb1n_9z" colab={"base_uri": "https://localhost:8080/"} outputId="47a9e3d5-3e83-49d2-daeb-d6f3cd6de103" # Challenge 1 # Let's convert the value of True to an integer # int(True) # + id="1x5bcRVfoNHW" colab={"base_uri": "https://localhost:8080/"} outputId="818c4074-eceb-4bb5-99df-97459cf60f25" # Challenge 2 # What happens when we cast a 0 to a Boolean? # bool(0) # + id="VrCxNoqXo03X" colab={"base_uri": "https://localhost:8080/"} outputId="d65fbb58-46e4-458b-a336-d0681d4cc51b" # Challenge 3 # Let's cast a boolean with a value of True to an integer # int(True)
Copy_of_Python_Programming_Data_Types.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # %store # %store -r __importRegression __importRegression # + endMonth = 34 fnameTest = '../input/validation/test_' + str(endMonth) + '.csv' fnameTrain = '../input/validation/train_' + str(endMonth) + '.csv' #train = pd.read_csv(fnameTrain) #test = pd.read_csv(fnameTest) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') items = pd.read_csv("../input/items.csv") categories = pd.read_csv("../input/item_categories.csv") print (train.shape, test.shape, items.shape, categories.shape) # - # labels = test.pop('item_cnt_day') train.head() test.head() last_month_sales = train.loc[train.date_block_num == (endMonth-1)] last_month_sales.reset_index(inplace=True) print last_month_sales.shape last_month_sales.head() last_month_sales.pop('date') last_month_sales.pop('date_block_num') last_month_sales.pop('item_price') last_month_sales.pop('index') last_month_sales.head() last_month_sales.item_cnt_day.describe() last_month_sales = last_month_sales.groupby(['shop_id', 'item_id'], as_index=False).agg({'item_cnt_day':sum}) print last_month_sales.shape last_month_sales.head() last_month_sales.drop_duplicates(inplace=True) test.head() # predictions = np.zeros(shape=test.shape[0]) # print predictions.shape # print test.shape[0] # for i in range(test.shape[0]): # #id0 = test['ID'][i] # shop_id0 = test['shop_id'][i] # item_id0 = test['item_id'][i] # #print id0, shop_id0, item_id0 # if shop_id0 in last_month_sales.shop_id.values: # possibles = last_month_sales.loc[last_month_sales.shop_id == shop_id0] # if item_id0 in possibles.item_id.values: # t = possibles.loc[possibles.item_id==item_id0] # predictions[i] = t.item_cnt_day.values[0] # else: # predictions[i] = 0.0 # else: # predictions[i] = 0.0 # test_id = test.pop('ID') # + #train_labels 
= last_month_sales.pop('item_cnt_day') lgbm = LGBMRegressor() lgbm.fit(last_month_sales, train_labels) train_predictions = lgbm.predict(last_month_sales) predictions = lgbm.predict(test) # - # labels.shape # Clipping predictions in the [0, 20] range for i in range(len(predictions)): if predictions[i] > 20: predictions[i] = 20 if predictions[i] < 0: predictions[i] = 20 # for i in range(len(labels)): # if labels[i] > 20: # labels[i] = 20 # if labels[i] < 0: # labels[i] = 20 # Compute RMSE # np.sqrt(mean_squared_error(labels, predictions)) np.sqrt(mean_squared_error(train_labels, train_predictions)) # ## Submission test_submit = pd.DataFrame({'ID': test_id, 'item_cnt_month': predictions}) print test_submit.shape test_submit.to_csv('lgbm_last_month.csv', index=False) test_submit.head()
script/Correct_validation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Pandas
import pandas as pd

# SQL Alchemy
from sqlalchemy import create_engine
# -

# Path to the SQLite database file.
# BUGFIX: `database_path` was used below without ever being defined, which
# raised a NameError. NOTE(review): placeholder path -- TODO confirm the exact
# relative path against this activity's Resources folder.
database_path = "../Resources/Census_Data.sqlite"

# Create a connection to the Census database
engine = create_engine(f"sqlite:///{database_path}")
conn = engine.connect()

# List the tables in the database.
# BUGFIX: "show table" is MySQL syntax and is not understood by SQLite;
# query SQLite's sqlite_master catalog instead, then select from the City
# table once its exact name is confirmed.
data = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table'", conn)
data.head()

# +
# Create a connection to the Zip database
# -

# Query All Records in the Zip Table

# Merge the columns

# Combined Data
10-Advanced-Data-Storage-and-Retrieval/1/Activities/04-Stu_ReadAllTheSQLs/Unsolved/.ipynb_checkpoints/Read_All_The_SQLs-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://colab.research.google.com/github/issdl/from-data-to-solution-2021/blob/main/1_data_preparation_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="BUgjvBbysqfv" # # Data preparation #2 # + [markdown] id="thpI4QzOqXO4" # ## Imports # + id="rdp8fzWWqZSA" import os import pandas as pd # + [markdown] id="qzewomAcqbZC" # ## Constants # + id="c10c6968-9b67-4c8b-9cb9-3b58ba0886c3" DATA_DIR = 'data/' CSV_FILE = os.path.join(DATA_DIR, 'nih_chest_xray_single_9c.csv') BB_CSV_FILE = os.path.join(DATA_DIR, 'BBox_List_2017.csv') # + [markdown] id="v2OKZ1tQqgf8" # ## Download the prepared CSV # + colab={"base_uri": "https://localhost:8080/"} id="bff0845e-4127-4d09-aa25-9d7a319a282a" outputId="86b4594d-acf4-4f07-fd5d-e00740ad8dc2" os.makedirs(DATA_DIR, exist_ok=True) if not os.path.exists(CSV_FILE): # ! gdown --id 1gJJ5ZRfRicuxIoBWLYAUnfUs4yhJ97sX -O $CSV_FILE else: print('File exists: {}'.format(CSV_FILE)) if not os.path.exists(BB_CSV_FILE): # ! gdown --id 12fA_VUjnt-tyo1d0M2htyQ5w3-nIAVH_ -O $BB_CSV_FILE else: print('File exists: {}'.format(BB_CSV_FILE)) # + [markdown] id="HUbCg2tDsiwE" # ## Explore the dataset # # Tasks: # # 1. Show `df`'s and `df_bb`'s statistics # + id="456e9277-d13c-400c-b5c4-e14d794dc388" df = pd.read_csv(CSV_FILE) df_bb = pd.read_csv(BB_CSV_FILE) # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="J-NjCkWqsyXF" outputId="b6a8b1fb-c3b1-45d7-8b6e-e0d1af29a4c4" ## Show `df`s statistics # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="_jaT27MRti2E" outputId="19bcf385-2df6-4e4f-8246-9d5418e8b00f" ## Show `df_bb`s statistics # + [markdown] id="pKlxE6aPrx3z" # ## Cleanup # # Tasks: # # 1. 
Delete unused columns ('Unnamed: 0', 'Follow-up #', 'Patient ID', 'View Position', 'OriginalImage[Width', 'Height]', 'OriginalImagePixelSpacing[x', 'y]'). Resulting `df` should have 4 columns left. # 1. Remove redundant columns from `df_bb` # 1. Rename `Bbox [x` `y` `w` `h]` columns to `bb_x`, `bb_y`, `bb_w`, `bb_h` in `df_bb` # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="S967S-aBr1QO" outputId="a4129bba-de68-4cf5-b256-487ff6011641" ## Delete unused columns ('Unnamed: 0', 'Follow-up #', 'Patient ID', ## 'View Position', 'OriginalImage[Width', 'Height]', 'OriginalImagePixelSpacing[x', 'y]'). ## Resulting `df` should have 4 columns left. # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="lI0_ydP4trx9" outputId="6b7a67b0-379d-4c60-8bf8-cc4fca28720f" ## Remove redundant columns from `df_bb` # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="VuklX7tduYQc" outputId="079de080-8da3-4f03-e48f-24edf2281e2e" ## Rename `Bbox [x` `y` `w` `h]` columns to `bb_x`, `bb_y`, `bb_w`, `bb_h` in `df_bb` # + [markdown] id="E5rTCpO0vf0l" # ## Merge both data frames # # Tasks: # # 1. Merge both dataframes # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="0IeXwajwvpKD" outputId="76048423-95e8-481f-d12d-597ca35c027a" ## Merge both dataframes # + id="XZ1OEii9wTCY" ## Rescale bounding box coordinates # + colab={"base_uri": "https://localhost:8080/", "height": 297} id="LpR7SnwSwrvE" outputId="d30772ec-7604-4408-fd7e-c98e079c001b" df.describe() # + [markdown] id="827ca20d-a9e5-4320-ace1-ebe866bfdba2" # ## Encode categories # # Tasks: # # 1. 
Add one-hot encoded labels to `df` # + colab={"base_uri": "https://localhost:8080/", "height": 328} id="cfe2f12e-d817-485e-9445-55b9f0fddd46" outputId="568574b2-0646-480f-c849-319de4cc696a" ## Add one-hot encoded labels to `df` # + id="35cd09a9-8088-40ff-9fe6-309ab2bc9bd3" ## combine `onehot` with `df` data frame # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="aX8bxZ_pxYnl" outputId="0a69cc68-b43e-46da-8fd3-d9ab9552c4c1" df.head() # + [markdown] id="6e5614bf-a354-4243-b716-5b8535aa2129" # ## Add column with file paths # # Tasks: # # 1. Add column with file path (data/images/IMAGE_INDEX) # + id="fee24611-e08f-4e4a-afc6-4ceb97cc377c" ## Add column with file path (data/images/IMAGE_INDEX) # + colab={"base_uri": "https://localhost:8080/", "height": 241} id="5275f9ea-4e36-4721-88f9-c837bc79fd88" outputId="8264882c-6738-47d5-bbd8-3162fe1a4480" df.head() # + [markdown] id="7cacb9ad-fa39-45ca-98c5-3152c27e7ea3" # ## Save results # + id="b3f3f6fd-dded-475c-926a-6982962ba66e" df.to_csv(os.path.join(DATA_DIR, 'nih_chest_xray_single_9c_bb_onehot.csv'), index=False)
1_data_preparation_2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Lab 3: Spatial data manipulation: Raster # # In this lab, you will analyze the impact of hurricane Harvey, which was happened in August 2017 in Texas. You will find that most of the tasks were covered in the lecture, and it is a reproduction of the tasks. The differences are the study area (Houston instead of Champaign), the index (<a href=https://eos.com/make-an-analysis/ndwi/>NDWI [Normalized Difference Water Index]</a> instead of NDVI [Normalized Difference Vegetation Index]), and the data source (Sentinel 2 instead of Landsat 8). # # ## Structure # ### 1. Import Data (1 point) # **1.1.** (0.5 point) Import and stack four bands collected on **August 15 2017**. Save the result as a format of `ndarray` and name it as `before_harvey`. <br> # **1.2.** (0.5 point) Import and stack four bands collected on **September 1 2017**. Save the result as a format of `ndarray` and name it as `after_harvey`. # ### 2. Display (True / False) Color Images after Harvey (1 point) # **2.1.** (0.5 point) Plot a true color image (R,G,B) after Harvey with `ep.plot_rgb()` method. <br> # **2.2.** (0.5 point) Plot a false color image (NIR,R,G) after Harvey with `ep.plot_rgb()` method. <br> # ### 3. Calculate Normalized Difference Water Index (NDWI) "Before" and "After" Harvey (1 point) # **3.1.** (0.5 point) Calculate NDWI **before** harvey and save it as `ndwi_before`. <br> # **3.2.** (0.5 point) Calculate NDWI **after** harvey and save it as `ndwi_after`. <br> # ### 4. Classify Raster Image based on the NDWI (1 point) # **4.1.** (0.5 point) Classify the values in `ndwi_before` array with `np.digitize()` method, and save the result as `ndwi_before_class`. 
<br> # **4.2.** (0.5 point) Classify the values in `ndwi_after` array with `np.digitize()` method, and save the result as `ndwi_after_class`. <br> # ### 5. Calculate the area covered by water and its percentage over the entire area (1 point + a) # **5.1.** (0.5 point) Calculate the percentage of area covered by water, **before** Hurricane Harvey, save it as `percent_before_harvey`. <br> # **5.2.** (0.5 point) Calculate the percentage of area covered by water, **after** Hurricane Harvey, save it as `percent_after_harvey`. <br> # **5.3. (Extra 1 point)** Calculate the size of area (in Square KM) which wasn't covered by water before Harvey but covered by water after Harvey. Save the result as `diff_area` with the numerical format. # # ## Notes: # **Before you submit your lab, make sure everything runs as expected WITHOUT ANY ERROR.** <br> # **Make sure you fill in any place that says `YOUR CODE HERE` or `YOUR ANSWER HERE`:** FULL_NAME = "" # + # Import necessary packages import rasterio as rio import numpy as np import os import matplotlib.pyplot as plt import earthpy.plot as ep from matplotlib.colors import ListedColormap data_path = './data' # - # ### 1. Import Data (1 point) # # In the data folder, you will find two sets of Senitinel2 satellite imagery. Each set has four JPEG2000 files, and each file represents the bands correspinding to each color/wavelength. Please refer to the table below and import the files to the appropriate band, accordingly. <br> # # **Name schema: "Sentinel2_{YYYYMMDD}_B{Band Number}.jp2"** # # | Band Number | Description | Resolution | # | :-: | :-: | :-: | # | Band 2 | Visible blue | 10 meter | # | Band 3 | Visible green | 10 meter | # | Band 4 | Visible red | 10 meter | # | Band 8 | Near Infrared | 15 meter | # # **1.1.** (0.5 point) Import and stack four bands collected on **August 15 2017**. Save the result as a format of `ndarray` and name it as `before_harvey`. 
<br> # **1.2.** (0.5 point) Import and stack four bands collected on **September 1 2017**. Save the result as a format of `ndarray` and name it as `after_harvey`. # + # Your code here (Task 1.1) # + # Your code here (Task 1.2) # + """ Test code for the previous code. This cell should NOT give any errors when it is run.""" assert type(before_harvey) == np.ndarray assert type(after_harvey) == np.ndarray assert before_harvey.shape == (4, 1254, 2328) # (Bands, Rows, Columns) assert after_harvey.shape == (4, 1254, 2328) # (Bands, Rows, Columns) print('Success!') # - # ### 2. Display (True / False) Color Images after Harvey (1 point) # # **2.1.** (0.5 point) Plot a true color image (R,G,B) after Harvey with `ep.plot_rgb()` method. <br> # **2.2.** (0.5 point) Plot a false color image (NIR,R,G) after Harvey with `ep.plot_rgb()` method. <br> # # **You are expected to see the two images below. But it doesn't have to be executed in one cell.** # ![True and False Color representation of Satellite image](./data/task2_example.png) # # + # Your code here # - # ### 3. Calculate Normalized Difference Water Index (NDWI) "Before" and "After" Harvey (1 point) # # Use the equation below with NumPy and calculate NDWI for each period. The index represents the water body level on the surface against soil and vegitation. # # \begin{gather*} # NDWI = \frac{Green - NIR}{Green + NIR} # \end{gather*} # # **Note**: Use the `normalize` function below to normalize values in each band. # # **3.1.** (0.5 point) Calculate NDWI **before** harvey and save it as `ndwi_before`. <br> # **3.2.** (0.5 point) Calculate NDWI **after** harvey and save it as `ndwi_after`. 
<br> def normalize(array): ''' normalize: normalize a numpy array so all value are between 0 and 1 ''' array_min, array_max = array.min(), array.max() return (array - array_min) / (array_max - array_min) # + # Your code here (Before Harvey) # + # Your code here (After Harvey) # - # **Check Your Result Here** You should see the images below if you write the code properly. # # ![](./data/task3_example.png) # + """ Test code for the previous code. This cell should NOT give any errors when it is run.""" fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 10)) ax1.imshow(ndwi_before, cmap='Greys_r') ax2.imshow(ndwi_after, cmap='Greys_r') ax1.set_title('Before Harvey') ax2.set_title('After Harvey') plt.show() # - # ### 4. Classify Raster Image based on the NDWI (1 point) # # Now, you will classify the imagery of two periods (before and after Harvey) to examine the impact of flooding. Based on the table below, I provided `bounds`, `ndwi_colors`, `ndwi_cmap`, and `ndwi_names`. These values will help you to plot the classification result in the last cell of this section. # # | NDWI Range | Meaning | # | :-: | :-: | # | 0.2 ~ 1 | Water surface | # | 0.0 ~ 0,2 | Flooding| # | -0.3 ~ 0.0 | Moderate drought | # | -1 ~ -0.3 | Drought | # # **4.1.** (0.5 point) Classify the values in `ndwi_before` array with `np.digitize()` method, and save the result as `ndwi_before_class`. <br> # **4.2.** (0.5 point) Classify the values in `ndwi_after` array with `np.digitize()` method, and save the result as `ndwi_after_class`. <br> # **Note**: Be aware that you need to remove `nan` value in each array with `np.nan_to_num()` method. Convert `nan` to `-1` for our example here. # Run this cell before running your code. 
bounds = np.array([-2, -0.3, 0, 0.2, 2]) # Define boundary
ndwi_colors = ["grey", "lightblue", "blue", "darkblue"] # Define color map
ndwi_cmap = ListedColormap(ndwi_colors) # Define color map
ndwi_names = ["Drought", "Moderate drought", "Flooding", "Water Surface"] # Define class names

# +
# Your code here
# -

# **Check Your Result Here** You should see the images below if you write the code properly.
#
# ![](./data/task4_example.png)

# +
""" Test code for the previous code. This cell should NOT give any errors when it is run."""

fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(20, 10))

newi_ax1 = ax1.imshow(ndwi_before_class, cmap=ndwi_cmap)
newi_ax2 = ax2.imshow(ndwi_after_class, cmap=ndwi_cmap)
ep.draw_legend(im_ax=newi_ax2, classes=np.unique(ndwi_after_class), titles=ndwi_names)

ax1.set_title('Before Harvey')
ax2.set_title('After Harvey')

plt.show()
# -

# ### 5. Calculate the area covered by water and its percentage over the entire area (1 point + a)
#
# **5.1.** (0.5 point) Calculate the percentage of area covered by water, **before** Hurricane Harvey, save it as `percent_before_harvey`. <br>
# **5.2.** (0.5 point) Calculate the percentage of area covered by water, **after** Hurricane Harvey, save it as `percent_after_harvey`.
#
# **5.3. (Extra 1 point)** Calculate the size of area (in Square KM) which wasn't covered by water before Harvey but covered by water after Harvey. Save the result as `diff_area` with the numerical format. <br>
# **Note:** the cell size (resolution) of Sentinel2 is 10 meters.

# +
# Your code here


# Your code here (Extra Point)

# +
""" Test code for the previous code. This cell should NOT give any errors when it is run."""

assert round(percent_before_harvey, 2) == 0.03
assert round(percent_after_harvey, 2) == 0.41

print('Success!')

# +
""" Test code for the previous code. This cell should NOT give any errors when it is run."""

assert round(diff_area) == 111 or round(diff_area) == 112

# BUGFIX: corrected the misspelled success message ("Congraculations!").
print('Congratulations! You earned extra point!')
# -

# ### *You have finished Lab 3: Spatial data manipulation: Raster*
# Please name your jupyter notebook as `GEOG489_Lab3_[YOUR_NET_ID].ipynb`, and upload it to https://learn.illinois.edu.
Labs/Lab3/Lab3_spatial_data_manipulation_raster.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Project # # The project's objetive will be to create an scrapper that checks the portfolio section of the webpage. It will be divided into three main sections: # # * Scrapper: This section will retrieve an image from the website section that belongs to the portfolio # * Image Checker: This section will check the image retrieved by the scrapper with a previous expected template # * Checker: Our main module, this section is intended to implement the main logic of our website tester # # ## Scrapper # import packages from selenium import webdriver # Create a browser instance and gets the web page browser = webdriver.Firefox() browser.get("file:///home/jota/repositorios/pymedellin/websites/magnum-template/index.html") # The goal is to check the portfolio section, so let's look all the elements with the class 'page-scroll' page_scroll_objects = browser.find_elements_by_class_name("page-scroll") len(page_scroll_objects) # As there are 5 elements, we need to loop over them to know which one is the portfolio link button for obj in page_scroll_objects: print(obj.text) # + portfolio = None for obj in page_scroll_objects: if obj.text == "PORTFOLIO": portfolio = obj break # Check the type of portfolio, it should be a selenium object type(portfolio) # + # Makes click on the element portfolio.click() # - # Takes a screencapture browser.save_screenshot("screencaptures/portfolio_template.png") # Nice, you have created a scrapper to retrieve information from the website. 
Now let's compile all of this in a function to make easier to call it later # + # Function to create the scrapper of the navbar from selenium import webdriver import time def scrapper_navbar(url, section_name, path_to_store, template=False): '''@url: The url of the website @section_name: The navbar section name @path_to_store: The path where the images will be stored @template: If True, saves a template with the section name ''' browser = webdriver.Firefox() browser.get("file:///home/jota/pypereira/websites/magnum-template/index.html") page_scroll_objects = browser.find_elements_by_class_name("page-scroll") section = None for obj in page_scroll_objects: if obj.text == "PORTFOLIO": section = obj break # Check the type of portfolio, it should be a selenium object if section is None: print("[Error getting the navbar section]") return None # Makes click on the button section section.click() time.sleep(5) # Conditional to create a template if template: file_path = "{}/template_{}.png".format(path_to_store, section_name.lower()) result = browser.save_screenshot(file_path) browser.quit() return result file_path = "{}/test_{}.png".format(path_to_store, section_name.lower()) result = browser.save_screenshot(file_path) browser.quit() return result # + # Tests your function url = "file:///home/jota/pypereira/websites/magnum-template/index.html" section_name = "PORTFOLIO" path_to_store = "/home/jota/pypereira/screencaptures/" # Let's create a template scrapper_navbar(url, section, path_to_store, template=True) # + # Let's create a testing image scrapper_navbar(url, section, path_to_store) # - # You should have now two images that belongs to the portfolio testing and template, nice! 
:) # # # Image Checker # # Now let's create an image checker with the OpenCV concepts that we have learned def image_comparator(path_to_store, section_name): ''' @section_name: The navbar section name @path_to_store: The path where the images will be stored ''' template_path = "{}/template_{}.png".format(path_to_store, section_name.lower()) testing_path = "{}/test_{}.png".format(path_to_store, section_name.lower()) template = cv2.imread(template_path) testing = cv2.imread(testing_path) if template is None or testing is None: print("[ERROR] could not load images, please check your section name") return None template_gray = cv2.cvtColor(template, cv2.COLOR_RGB2GRAY) testing_gray = cv2.cvtColor(testing, cv2.COLOR_RGB2GRAY) # Applies a bitwise XOR operation that will return one only if some pixel is different xor = np.bitwise_xor(template_gray, testing_gray) ones = cv2.countNonZero(xor) return ones > 0 # This function compares if two images (our test image and the template) are equals or not. First we attempt to load the images giving as argument the path and the section to analyze. The function assumes that you will have two images with the section name that begin with 'template_' and 'test_' respectively. # # Returns True if any difference is found. 
def store_differences(path_to_store, section_name):
    '''Highlight and save the pixel differences between template and test images.

    @section_name: The navbar section name
    @path_to_store: The path where the images will be stored

    Writes "differences_<section>.png" into `path_to_store` with the changed
    regions outlined in red. Returns the result of cv2.imwrite (True on
    success), or None when either input image is missing.
    '''
    template_path = "{}/template_{}.png".format(path_to_store, section_name.lower())
    testing_path = "{}/test_{}.png".format(path_to_store, section_name.lower())

    template = cv2.imread(template_path)
    testing = cv2.imread(testing_path)

    if template is None or testing is None:
        print("[ERROR] could not load images, please check your section name")
        return None

    # Find differences
    result = cv2.absdiff(template, testing)
    gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 1, 255, 0)

    # Find contours
    # BUGFIX: cv2.findContours returns (image, contours, hierarchy) in
    # OpenCV 3 but (contours, hierarchy) in OpenCV 2/4, so indexing with [1]
    # picked the hierarchy on some versions. [-2] selects the contour list on
    # every OpenCV version.
    cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    cv2.drawContours(result, cnts, -1, (0, 0, 255), 1)

    # Store the differences
    file_path = "{}/differences_{}.png".format(path_to_store, section_name.lower())
    return cv2.imwrite(file_path, result)


# This function was taken literally from our OpenCV lesson, it loads both images to be read, applies absolute differences and finds and draws their contours. Finally, it stores the differences mask.
# ## Checker
#
# Let's finish implementing our main algorithm

def checker(url, section_name, path_to_store, template=False):
    '''Run the full website regression check for one navbar section.

    @url: The url of the website
    @section_name: The navbar section name
    @path_to_store: The path where the images will be stored
    @template: If True, (re)creates the template screenshot first

    Returns False when the test screenshot matches the template; otherwise
    the result of storing the difference mask. Returns None/False early if a
    screenshot or comparison step fails.
    '''
    # Creates a template
    if template:
        print("[INFO] Attempting to store template")
        result = scrapper_navbar(url, section_name, path_to_store, template=template)
        if not result or result is None:
            print("[ERROR] Could not save your template")
            return result

    # Creates a test image
    # BUGFIX: this call passed the undefined name `section` instead of the
    # `section_name` parameter, raising a NameError on every run.
    test = scrapper_navbar(url, section_name, path_to_store)
    if not test or test is None:
        print("[ERROR] Could not save your test image")
        # BUGFIX: this branch returned `result`, which is unbound when
        # template=False (UnboundLocalError); return the failed test value.
        return test

    # Compares images
    result = image_comparator(path_to_store, section_name)
    if result:
        # Images are different
        print("[INFO] Images are different, storing difference mask")
        stored = store_differences(path_to_store, section_name)
        if not stored:
            print("[ERROR] could not store the difference mask")
            return stored
        print("[INFO] difference mask stored")
        return stored

    print("[INFO] Images are equal, your deployment did not affect your website!")
    return result


# +
# let's test the checker
url = "file:///home/jota/pypereira/websites/magnum-template/index.html"
section_name = "PORTFOLIO"
path_to_store = "/home/jota/pypereira/screencaptures/"

checker(url, section_name, path_to_store, template=False)
# -

# Now go to the magnum website folder and open the index.html file in a text editor. Go to line 95 and edit the portfolio header, change it for any word that you wish, I will change it for 'portafolio' which is not an english word. Run the scrapper again and test if it finds this small but not desired difference
Run the scrapper again and test if it finds this small but not desired difference checker(url, section_name, path_to_store, template=False) # Below the result image # # <img src="screencaptures/differences_portfolio_test_1.png"> # Now let's make another test, edit the line 95 of index.html again and let the header again as 'portfolio'. # # Go to the CSS folder and open the 'style.css' file. Edit the line 11 and change the color to ```#777```, this is a small change but again we want to retrieve ANY change in our deployments checker(url, section_name, path_to_store, template=False) # Below the result image # # <img src="screencaptures/differences_portfolio_test_2.png"> # # Nice, you know the basics about how to use OpenCV and Selenium for scrapper and testing purposes!! :)
Scrapper and Checker.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp utils.model
# -

# # UtilsModel
#
# > API details.

# +
# %load_ext autoreload
# %autoreload 2

import matplotlib as mpl

# %matplotlib inline
# -

# +
#export
from functools import partial
import re

import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator, NullFormatter
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from torch.autograd import Variable
import pretrainedmodels
import pytorch_lightning as pl
from pytorch_lightning.core import LightningModule
from pytorch_lightning.metrics import functional as FM

from isic.layers import LabelSmoothingCrossEntropy, LinBnDrop, AdaptiveConcatPool2d, sigmoid, sigmoid_, norm_types, cond_init
from isic.callback.freeze import FreezeCallback, UnfreezeCallback
from isic.utils.core import first
# -

# +
#export
def set_require_grad(p, b):
    "Set `p.requires_grad` to `b`, unless `p` is flagged `force_train` (then always True)."
    if getattr(p, 'force_train', False):
        p.requires_grad_(True)
        return
    p.requires_grad_(b)

def freeze_to(n, model, n_groups):
    """Freeze the first `n` parameter groups of `model` (of `n_groups` total).

    Groups `[n:]` are made trainable, groups `[:n]` are frozen.  `n` may be
    negative (counted from the end, like normal slicing).
    """
    frozen_idx = n if n >= 0 else n_groups + n
    if frozen_idx >= n_groups:
        # TODO use warnings.warn
        print(f"Freezing {frozen_idx} groups; model has {n_groups}; whole model is frozen.")
    for ps in model.get_params(split_bn=False)[n:]:
        for p in ps:
            # requires_grad -> True
            set_require_grad(p, True)
    for ps in model.get_params(split_bn=False)[:n]:
        for p in ps:
            # requires_grad -> False
            set_require_grad(p, False)

def freeze(model, n_groups):
    "Freeze every parameter group except the last one (usually the head)."
    assert(n_groups>1)
    freeze_to(-1, model, n_groups)

def unfreeze(model, n_groups):
    "Make every parameter group of `model` trainable."
    freeze_to(0, model, n_groups)
# -

# +
#export
def get_num_ftrs(model, cut):
    "Infer the channel count coming out of `model`'s children up to `cut` via a dummy forward pass."
    # TODO: Handle models that take 1-channel input.
    c_in, h, w = 3, 64, 64
    modules = list(model.children())[:cut]
    body = nn.Sequential(*modules)
    x = torch.rand(1, c_in, h, w)
    out = body.eval()(x)
    return out.shape[1]

def params(m):
    "Return all parameters of `m`"
    return list(m.parameters())

def has_pool_type(m):
    "Return `True` if `m` is a pooling layer or has one in its children"
    # NOTE(review): in the original, this docstring sat after the nested def
    # and was therefore a no-op string, not a docstring.
    def _is_pool_type(l): return re.search(r'Pool[123]d$', l.__class__.__name__)
    if _is_pool_type(m): return True
    for l in m.children():
        if has_pool_type(l): return True
    return False
# -

#export
def create_head(n_in, n_out, lin_ftrs=None, p=0.5, concat_pool=True):
    """Build a classifier head: pool -> flatten -> (Linear/BN/Dropout) blocks.

    `n_in` is doubled when `concat_pool` because AdaptiveConcatPool2d stacks
    avg- and max-pool outputs.  Hidden layers use dropout `p/2`, the final
    layer uses `p`.
    """
    n_in = n_in * (2 if concat_pool else 1)
    lin_ftrs = [n_in, 512, n_out] if lin_ftrs is None else [n_in] + lin_ftrs + [n_out]
    p_dropouts = [p/2] * (len(lin_ftrs) - 2) + [p]
    activations = [nn.ReLU(inplace=True)] * (len(lin_ftrs) - 2) + [None]
    pool = AdaptiveConcatPool2d() if concat_pool else nn.AdaptiveAvgPool2d(1)
    layers = [pool, nn.Flatten()]
    # `dp` renamed from `p`, which shadowed the dropout parameter above.
    for ni, no, dp, actn in zip(lin_ftrs[:-1], lin_ftrs[1:], p_dropouts, activations):
        layers += LinBnDrop(ni, no, bn=True, p=dp, act=actn)
    return nn.Sequential(*layers)

#export
def create_body(arch):
    """Cut a pretrained `arch` into a headless body.

    `arch` may be a torchvision/pretrainedmodels model name (str) or an
    already-built module.  Returns `(body, split_fn, num_ftrs)` where
    `split_fn` yields parameter groups for discriminative freezing.
    """
    from isic.hook import num_features_model
    def _xresnet_split(m): return [params(m[0][:3]), params(m[0][3:]), params(m[1:])]
    def _resnet_split(m): return [params(m[0][:6]), params(m[0][6:]), params(m[1:])]
    def _squeezenet_split(m): return [params(m[0][0][:5]), params(m[0][0][5:]), params(m[1:])]
    def _densenet_split(m:nn.Module): return [params(m[0][0][:7]), params(m[0][0][7:]), params(m[1:])]
    def _vgg_split(m:nn.Module): return [params(m[0][0][:22]), params(m[0][0][22:]), params(m[1:])]
    def _alexnet_split(m:nn.Module): return [params(m[0][0][:6]), params(m[0][0][6:]), params(m[1:])]
    def _norm_split(m): return [params(m[0]), params(m[1])]

    if isinstance(arch, str):
        # FIX(review): `getattr(models, arch)` raises AttributeError for unknown
        # names, so the original `if model is None` fallback to pretrainedmodels
        # was unreachable.  Use a default to make the fallback actually fire.
        model_fn = getattr(models, arch, None)
        if model_fn is not None:
            model = model_fn(pretrained=True)
        else:
            model = getattr(pretrainedmodels, arch)(num_classes=1000, pretrained='imagenet')
        if 'xresnet' in arch:
            cut = -4
            split = _xresnet_split
        elif 'resnet' in arch:
            cut = -2
            split = _resnet_split
        elif 'squeeze' in arch:
            cut = -1
            split = _squeezenet_split
        elif 'dense' in arch:
            cut = -1
            split = _densenet_split
        elif 'vgg' in arch:
            cut = -2
            split = _vgg_split
        elif 'alex' in arch:
            cut = -2
            split = _alexnet_split
        else:
            # Unknown family: cut just before the last pooling layer.
            ll = list(enumerate(model.children()))
            cut = next(i for i,o in reversed(ll) if has_pool_type(o))
            split = _norm_split
        body = nn.Sequential(*list(model.children())[:cut])
    else:
        model = arch
        ll = list(enumerate(model.children()))
        cut = next(i for i,o in reversed(ll) if has_pool_type(o))
        split = _norm_split
        body = nn.Sequential(*list(model.children())[:cut])
    num_ftrs = num_features_model(body)
    return body, split, num_ftrs

# +
#export
def apply_leaf(m, f):
    "Apply `f` to children of `m`."
    c = m.children()
    if isinstance(m, nn.Module): f(m)
    for l in c: apply_leaf(l,f)

# Cell
def apply_init(m, func=nn.init.kaiming_normal_):
    "Initialize all non-batchnorm layers of `m` with `func`."
    apply_leaf(m, partial(cond_init, func=func))
# -

#export
def get_bias_batchnorm_params(m, with_bias=True):
    "Return all bias and BatchNorm params"
    if isinstance(m, norm_types):
        return list(m.parameters())
    res = []
    for c in m.children():
        res += get_bias_batchnorm_params(c, with_bias)
    if with_bias and getattr(m, 'bias', None) is not None:
        res.append(m.bias)
    return res

# +
#export
def print_grad_block(ms):
    """
    This version still prints block modules.
    """
    for m in ms.children():
        r = []
        print(m)
        for p in m.parameters():
            if hasattr(p, 'requires_grad'):
                r.append(p.requires_grad)
        print(r)

def check_attrib_module(ms, attribs=['requires_grad', 'skip_wd']):
    """
    This version only prints the smallest (leaf) modules.
    """
    # NOTE: the mutable default `attribs` is never mutated, so it is safe here.
    for m in ms.children():
        if len(list(m.children()))>0:
            check_attrib_module(m, attribs)
            continue
        print(m)
        r = []
        for name, p in m.named_parameters():
            for attr in attribs:
                if hasattr(p, attr):
                    r.append(name + '-' + attr + '-' + str(getattr(p, attr)))
        print(r)

def get_module_with_attrib(model, attrib='requires_grad'):
    "Print the names of parameters whose `attrib` is truthy."
    for n, p in model.named_parameters():
        if getattr(p, attrib, False):
            print(n)
# -

#export
def plot_lr_loss(lrs, losses):
    "Plot `losses` against `lrs` on a log-x axis; return `(fig, ax)`."
    fig, ax = plt.subplots(1, 1)
    ax.plot(lrs, losses)
    ax.set_xscale('log')
    ax.xaxis.set_major_locator(LogLocator(base=10, numticks=12))
    locmin = LogLocator(base=10.0, subs=np.arange(2, 10, 2)*.1, numticks=12)
    ax.xaxis.set_minor_locator(locmin)
    ax.xaxis.set_minor_formatter(NullFormatter())
    return fig, ax

#export
def lr_find(model, dm, min_lr=1e-7, max_lr=1., n_train=100, exp=True, cpu=False,
            fast_dev_run=False, skip_last=5, verbose=False):
    """Run the Lightning LR-range test and plot the result.

    Returns the `lr_finder` object, or `None` when `fast_dev_run=True`
    (which only smoke-tests a single `trainer.fit`).
    """
    args = {}
    lr_finder = None
    if not cpu:
        args = {
            "gpus": 1,
            "precision": 16
        }
    if fast_dev_run:
        trainer = pl.Trainer(fast_dev_run=True, **args)
        trainer.fit(model, dm)
    else:
        trainer = pl.Trainer(max_epochs=1, **args)
        lr_finder = trainer.tuner.lr_find(model, dm.train_dataloader(), dm.val_dataloader(),
                                          min_lr=min_lr, max_lr=max_lr, num_training=n_train,
                                          mode='exponential' if exp else 'linear',
                                          early_stop_threshold=None)
        # Inspect results (drop the noisy last `skip_last` points).
        lrs, losses = lr_finder.results['lr'][:-skip_last], lr_finder.results['loss'][:-skip_last]
        fig, ax = plot_lr_loss(lrs, losses)
        opt_lr = lr_finder.suggestion()
        ax.plot(lrs[lr_finder._optimal_idx], losses[lr_finder._optimal_idx],
                markersize=10, marker='o', color='red')
        ax.set_ylabel("Loss")
        ax.set_xlabel("Learning Rate")
        print(f'LR suggestion: {opt_lr:e}')
        if verbose:
            print('Optimizer Information:')
            print(trainer.optimizers[0])
            print('='*88)
            print(('*'*30)+'Check requires_grad/ skip_wd' + ('*'*30))
            print(('-'*40)+' BODY ' + ('-'*40))
            check_attrib_module(model.model[0])
            print(('*'*40)+' HEAD ' + ('*'*40))
            check_attrib_module(model.model[1])
    return lr_finder

#export
class ParameterModule(nn.Module):
    "Register a lone parameter `p` in a module."
    def __init__(self, p):
        # FIX(review): the original skipped super().__init__(), so assigning
        # an nn.Parameter attribute raised AttributeError (no _parameters dict).
        super().__init__()
        self.val = p
    def forward(self, x): return x

# +
# `a` is a lone parameter (not wrapped in a module)
class TstModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.a,self.lin = nn.Parameter(torch.randn(1)),nn.Linear(5,10)

test = TstModule()
list(test.children())
# -

#export
def _has_children(m):
    "Return True if module `m` has at least one child module."
    try: next(m.children())
    except StopIteration: return False
    return True

# +
#export
def has_params(m):
    "Check if `m` has at least one parameter"
    return len(list(m.parameters())) > 0

def total_params(m):
    "Give the number of parameters of a module and if it's trainable or not"
    params = sum([p.numel() for p in m.parameters()])
    trains = [p.requires_grad for p in m.parameters()]
    return params, (False if len(trains)==0 else trains[0])
# -

#export
# Monkey-patch: expose `_has_children` as a property on every nn.Module.
nn.Module.has_children = property(_has_children)

# +
#export
def children_and_parameters(m):
    "Return the children of `m` and its direct parameters not registered in modules."
    children = list(m.children())
    children_p = sum([[id(p) for p in c.parameters()] for c in m.children()],[])
    for p in m.parameters():
        if id(p) not in children_p:
            children.append(ParameterModule(p))
    return children

def flatten_model(m):
    "Return the list of all submodules and parameters of `m`"
    return sum(map(flatten_model, children_and_parameters(m)), []) if m.has_children else [m]
# -

#export
def in_channels(m):
    "Return the shape of the first weight layer in `m`."
    for l in flatten_model(m):
        if getattr(l, 'weight', None) is not None and l.weight.ndim==4:
            return l.weight.shape[1]
    raise Exception('No weight layer')

#export
def one_param(m):
    "First parameter in `m`"
    return first(m.parameters())

#export
def log_metrics_per_key(logger, metrics):
    "Log each per-class metric value of `metrics` as `val_<metric>_<class>`."
    # FIX(review): the original key list was corrupted to `['<KEY> 'vasc']`,
    # which is not even valid Python.  It ended in 'vasc', matching the seven
    # HAM10000/ISIC lesion codes below — TODO confirm against the datamodule.
    keys = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']
    for m_k, v in metrics.items():
        for i, k in enumerate(keys):
            # FIX(review): the original logged every value twice in a row.
            logger.log(f"val_{m_k}_{k}", v[i], prog_bar=True)

#export
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al. 2017): -alpha * (1-p)^gamma * log(p).

    `alpha` may be None (uniform weights), a tensor, or a Variable of
    per-class weights.  `size_average` selects mean vs sum reduction.
    """
    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        if alpha is None:
            self.alpha = Variable(torch.ones(class_num, 1))
        else:
            if isinstance(alpha, Variable):
                self.alpha = alpha
            else:
                self.alpha = Variable(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        # inputs: (N, C) logits; targets: (N,) class indices.
        device = inputs.device
        N = inputs.size(0)
        C = inputs.size(1)
        # FIX(review): softmax without `dim` is deprecated/ambiguous; the class
        # dimension is 1 (see the scatter_ on dim 1 below).
        P = F.softmax(inputs, dim=1)

        # One-hot mask of the target class for each sample.
        class_mask = inputs.data.new(N, C).fill_(0)
        class_mask = Variable(class_mask)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids.data, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.to(device)
        alpha = self.alpha[ids.data.view(-1)]

        # Probability assigned to the true class of each sample.
        probs = (P*class_mask).sum(1).view(-1,1)
        log_p = probs.log()

        batch_loss = -alpha*(torch.pow((1-probs), self.gamma))*log_p

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss

from nbdev.export import *
notebook2script('utils_model.ipynb')
nbs/utils_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
from sklearn import metrics
import ch10util
import dautil as dl
from IPython.display import HTML

context = dl.nb.Context('precision_recall')
lr = dl.nb.LatexRenderer(chapter=10, context=context)
lr.render(r'\text{Accuracy}=\frac{T_p+T_n}{T_p+T_n+F_p+F_n}')
lr.render(r'\text{Precision}=\frac{T_p}{T_p+F_p}')
# FIX(review): recall is TP / (TP + FN); the original rendered TP / (TP + TN).
lr.render(r'\text{Recall}=\frac{T_p}{T_p+F_n}')
lr.render(r'F_1 = 2 \cdot \frac{\mathrm{precision} \cdot \mathrm{recall}}{\mathrm{precision} + \mathrm{recall}}')

y_test = np.load('rain_y_test.npy')
# Compute the per-classifier prediction arrays once instead of once per metric
# (the original called ch10util.rain_preds() four times).
all_preds = ch10util.rain_preds()
accuracies = [metrics.accuracy_score(y_test, preds) for preds in all_preds]
precisions = [metrics.precision_score(y_test, preds) for preds in all_preds]
recalls = [metrics.recall_score(y_test, preds) for preds in all_preds]
f1s = [metrics.f1_score(y_test, preds) for preds in all_preds]

# %matplotlib inline
dl.options.mimic_seaborn()
dl.nb.RcWidget(context)
dl.nb.LabelWidget(2, 2, context)

# +
# One bar chart per metric, one bar per classifier.
sp = dl.plotting.Subplotter(2, 2, context)
ch10util.plot_bars(sp.ax, accuracies)
sp.label()

ch10util.plot_bars(sp.next_ax(), precisions)
sp.label()

ch10util.plot_bars(sp.next_ax(), recalls)
sp.label()

ch10util.plot_bars(sp.next_ax(), f1s)
sp.label()
sp.fig.text(0, 1, ch10util.classifiers())
HTML(sp.exit())
# -
Module2/Python_Data_Analysis_code/Chapter 10/precision_recall.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <center><h1>Python Pandas Tutorial</h1></center>
# ![Pandas image](https://pandas.pydata.org/_static/pandas_logo.png)

# ## Pandas is Python Data Analysis Library
#
# [Pandas](https://pandas.pydata.org "https://pandas.pydata.org/") is an open source, BSD-licensed(can use for commercial means) library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language
# * Widely used
# * Open Source
# * Active Development
# * Great Documentation
#
# Home Page: http://pandas.pydata.org/
#
# Using Documentation from: http://pandas.pydata.org/pandas-docs/stable/
#
# Fantastic Cheat Sheet: http://pandas.pydata.org/Pandas_Cheat_Sheet.pdf
#
# Best book by Panda's creator Wes McKinney (2nd Edition 2017): http://shop.oreilly.com/product/0636920050896.do

import pandas as pd

# +
# Pandas is a big package, importing it took a while...
# -

import numpy as np # another big library with various numeric functions

import matplotlib.pyplot as plt

# # Panda's two fundamental data structures: Series and DataFrame.

# ### Series
# A Series is a one-dimensional array-like object containing a sequence of values (
# similar types to NumPy types) and an associated array of data labels - index.
# Simplest Series is from an array of data.

# +
# Let's create some Series!
# -

s = pd.Series([1,4,3.5,3,np.nan,0,-5])

s

# Arithmetic is element-wise; NaN propagates through operations.
s+4

s2 = s * 4

s2

s2**2

# +
### Often you want Series with an index identifying each data point with a label
# -

labeledSeries = pd.Series([24, 77, -35, 31], index=['d', 'e', 'a', 'g'])

labeledSeries

# +
# The same result as above. Arguments order can be changed but then variable 'data' must be used.

labeledSeries = pd.Series(index=['d', 'e', 'a', 'g'], data=[24, 77, -35, 31])
labeledSeries
# -

# Attribute-style access works when the label is a valid identifier.
labeledSeries.g

# +
## A bit similar to dictionary isn't it?
# -

labeledSeries['g']

labeledSeries.index

labeledSeries.values

labeledSeries[['a','d']] # NOTE double list brackets!! Using index labels.

labeledSeries[[0, 3, 1]] # NOTE double list brackets!! Using indices.

labeledSeries[0:3] # NOTE single list brackets!! Using slice.

labeledSeries[labeledSeries > 30] # NOTE there is builtin for loop and if statement in list brackets!! Returns all True items.

# +
# So Series is a fixed-length, ordered dictionary with extra helper methods
# -

# Membership tests check the index, not the values...
'd' in labeledSeries

# ...so to test a value, use `.values`.
77 in labeledSeries.values

# +
# Can create series from dictionary by simply passing to constructor pd.Series(mydict)
# -

citydict = {'Riga': 650000, 'Tukums':20000, 'Ogre': 25000, 'Carnikava': 3000}

citydict

cseries = pd.Series(citydict)

cseries

## Overwriting default index
clist = ['Jurmala', 'Riga', 'Tukums', 'Ogre', 'Daugavpils']
cseries2 = pd.Series(citydict, index = clist)

cseries2

# +
# notice Carnikava was lost, since our index did not have it!
# and order was preserved from the given index list!
# -

# Check for missing data
cseries2.isnull()

# +
# I printed both series again for next example clarity.
print(cseries, '\n')
print(cseries2)
# -

# Addition aligns on index labels; labels present in only one side become NaN.
cseries3 = cseries + cseries2

cseries3

# +
# So NaN + number = NaN
# -

cseries.name = "Latvian Cities"
cseries.index.name = "City"

cseries

cseries.index

cseries.index = ['CarnikavaIsNotaCity','OgreEatsHumans', 'RigaIsOld', 'TukumsSmukums']

cseries

# +
# Notice that index name was lost after renaming.
# -

# Series values are mutable
cseries['RigaIsOld']=625000

cseries

# +
# How to rename individual index elements?
# This does not work (Index objects are immutable — this cell raises TypeError).

cseries.index[2]='RigaIsOldButFantastic'
cseries

# +
# We use the rename method to rename individual elements
# -

# NOTE: rename returns a new Series; `cseries` itself is unchanged.
cseries.rename(index={'RigaIsOld':'RigaRocks'})

# Integer Indexes
# Working with pandas objects indexed by integers is something that often trips up
# new users due to some differences with indexing semantics on built-in Python data
# structures like lists and tuples. For example, you might not expect the following code
# to generate an error:
#
#

# # Series can not be indexed like lists or tuples if index is not specified or it is an integer. Following code does not work.

ser = pd.Series(np.arange(3.))

ser

# Raises an error by design — -1 is treated as a label, not a position.
ser[-1]

# In this case, pandas could “fall back” on integer indexing, but it’s difficult to do this in
# general without introducing subtle bugs. Here we have an index containing 0, 1, 2,
# but inferring what the user wants (label-based indexing or position-based) is difficult:
#

# ser

# +
## With a non-integer index there is no potential for ambiguity:
# -

ser2 = pd.Series(np.arange(3.), index=['a', 'b', 'c'])

ser2[-1]

ser2[::-1]

## To keep things consistent, if you have an axis index containing integers, data selection
##will always be label-oriented. For more precise handling, use loc (for labels) or iloc
## (for integers):

ser[:2]

ser.loc[:1]

ser.iloc[:2]

cseries.iloc[:2]

cseries.loc['CarnikavaIsNotaCity']

len(ser)

ser.iloc[:1]

# * loc gets rows (or columns) with particular labels from the index.
#
# * iloc gets rows (or columns) at particular positions in the index (so it only takes integers).
#

# # Date Range creation

# +
# Default frequency is "D" for day.
dates = pd.date_range('20180521', periods=15)
dates
# -

dates = pd.date_range('20180521', periods=15, freq='W')

dates

timeseries = pd.Series(np.arange(10), index=pd.date_range('20190117', periods=10))

timeseries

# +
# Exercise for myself: Create series with all mondays in year 2019. Indices for every week must be integers starting from 1.
# -

year2019weeks = pd.Series(pd.date_range('20190101', periods=52, freq='W-MON'), index=np.arange(1, 53))

year2019weeks

# ## DataFrame
#
# A DataFrame represents a rectangular table of data and contains an ordered collection of columns.
#
# Each can be a different value type (numeric, string,
# boolean, etc.).
#
# The DataFrame has both a row and column index;
#
# Think of it
# as a dict of Series all sharing the same index.
#
# Underneath data is stored as one or more two-dimensional blocks (similar to ndarray) rather than a list, dict, or some other collection of
# one-dimensional arrays.

# +
# Many ways of Data Frame creation
# One Common way is
# from a dict of equal-length lists or NumPy arrays
# -

data = {'city': ['Riga', 'Riga', 'Riga', 'Jurmala', 'Jurmala', 'Jurmala'],
        'year': [1990, 2000, 2018, 2001, 2002, 2003],
        'popul': [0.9, 0.75, 0.62, 0.09, 0.08, 0.06]}

df = pd.DataFrame(data)

df

df2 = pd.DataFrame(data, columns=['year','city', 'popul','budget'])

df2

# +
# missing column simply given NaNs
# -

# A scalar assignment broadcasts to every row.
df2['budget']=300000000

print(df2)

df2['preschool']=list(range(2,8))

df2

df2['budget']=[300000, 250000, 400000, 200000, 250000, 200000] # need to pass all values

df2

# NOTE(review): '1192.168.3.11' is not a valid IPv4 address (extra digit) —
# this cell will fail to connect as written.
page = pd.read_html('http://1192.168.3.11/drivers_order_by/average_finish_place/')

page

# Scrapes all <table> elements from the page into a list of DataFrames.
page = pd.read_html('https://www.ss.com/lv/transport/cars/volkswagen/')

page

# +
# Many ways of changing individual values

## Recommended way of changing in place (same dataframe)
# -

df2.iat[3,2]=0.001

df2

# delete column by its name
del df2['budget']

df2

dates

df = pd.DataFrame(np.random.rand(15,5) * 100, index=dates, columns=list('ABCDE'))
# We passed 15 rows of 5 random elements and set index to dates and columns to our basic list elements

df

df2 = pd.DataFrame({ 'A' : 1.,
                     'B' : pd.Timestamp('20130102'),
                     'C' : pd.Series(1, index=list(range(4)), dtype='float32'),
                     'D' : np.array([3] * 4, dtype='int32'),
                     'E' : pd.Categorical(["test","train","test","train"]),
                     'F' : 'foo' })

df2

# +
# most columns need matching length!
# -

df3 = pd.DataFrame({ 'A' : 1.,
                     'B' : pd.Timestamp('20180523'),
                     'C' : s,
                     'D' : [x**2 for x in range(7)],
                     'E' : pd.Categorical(['test','train']*3+["train"]),
                     'F' : 'aha' })

df3

df3[['A', 'B', 'C']][2:5] # NOTE that in pandas data filtering, columns come before rows unlike in numpy.

df3.iloc[2:5, :3]

# +
## different datatypes for columns!
# -

df3.dtypes

df3.head() # default is first 5

df3.tail(3) # default is last 5

df.index

df3.index

df3.values

df3.describe()

import seaborn as sb # graphics plotting library

sb.pairplot(df3.dropna(), hue='E')

# +
# Transpose
# -

df3.T

df.sort_index(axis=1,ascending=True)

# +
## Sort by Axis in reverse
# -

df.sort_index(axis=1,ascending=False)

df3.sort_values(by='C')

# +
# Notice NaN gets last
# -

# ### Selection
#
# Note While standard Python / Numpy expressions for selecting and setting are intuitive and come in handy for interactive work, for production code, we recommend the optimized pandas data access methods, .at, .iat, .loc and .iloc.

df3['D']

df3.D #same as above! Syntactic Sugar!

df3[:5]

df3[2:5]

df3[2:5:2]

df3[::-1]

# ## Selection by Label
#
# For getting a cross section using a label:

df

df.loc[dates[0]]

df.loc[dates[2:5]]

# +
## Selecting on a multi-axis by label:
# -

df.loc[:, ['A','B','C']]

df.loc[dates[2:5], ['A','B','C']]

df.loc['20180525':'20180601',['B','C']]

# +
# Reduction in the dimensions of the returned object:
# -

df.loc['20180526', ["B", "D"]]

# +
## Getting scalars (single values)
# -

df.loc['20180526', ["D"]]

# +
# same as above
# -

df.at[dates[5],'D']

# +
## Selection by Position
# -

df.iloc[3]

# +
# By integer slices, acting similar to numpy/python:
# -

df.iloc[2:5,:2]

# +
# By lists of integer position locations, similar to the numpy/python style:
# -

df.iloc[[3,5,1],[1,4,2]]

df.iloc[2,2]

# +
# For getting fast access to a scalar (equivalent to the prior method):
# -

df.iat[2,2]

# ## Boolean Indexing

# +
## Using a single column’s values to select data.
# -

df[df.A > 0.2]

# Whole-frame boolean mask: non-matching cells become NaN.
df[df > 0]

df[df > 1]

s1 = pd.Series([x**3 for x in range(15)], index=pd.date_range('20130521', periods=15))

s1

# NOTE: s1's dates (2013) do not overlap df's index (2018), so 'F' is all NaN.
df['F'] = s1

df

# +
## This is apparently a bug! https://github.com/pandas-dev/pandas/issues/10440
# -

df['F']=42

df

df['G']=[x**3 for x in range(15)] # passing a fresh list to particular column

df

s1

s1+2

s1/3

df.at[dates[1], 'A'] = 33

df

df.iat[7,4]= 42

df

df3 = df.copy()

df3[df3 > 0.2 ] = -df3

df3

# +
# Missing Data
# pandas primarily uses the value np.nan to represent missing data. It is by default not included in computations
# -

df['H'] = s1

df

# Returns a filled copy; df itself keeps its NaNs.
df.fillna(value=3.14)

# +
# there is also df.dropna() to drop any ROWS! with missing data
# -

# ## Operations

df.mean()

# +
# Other axis
# -

df.mean(1)

# +
## Apply
# -

df.apply(lambda x: x*3) # ie same as df*3

ts = pd.Series(np.random.randn(3650), index=pd.date_range('11/18/2008', periods=3650))

ts = ts.cumsum() # cumulative sum

ts.plot()

# +
# CSV
# Writing to a csv file.
# -

df.to_csv("testing.csv")

# Reading from csv
df5= pd.read_csv('resources/random4x9.csv')

df5

df5

# +
# Excel
# -

df.to_excel('myx.xlsx', sheet_name='Sheet1')

df6=pd.read_excel('myx.xlsx', 'Sheet1', index_col=None, na_values=['NA'])

df6

df.info()

df.info(memory_usage="deep") # more reliable info
Pandas/Python Pandas Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="YJ0oxU79FwoT"
# # Video lecture

# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="M5CzQj6-F0-Y" outputId="5e003c50-e33b-4024-e32d-fc8cb61c0811"
from IPython.display import YouTubeVideo
YouTubeVideo('xRC95W55Y8g')

# + [markdown] id="OrgR4QVdZr0a"
# ## Getting the data
#
# This code downloads the [MNIST _dataset_](https://en.wikipedia.org/wiki/MNIST_database) files from the [site](http://yann.lecun.com/exdb/mnist/) of [Prof. Yann LeCun](https://en.wikipedia.org/wiki/Yann_LeCun).

# + id="D1ysJPoDYhDm" colab={"base_uri": "https://localhost:8080/"} outputId="4c0ec57f-947b-4a03-e934-92cc9f5e4db6"
# Training images
# !wget http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz -O train-images-idx3-ubyte.gz
# Training labels (classes)
# !wget http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz -O train-labels-idx1-ubyte.gz
# Validation images
# !wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz -O t10k-images-idx3-ubyte.gz
# Validation labels (classes)
# !wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz -O t10k-labels-idx1-ubyte.gz

# + [markdown] id="h-9Bk13cH3Qp"
# Below we extract the compressed files.

# + id="0Y07CnRjaJYU"
# Extract the downloaded archives
# !gunzip -f *.gz

# + [markdown] id="hy4xs5F88s4y"
# ## Importing libraries
#

# + id="hWafj-52rxED"
# Import the libraries we will use.
import torch
import torch.nn as nn
# FIX(review): the original imported `torch.functional` (an internal module);
# the conventional functional API lives in `torch.nn.functional`.
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from matplotlib import pyplot as plt

# Set the print precision for the
# variables that will be displayed.
torch.set_printoptions(precision=2, sci_mode=False)
np.set_printoptions(precision=2)

# + [markdown] id="X4O4zi5lf04t"
# ## Reading the data
#
# Below we define the helper functions that read the image file.

# + id="3-GlQ5rjbPwg"
from struct import unpack

def read_imgs(img_filename):
    '''
    Read an MNIST image file (idx3-ubyte format) and return a float array
    of shape (total, height, width) with pixel values normalised to [0, 1].
    '''
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(img_filename, 'rb') as img_file:
        # Header: four big-endian int32 values.
        magic = unpack('>i', img_file.read(4))[0]
        total = unpack('>i', img_file.read(4))[0]
        height = unpack('>i', img_file.read(4))[0]
        width = unpack('>i', img_file.read(4))[0]

        # MNIST image files always start with the magic number 2051.
        # FIX(review): the original only printed a warning and then produced
        # garbage data; failing fast is safer.
        if magic != 2051:
            raise ValueError('Erro, este arquivo não parece ser um arquivo de imagens MNIST')

        # FIX(review): read every pixel in one call instead of one ord() per
        # byte in a triple Python loop — identical values, orders of
        # magnitude faster for the 47 MB training file.
        raw = img_file.read(total * height * width)

    imgs = np.frombuffer(raw, dtype=np.uint8).reshape(total, height, width).astype(float)

    # Return the array normalised to [0, 1].
    return imgs / 255.0

# + [markdown] id="ex6SljeojCtM"
# Similarly to the above, here we define the helper function that reads the label file.

# + id="gpK1CJ0mgAzn"
def read_labels(labels_filename):
    '''
    Read an MNIST label file (idx1-ubyte format) and return an int array
    of shape (total,).
    '''
    with open(labels_filename, 'rb') as labels_file:
        # Header: two big-endian int32 values.
        magic = unpack('>i', labels_file.read(4))[0]
        total = unpack('>i', labels_file.read(4))[0]

        # Label files always start with the magic number 2049.
        # FIX(review): the original message wrongly said "arquivo de imagens".
        if magic != 2049:
            raise ValueError('Erro, este arquivo não parece ser um arquivo de rótulos MNIST')

        # One byte per label, read in a single call.
        raw = labels_file.read(total)

    return np.frombuffer(raw, dtype=np.uint8).astype(int)

# + [markdown] id="PgwpORvVjHBf"
# In the lines below we call the reading functions to load the images and their labels.

# + id="JEEgfzZli9yF"
# Training data
Xt = read_imgs('train-images-idx3-ubyte')
Yt = read_labels('train-labels-idx1-ubyte')

# Validation data
Xv = read_imgs('t10k-images-idx3-ubyte')
Yv = read_labels('t10k-labels-idx1-ubyte')

# + [markdown] id="4RC0FDZ-iSPc"
# ## Shuffling the samples

# + id="E1TxGaInUd-h"
def shuffle_pair(x, y):
    # Shuffle `x` and `y` with the same permutation so pairs stay aligned.
    idxs = list(range(len(y)))
    np.random.shuffle(idxs)
    return x[idxs], y[idxs]

# + id="-ri4BPntUvNO"
Xt, Yt = shuffle_pair(Xt, Yt)
Xv, Yv = shuffle_pair(Xv, Yv)

# + [markdown] id="CmBuEwouidRI"
# ## Conversion to PyTorch

# + id="VGVcLVR1FWwH"
Xt = torch.tensor(Xt)
Yt = torch.tensor(Yt, dtype=torch.long)
Xv = torch.tensor(Xv)
Yv = torch.tensor(Yv, dtype=torch.long)

# + id="xSexj4xpGkiW"
# Add the channel dimension: (N, 28, 28) -> (N, 1, 28, 28).
Xt = Xt.unsqueeze(1)
Xv = Xv.unsqueeze(1)

# + [markdown] id="4aWZ_bmPp8mV"
# # Neural network
#
# The code below defines a convolutional neural network.

# + id="KjRI2nrnxe6G"
class ConvNet(nn.Module):
    """Small CNN for 28x28 MNIST digits: two conv blocks + a linear classifier."""
    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 5, kernel_size=5)   # 1x28x28 => 5x24x24
        self.pool1 = nn.MaxPool2d(2, 2)               # 5x24x24 => 5x12x12
        # FIX(review): comment corrected — conv2 has 8 output channels.
        self.conv2 = nn.Conv2d(5, 8, kernel_size=3)   # 5x12x12 => 8x10x10
        self.drp1 = nn.Dropout2d(0.25)
        self.pool2 = nn.MaxPool2d(2, 2)               # 8x10x10 => 8x5x5
        self.lin1 = nn.Linear(200, 10)                # 8*5*5 = 200 => 10

    def forward(self, x):
        # Returns raw logits (CrossEntropyLoss applies log-softmax itself).
        x = self.conv1(x)
        x = torch.relu(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.drp1(x)
        x = torch.relu(x)
        x = self.pool2(x)
        x = x.view(-1, 200)
        x = self.lin1(x)
        return x

# + id="BkJRqpcnX_4t" colab={"base_uri": "https://localhost:8080/"} outputId="f83e4391-3036-4afd-fae3-e3092fb5dfe4"
cnn = ConvNet()
print(cnn)

# + [markdown] id="4i1e9vRBpLfK"
# # Training
#
# Each run of the training cell below performs 10 epochs; re-run it to keep improving accuracy.

# + id="T0ya2ECu8O8b"
# Here we create the optimizer and the
# loss function.
opt = optim.Adam(cnn.parameters(), lr=0.0001)
loss = nn.CrossEntropyLoss()

# + id="sBj_p1Q98SIY"
# Move everything to the GPU
# (this part is optional).
gpu = torch.device("cuda:0")
cnn = cnn.to(gpu)
Xt = Xt.to(gpu, dtype=torch.float)
Yt = Yt.to(gpu, dtype=torch.long)
Xv = Xv.to(gpu, dtype=torch.float)
Yv = Yv.to(gpu, dtype=torch.long)

# + id="iKL6ESmDMJNJ"
# This function lets us check the
# accuracy of the results, either on
# the training pairs or on the
# validation pairs.
def evaluate(x, y_hat):
    # Accuracy (%) of the current network on the pair (x, y_hat).
    # FIX(review): no_grad avoids building an autograd graph for this
    # full-dataset forward pass (same result, far less memory).
    with torch.no_grad():
        y = cnn(x).argmax(dim=1)
        return 100*float((y == y_hat).sum()) / len(y)

# + id="VU6OtvVY08_R"
# Training of the neural network.
# Trains for 10 epochs using
# mini-batches of 16 samples
# (you can re-run this cell several
# times to try to increase the
# accuracy).
for j in range(10):
    for i in range(0, len(Yt), 16):
        x = Xt[i:i+16, :, :, :]
        y_hat = Yt[i:i+16]
        opt.zero_grad()
        y = cnn(x)
        e = loss(y, y_hat)
        e.backward()
        opt.step()
    # Last batch loss and training accuracy, once per epoch.
    print(float(e), evaluate(Xt, Yt))

# + [markdown] id="cs936nxvKK2F"
# # Evaluating the results

# + colab={"base_uri": "https://localhost:8080/"} id="2GTEysvGYaPw" outputId="a04c0f24-ef02-4d64-9ce2-de509efd4809"
# This line puts the neural network
# in evaluation mode (takes it out of
# training mode). This disables dropout.
cnn.eval()

# + [markdown] id="f6ev_9PPhSui"
# Below we finally show the accuracy on the validation data. The value can even exceed the training accuracy, since dropout is disabled here.

# + colab={"base_uri": "https://localhost:8080/"} id="JkTmpVquRSN3" outputId="ff0e2ec5-a534-462f-cb86-80039f0fcaac"
print('Acurácia nos dados de validação:', evaluate(Xv, Yv))

# + [markdown] id="Ay_wjHQBhd_c"
# In the code below we show some examples of the output computed by the network together with the expected output and the corresponding image.

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="M16NciUkfFv1" outputId="4e686191-520b-411c-c804-a181b6cbddce"
for _ in range(5):
    idx = np.random.randint(0, len(Yv))
    x = Xv[idx, 0, :, :].cpu()
    y = int(cnn(Xv[idx, :, :, :].unsqueeze(1)).argmax(dim=1))
    print('y =', y, 'y_hat =', int(Yv[idx]))
    plt.imshow(x, cmap='gray')
    plt.show()

# + [markdown] id="TrUydRLphpXY"
# The line of code below puts the neural network back in training mode.

# + colab={"base_uri": "https://localhost:8080/"} id="FqlKyTe8Y8AB" outputId="7ca3ef4f-5265-4326-d8c7-606e9caec581"
cnn.train()
DeepLearning/03-Pytorch_Keras/MNIST_ConvNet_PyTorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/CarolLuca/NER_Legal_Domain_RO/blob/main/Behind%20The%20Model/behind_the_model_0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="vuBj9PIqIkWO" doc = nlp("This is a text") # + id="vHEuCXZIInCx" texts = ["This is a text", "These are lots of texts", "..."] - docs = [nlp(text) for text in texts] + docs = list(nlp.pipe(texts)) # + id="ZCSqUHWjIoW7" import spacy texts = [ "Net income was $9.4 million compared to the prior year of $2.7 million.", "Revenue exceeded twelve billion dollars, with a loss of $1b.", ] nlp = spacy.load("en_core_web_sm") for doc in nlp.pipe(texts, disable=["tagger", "parser"]): # Do something with the doc here print([(ent.text, ent.label_) for ent in doc.ents]) # + id="Yu2X5PBsIqIs" { "lang": "en", "name": "core_web_sm", "description": "Example model for spaCy", "pipeline": ["tagger", "parser", "ner"] } # + id="zaHo9KIpIr7v" nlp = spacy.load("en_core_web_sm") # + id="QqVVp2T6ItGv" lang = "en" pipeline = ["tagger", "parser", "ner"] data_path = "path/to/en_core_web_sm/en_core_web_sm-2.0.0" cls = spacy.util.get_lang_class(lang) # 1. Get Language instance, e.g. English() nlp = cls() # 2. Initialize it for name in pipeline: component = nlp.create_pipe(name) # 3. Create the pipeline components nlp.add_pipe(component) # 4. Add the component to the pipeline nlp.from_disk(model_data_path) # 5. 
Load in the binary data # + id="gIiplPr2IuqW" doc = nlp.make_doc("This is a sentence") # create a Doc from raw text for name, proc in nlp.pipeline: # iterate over components in order doc = proc(doc) # apply each component # + id="-9WJME-TI2a0" print(nlp.pipeline) # [('tagger', <spacy.pipeline.Tagger>), ('parser', <spacy.pipeline.DependencyParser>), ('ner', <spacy.pipeline.EntityRecognizer>)] print(nlp.pipe_names) # ['tagger', 'parser', 'ner'] # + id="JRYPhLhNI4Wt" # Option 1: Import and initialize from spacy.pipeline import EntityRuler ruler = EntityRuler(nlp) nlp.add_pipe(ruler) # Option 2: Using nlp.create_pipe sentencizer = nlp.create_pipe("sentencizer") nlp.add_pipe(sentencizer) # + id="tTq3HfupI50r" nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser"]) nlp = English().from_disk("/model", disable=["ner"]) # + id="jX7KP9YwI_0B" for doc in nlp.pipe(texts, disable=["tagger", "parser"]): # Do something with the doc here # + id="FI5QYaVQJBr3" # 1. Use as a contextmanager with nlp.disable_pipes("tagger", "parser"): doc = nlp("I won't be tagged and parsed") doc = nlp("I will be tagged and parsed") # 2. Restore manually disabled = nlp.disable_pipes("ner") doc = nlp("I won't have named entities") disabled.restore() # + id="Xx0aDlawJDH_" nlp.remove_pipe("parser") nlp.rename_pipe("ner", "entityrecognizer") nlp.replace_pipe("tagger", my_custom_tagger) # + id="z2AFtY5vJEgq" def my_component(doc): # do something to the doc here return doc # + id="WCllh5bQJFwi" nlp.add_pipe(my_component) nlp.add_pipe(my_component, first=True) nlp.add_pipe(my_component, before="parser") # + id="EzyX7S4IJHF4" # #!/usr/bin/env python # coding: utf8 """Example of a spaCy v2.0 pipeline component that sets entity annotations based on list of single or multiple-word company names. Companies are labelled as ORG and their spans are merged into one token. Additionally, ._.has_tech_org and ._.is_tech_org is set on the Doc/Span and Token respectively. 
* Custom pipeline components: https://spacy.io//usage/processing-pipelines#custom-components Compatible with: spaCy v2.0.0+ Last tested with: v2.1.0 """ from __future__ import unicode_literals, print_function import plac from spacy.lang.en import English from spacy.matcher import PhraseMatcher from spacy.tokens import Doc, Span, Token @plac.annotations( text=("Text to process", "positional", None, str), companies=("Names of technology companies", "positional", None, str), ) def main(text="Alphabet Inc. is the company behind Google.", *companies): # For simplicity, we start off with only the blank English Language class # and no model or pre-defined pipeline loaded. nlp = English() if not companies: # set default companies if none are set via args companies = ["Alphabet Inc.", "Google", "Netflix", "Apple"] # etc. component = TechCompanyRecognizer(nlp, companies) # initialise component nlp.add_pipe(component, last=True) # add last to the pipeline doc = nlp(text) print("Pipeline", nlp.pipe_names) # pipeline contains component name print("Tokens", [t.text for t in doc]) # company names from the list are merged print("Doc has_tech_org", doc._.has_tech_org) # Doc contains tech orgs print("Token 0 is_tech_org", doc[0]._.is_tech_org) # "Alphabet Inc." is a tech org print("Token 1 is_tech_org", doc[1]._.is_tech_org) # "is" is not print("Entities", [(e.text, e.label_) for e in doc.ents]) # all orgs are entities class TechCompanyRecognizer(object): """Example of a spaCy v2.0 pipeline component that sets entity annotations based on list of single or multiple-word company names. Companies are labelled as ORG and their spans are merged into one token. Additionally, ._.has_tech_org and ._.is_tech_org is set on the Doc/Span and Token respectively.""" name = "tech_companies" # component name, will show up in the pipeline def __init__(self, nlp, companies=tuple(), label="ORG"): """Initialise the pipeline component. 
The shared nlp instance is used to initialise the matcher with the shared vocab, get the label ID and generate Doc objects as phrase match patterns. """ self.label = nlp.vocab.strings[label] # get entity label ID # Set up the PhraseMatcher – it can now take Doc objects as patterns, # so even if the list of companies is long, it's very efficient patterns = [nlp(org) for org in companies] self.matcher = PhraseMatcher(nlp.vocab) self.matcher.add("TECH_ORGS", None, *patterns) # Register attribute on the Token. We'll be overwriting this based on # the matches, so we're only setting a default value, not a getter. Token.set_extension("is_tech_org", default=False) # Register attributes on Doc and Span via a getter that checks if one of # the contained tokens is set to is_tech_org == True. Doc.set_extension("has_tech_org", getter=self.has_tech_org) Span.set_extension("has_tech_org", getter=self.has_tech_org) def __call__(self, doc): """Apply the pipeline component on a Doc object and modify it if matches are found. Return the Doc, so it can be processed by the next component in the pipeline, if available. """ matches = self.matcher(doc) spans = [] # keep the spans for later so we can merge them afterwards for _, start, end in matches: # Generate Span representing the entity & set label entity = Span(doc, start, end, label=self.label) spans.append(entity) # Set custom attribute on each token of the entity for token in entity: token._.set("is_tech_org", True) # Overwrite doc.ents and add entity – be careful not to replace! doc.ents = list(doc.ents) + [entity] for span in spans: # Iterate over all spans and merge them into one token. This is done # after setting the entities – otherwise, it would cause mismatched # indices! span.merge() return doc # don't forget to return the Doc! def has_tech_org(self, tokens): """Getter for Doc and Span attributes. Returns True if one of the tokens is a tech org. 
Since the getter is only called when we access the attribute, we can refer to the Token's 'is_tech_org' attribute here, which is already set in the processing step.""" return any([t._.get("is_tech_org") for t in tokens]) if __name__ == "__main__": plac.call(main) # Expected output: # Pipeline ['tech_companies'] # Tokens ['Alphabet Inc.', 'is', 'the', 'company', 'behind', 'Google', '.'] # Doc has_tech_org True # Token 0 is_tech_org True # Token 1 is_tech_org False # Entities [('Alphabet Inc.', 'ORG'), ('Google', 'ORG')] # + id="f5T6a8p0JKPb" "banana.vector": array([2.02280000e-01, -7.66180009e-02, 3.70319992e-01, 3.28450017e-02, -4.19569999e-01, 7.20689967e-02, -3.74760002e-01, 5.74599989e-02, -1.24009997e-02, 5.29489994e-01, -5.23800015e-01, -1.97710007e-01, -3.41470003e-01, 5.33169985e-01, -2.53309999e-02, 1.73800007e-01, 1.67720005e-01, 8.39839995e-01, 5.51070012e-02, 1.05470002e-01, 3.78719985e-01, 2.42750004e-01, 1.47449998e-02, 5.59509993e-01, 1.25210002e-01, -6.75960004e-01, 3.58420014e-01, # ... and so on ... 3.66849989e-01, 2.52470002e-03, -6.40089989e-01, -2.97650009e-01, 7.89430022e-01, 3.31680000e-01, -1.19659996e+00, -4.71559986e-02, 5.31750023e-01], dtype=float32) # + id="f8sFLqDxJMh4" import spacy nlp = spacy.load("en_core_web_md") tokens = nlp("dog cat banana afskfsd") for token in tokens: print(token.text, token.has_vector, token.vector_norm, token.is_oov) # + id="6zLP3tXEJOhp" import spacy nlp = spacy.load("en_core_web_md") # make sure to use larger model! tokens = nlp("dog cat banana") for token1 in tokens: for token2 in tokens: print(token1.text, token2.text, token1.similarity(token2)) # + id="UlTbbFo8JQWI" # #!/usr/bin/env python # coding: utf8 """Example of training spaCy's named entity recognizer, starting off with an existing model or a blank model. 
For more details, see the documentation: * Training: https://spacy.io/usage/training * NER: https://spacy.io/usage/linguistic-features#named-entities Compatible with: spaCy v2.0.0+ Last tested with: v2.2.4 """ from __future__ import unicode_literals, print_function import plac import random import warnings from pathlib import Path import spacy from spacy.util import minibatch, compounding # training data TRAIN_DATA = [ ("Who is <NAME>?", {"entities": [(7, 17, "PERSON")]}), ("I like London and Berlin.", {"entities": [(7, 13, "LOC"), (18, 24, "LOC")]}), ] @plac.annotations( model=("Model name. Defaults to blank 'en' model.", "option", "m", str), output_dir=("Optional output directory", "option", "o", Path), n_iter=("Number of training iterations", "option", "n", int), ) def main(model=None, output_dir=None, n_iter=100): """Load the model, set up the pipeline and train the entity recognizer.""" if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # create the built-in pipeline components and add them to the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner, last=True) # otherwise, get it so we can add labels else: ner = nlp.get_pipe("ner") # add labels for _, annotations in TRAIN_DATA: for ent in annotations.get("entities"): ner.add_label(ent[2]) # get names of other pipes to disable them during training pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"] other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions] # only train NER with nlp.disable_pipes(*other_pipes), warnings.catch_warnings(): # show warnings for misaligned entity spans once warnings.filterwarnings("once", category=UserWarning, module='spacy') # reset and initialize the weights randomly – but only if we're # training a new 
model if model is None: nlp.begin_training() for itn in range(n_iter): random.shuffle(TRAIN_DATA) losses = {} # batch up the examples using spaCy's minibatch batches = minibatch(TRAIN_DATA, size=compounding(4.0, 32.0, 1.001)) for batch in batches: texts, annotations = zip(*batch) nlp.update( texts, # batch of texts annotations, # batch of annotations drop=0.5, # dropout - make it harder to memorise data losses=losses, ) print("Losses", losses) # test the trained model for text, _ in TRAIN_DATA: doc = nlp(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) for text, _ in TRAIN_DATA: doc = nlp2(text) print("Entities", [(ent.text, ent.label_) for ent in doc.ents]) print("Tokens", [(t.text, t.ent_type_, t.ent_iob) for t in doc]) if __name__ == "__main__": plac.call(main) # Expected output: # Entities [('<NAME>', 'PERSON')] # Tokens [('Who', '', 2), ('is', '', 2), ('Shaka', 'PERSON', 3), # ('Khan', 'PERSON', 1), ('?', '', 2)] # Entities [('London', 'LOC'), ('Berlin', 'LOC')] # Tokens [('I', '', 2), ('like', '', 2), ('London', 'LOC', 3), # ('and', '', 2), ('Berlin', 'LOC', 3), ('.', '', 2)] # + id="wAtRBApIJUYg" # #!/usr/bin/env python # coding: utf8 """Example of training an additional entity type This script shows how to add a new entity type to an existing pretrained NER model. To keep the example short and simple, only four sentences are provided as examples. In practice, you'll need many more — a few hundred would be a good start. 
You will also likely need to mix in examples of other entity types, which might be obtained by running the entity recognizer over unlabelled sentences, and adding their annotations to the training set. The actual training is performed by looping over the examples, and calling `nlp.entity.update()`. The `update()` method steps through the words of the input. At each word, it makes a prediction. It then consults the annotations provided on the GoldParse instance, to see whether it was right. If it was wrong, it adjusts its weights so that the correct action will score higher next time. After training your model, you can save it to a directory. We recommend wrapping models as Python packages, for ease of deployment. For more details, see the documentation: * Training: https://spacy.io/usage/training * NER: https://spacy.io/usage/linguistic-features#named-entities Compatible with: spaCy v2.1.0+ Last tested with: v2.2.4 """ from __future__ import unicode_literals, print_function import plac import random import warnings from pathlib import Path import spacy from spacy.util import minibatch, compounding # new entity label LABEL = "ANIMAL" # training data # Note: If you're using an existing model, make sure to mix in examples of # other entity types that spaCy correctly recognized before. Otherwise, your # model might learn the new type, but "forget" what it previously knew. # https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting TRAIN_DATA = [ ( "Horses are too tall and they pretend to care about your feelings", {"entities": [(0, 6, LABEL)]}, ), ("Do they bite?", {"entities": []}), ( "horses are too tall and they pretend to care about your feelings", {"entities": [(0, 6, LABEL)]}, ), ("horses pretend to care about your feelings", {"entities": [(0, 6, LABEL)]}), ( "they pretend to care about your feelings, those horses", {"entities": [(48, 54, LABEL)]}, ), ("horses?", {"entities": [(0, 6, LABEL)]}), ] @plac.annotations( model=("Model name. 
Defaults to blank 'en' model.", "option", "m", str), new_model_name=("New model name for model meta.", "option", "nm", str), output_dir=("Optional output directory", "option", "o", Path), n_iter=("Number of training iterations", "option", "n", int), ) def main(model=None, new_model_name="animal", output_dir=None, n_iter=30): """Set up the pipeline and entity recognizer, and train the new entity.""" random.seed(0) if model is not None: nlp = spacy.load(model) # load existing spaCy model print("Loaded model '%s'" % model) else: nlp = spacy.blank("en") # create blank Language class print("Created blank 'en' model") # Add entity recognizer to model if it's not in the pipeline # nlp.create_pipe works for built-ins that are registered with spaCy if "ner" not in nlp.pipe_names: ner = nlp.create_pipe("ner") nlp.add_pipe(ner) # otherwise, get it, so we can add labels to it else: ner = nlp.get_pipe("ner") ner.add_label(LABEL) # add new entity label to entity recognizer # Adding extraneous labels shouldn't mess anything up ner.add_label("VEGETABLE") if model is None: optimizer = nlp.begin_training() else: optimizer = nlp.resume_training() move_names = list(ner.move_names) # get names of other pipes to disable them during training pipe_exceptions = ["ner", "trf_wordpiecer", "trf_tok2vec"] other_pipes = [pipe for pipe in nlp.pipe_names if pipe not in pipe_exceptions] # only train NER with nlp.disable_pipes(*other_pipes), warnings.catch_warnings(): # show warnings for misaligned entity spans once warnings.filterwarnings("once", category=UserWarning, module='spacy') sizes = compounding(1.0, 4.0, 1.001) # batch up the examples using spaCy's minibatch for itn in range(n_iter): random.shuffle(TRAIN_DATA) batches = minibatch(TRAIN_DATA, size=sizes) losses = {} for batch in batches: texts, annotations = zip(*batch) nlp.update(texts, annotations, sgd=optimizer, drop=0.35, losses=losses) print("Losses", losses) # test the trained model test_text = "Do you like horses?" 
doc = nlp(test_text) print("Entities in '%s'" % test_text) for ent in doc.ents: print(ent.label_, ent.text) # save model to output directory if output_dir is not None: output_dir = Path(output_dir) if not output_dir.exists(): output_dir.mkdir() nlp.meta["name"] = new_model_name # rename model nlp.to_disk(output_dir) print("Saved model to", output_dir) # test the saved model print("Loading from", output_dir) nlp2 = spacy.load(output_dir) # Check the classes have loaded back consistently assert nlp2.get_pipe("ner").move_names == move_names doc2 = nlp2(test_text) for ent in doc2.ents: print(ent.label_, ent.text) if __name__ == "__main__": plac.call(main)
Behind The Model/behind_the_model_0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python3 # --- # <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> # <script> # window.dataLayer = window.dataLayer || []; # function gtag(){dataLayer.push(arguments);} # gtag('js', new Date()); # # gtag('config', 'UA-59152712-8'); # </script> # # # Explicit Runge Kutta methods and their Butcher tables # ## Authors: <NAME> & <NAME> # # ## This tutorial notebook stores known explicit Runge Kutta-like methods as Butcher tables in a Python dictionary format. # # **Notebook Status:** <font color='green'><b> Validated </b></font> # # **Validation Notes:** This tutorial notebook has been confirmed to be **self-consistent with its corresponding NRPy+ module**, as documented [below](#code_validation). In addition, each of these Butcher tables has been verified to yield an RK method to the expected local truncation error in a challenging battery of ODE tests, in the [RK Butcher Table Validation tutorial notebook](Tutorial-RK_Butcher_Table_Validation.ipynb). # # ### NRPy+ Source Code for this module: [MoLtimestepping/RK_Butcher_Table_Dictionary.py](../edit/MoLtimestepping/RK_Butcher_Table_Dictionary.py) # # ## Introduction: # # The family of explicit [Runge Kutta](https://en.wikipedia.org/w/index.php?title=Runge%E2%80%93Kutta_methods&oldid=898536315)-like methods are commonly used when numerically solving ordinary differential equation (ODE) initial value problems of the form # # $$ y'(t) = f(y,t),\ \ \ y(t_0)=y_0.$$ # # These methods can be extended to solve time-dependent partial differential equations (PDEs) via the [Method of Lines](https://en.wikipedia.org/w/index.php?title=Method_of_lines&oldid=855390257). 
In the Method of Lines, the above ODE can be generalized to $N$ coupled ODEs, all written as first-order-in-time PDEs of the form # # $$ \partial_{t}\mathbf{u}(t,x,y,u_1,u_2,u_3,...)=\mathbf{f}(t,x,y,...,u_1,u_{1,x},...),$$ # # where $\mathbf{u}$ and $\mathbf{f}$ are vectors. The spatial partial derivatives of components of $\mathbf{u}$, e.g., $u_{1,x}$, may be computed using approximate numerical differentiation, like finite differences. # # As any explicit Runge-Kutta method has its own unique local truncation error, can in principle be used to solve time-dependent PDEs using the Method of Lines, and may be stable under different Courant-Friedrichs-Lewy (CFL) conditions, it is useful to have multiple methods at one's disposal. **This module provides a number of such methods.** # # More details about the Method of Lines is discussed further in the [Tutorial-RK_Butcher_Table_Generating_C_Code](Tutorial-RK_Butcher_Table_Generating_C_Code.ipynb) module where we generate the C code to implement the Method of Lines, and additional description can be found in the [Numerically Solving the Scalar Wave Equation: A Complete C Code](Tutorial-Start_to_Finish-ScalarWave.ipynb) NRPy+ tutorial notebook. # <a id='toc'></a> # # # Table of Contents # $$\label{toc}$$ # # This notebook is organized as follows # # 1. [Step 1](#initializenrpy): Initialize needed Python modules # 1. [Step 2](#introbutcher): The Family of Explicit Runge-Kutta-Like Schemes (Butcher Tables) # 1. [Step 2a](#codebutcher): Generating a Dictionary of Butcher Tables for Explicit Runge Kutta Techniques # 1. [Step 2.a.i](#euler): Euler's Method # 1. [Step 2.a.ii](#rktwoheun): RK2 Heun's Method # 1. [Step 2.a.iii](#rk2mp): RK2 Midpoint Method # 1. [Step 2.a.iv](#rk2ralston): RK2 Ralston's Method # 1. [Step 2.a.v](#rk3): Kutta's Third-order Method # 1. [Step 2.a.vi.](#rk3heun): RK3 Heun's Method # 1. [Step 2.a.vii](#rk3ralston): RK3 Ralston's Method # 1. 
[Step 2.a.viii](#ssprk3): Strong Stability Preserving Runge-Kutta (SSPRK3) Method # 1. [Step 2.a.ix](#rkfour): Classic RK4 Method # 1. [Step 2.a.x](#dp5): RK5 Dormand-Prince Method # 1. [Step 2.a.xi](#dp5alt): RK5 Dormand-Prince Method Alternative # 1. [Step 2.a.xii](#ck5): RK5 Cash-Karp Method # 1. [Step 2.a.xiii](#dp6): RK6 Dormand-Prince Method # 1. [Step 2.a.xiv](#l6): RK6 Luther Method # 1. [Step 2.a.xv](#dp8): RK8 Dormand-Prince Method # 1. [Step 3](#code_validation): Code Validation against `MoLtimestepping.RK_Butcher_Table_Dictionary` NRPy+ module # 1. [Step 4](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file # <a id='initializenrpy'></a> # # # Step 1: Initialize needed Python modules [Back to [top](#toc)\] # $$\label{initializenrpy}$$ # # Let's start by importing all the needed modules from Python: # Step 1: Initialize needed Python modules import sympy as sp # <a id='introbutcher'></a> # # # Step 2: The Family of Explicit Runge-Kutta-Like Schemes (Butcher Tables) [Back to [top](#toc)\] # $$\label{introbutcher}$$ # # In general, a predictor-corrector method performs an estimate timestep from $n$ to $n+1$, using e.g., a Runge Kutta method, to get a prediction of the solution at timestep $n+1$. This is the "predictor" step. Then it uses this prediction to perform another, "corrector" step, designed to increase the accuracy of the solution. # # Let us focus on the ordinary differential equation (ODE) # # $$ y'(t) = f(y,t), $$ # # which acts as an analogue for a generic PDE $\partial_{t}u(t,x,y,...)=f(t,x,y,...,u,u_x,...)$. 
#
# The general family of Runge Kutta "explicit" timestepping methods is implemented using the following scheme:
#
# $$y_{n+1} = y_n + \sum_{i=1}^s b_ik_i $$
#
# where
#
# \begin{align}
# k_1 &= \Delta tf(y_n, t_n) \\
# k_2 &= \Delta tf(y_n + [a_{21}k_1], t_n + c_2\Delta t) \\
# k_3 &= \Delta tf(y_n +[a_{31}k_1 + a_{32}k_2], t_n + c_3\Delta t) \\
# & \ \ \vdots \\
# k_s &= \Delta tf(y_n +[a_{s1}k_1 + a_{s2}k_2 + \cdots + a_{s, s-1}k_{s-1}], t_n + c_s\Delta t)
# \end{align}
#
# Note $s$ is the number of right-hand side evaluations necessary for any given method, i.e., for RK2 $s=2$ and for RK4 $s=4$, and for RK6 $s=7$. These schemes are often written in the form of a so-called "Butcher tableau" or "Butcher table":
#
# $$\begin{array}{c|ccccc}
# 0 & \\
# c_2 & a_{21} & \\
# c_3 & a_{31} & a_{32} & \\
# \vdots & \vdots & & \ddots \\
# c_s & a_{s1} & a_{s2} & \cdots & a_{s,s-1} \\ \hline
# & b_1 & b_2 & \cdots & b_{s-1} & b_s
# \end{array} $$
#
# As an example, the "classic" fourth-order Runge Kutta (RK4) method obtains the solution $y(t)$ to the single-variable ODE $y'(t) = f(y(t),t)$ at time $t_{n+1}$ from $t_n$ via:
#
# \begin{align}
# k_1 &= \Delta tf(y_n, t_n), \\
# k_2 &= \Delta tf(y_n + \frac{1}{2}k_1, t_n + \frac{\Delta t}{2}), \\
# k_3 &= \Delta tf(y_n + \frac{1}{2}k_2, t_n + \frac{\Delta t}{2}), \\
# k_4 &= \Delta tf(y_n + k_3, t_n + \Delta t), \\
# y_{n+1} &= y_n + \frac{1}{6}(k_1 + 2k_2 + 2k_3 + k_4) + \mathcal{O}\big((\Delta t)^5\big).
# \end{align}
#
# Its corresponding Butcher table is constructed as follows:
#
# $$\begin{array}{c|cccc}
# 0 & \\
# 1/2 & 1/2 & \\
# 1/2 & 0 & 1/2 & \\
# 1 & 0 & 0 & 1 & \\ \hline
# & 1/6 & 1/3 & 1/3 & 1/6
# \end{array} $$
#
#
# This is one example of many explicit [Runge Kutta methods](https://en.wikipedia.org/w/index.php?title=List_of_Runge%E2%80%93Kutta_methods&oldid=896594269).
Throughout the following sections we will highlight different Runge Kutta schemes and their Butcher tables from the first-order Euler's method up to and including an eighth-order method. # <a id='codebutcher'></a> # # ## Step 2.a: Generating a Dictionary of Butcher Tables for Explicit Runge Kutta Techniques [Back to [top](#toc)\] # $$\label{codebutcher}$$ # # We can store all of the Butcher tables in Python's **Dictionary** format using the curly brackets {} and 'key':value pairs. The 'key' will be the *name* of the Runge Kutta method and the value will be the Butcher table itself stored as a list of lists. The convergence order for each Runge Kutta method is also stored. We will construct the dictionary `Butcher_dict` one Butcher table at a time in the following sections. # + # Step 2a: Generating a Dictionary of Butcher Tables for Explicit Runge Kutta Techniques # Initialize the dictionary Butcher_dict Butcher_dict = {} # - # <a id='euler'></a> # # ### Step 2.a.i: Euler's Method [Back to [top](#toc)\] # $$\label{euler}$$ # # [Forward Euler's method](https://en.wikipedia.org/w/index.php?title=Euler_method&oldid=896152463) is a first order Runge Kutta method. Euler's method obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # $$y_{n+1} = y_{n} + \Delta tf(y_{n}, t_{n})$$ # with the trivial corresponding Butcher table # $$\begin{array}{c|c} # 0 & \\ \hline # & 1 # \end{array}$$ # # + # Step 2.a.i: Euler's Method Butcher_dict['Euler'] = ( [[sp.sympify(0)], ["", sp.sympify(1)]] , 1) # - # <a id='rktwoheun'></a> # # ### Step 2.a.ii: RK2 Heun's Method [Back to [top](#toc)\] # $$\label{rktwoheun}$$ # # [Heun's method](https://en.wikipedia.org/w/index.php?title=Heun%27s_method&oldid=866896936) is a second-order RK method that obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + k_1, t_n + \Delta t), \\ # y_{n+1} &= y_n + \frac{1}{2}(k_1 + k_2) + \mathcal{O}\big((\Delta t)^3\big). 
# \end{align}
# with corresponding Butcher table
# $$\begin{array}{c|cc}
# 0 & \\
# 1 & 1 & \\ \hline
# & 1/2 & 1/2
# \end{array} $$

# +
# Step 2.a.ii: RK2 Heun's Method
Butcher_dict['RK2 Heun'] = (
[[sp.sympify(0)],
[sp.sympify(1), sp.sympify(1)],
["", sp.Rational(1,2), sp.Rational(1,2)]]
, 2)
# -

# <a id='rk2mp'></a>
#
# ### Step 2.a.iii: RK2 Midpoint Method [Back to [top](#toc)\]
# $$\label{rk2mp}$$
#
# The [midpoint method](https://en.wikipedia.org/w/index.php?title=Midpoint_method&oldid=886630580) is a second-order RK method that obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via:
# \begin{align}
# k_1 &= \Delta tf(y_n, t_n), \\
# k_2 &= \Delta tf(y_n + \frac{1}{2}k_1, t_n + \frac{1}{2}\Delta t), \\
# y_{n+1} &= y_n + k_2 + \mathcal{O}\big((\Delta t)^3\big).
# \end{align}
# with corresponding Butcher table
# $$\begin{array}{c|cc}
# 0 & \\
# 1/2 & 1/2 & \\ \hline
# & 0 & 1
# \end{array} $$

# +
# Step 2.a.iii: RK2 Midpoint (MP) Method
Butcher_dict['RK2 MP'] = (
[[sp.sympify(0)],
[sp.Rational(1,2), sp.Rational(1,2)],
["", sp.sympify(0), sp.sympify(1)]]
, 2)
# -

# <a id='rk2ralston'></a>
#
# ### Step 2.a.iv: RK2 Ralston's Method [Back to [top](#toc)\]
# $$\label{rk2ralston}$$
#
# Ralston's method (see [Ralston (1962)](https://www.ams.org/journals/mcom/1962-16-080/S0025-5718-1962-0150954-0/S0025-5718-1962-0150954-0.pdf)) is a second-order RK method that obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via:
# \begin{align}
# k_1 &= \Delta tf(y_n, t_n), \\
# k_2 &= \Delta tf(y_n + \frac{2}{3}k_1, t_n + \frac{2}{3}\Delta t), \\
# y_{n+1} &= y_n + \frac{1}{4}k_1 + \frac{3}{4}k_2 + \mathcal{O}\big((\Delta t)^3\big).
# \end{align} # with corresponding Butcher table # $$\begin{array}{c|cc} # 0 & \\ # 2/3 & 2/3 & \\ \hline # & 1/4 & 3/4 # \end{array} $$ # + # Step 2.a.iv: RK2 Ralston's Method Butcher_dict['RK2 Ralston'] = ( [[sp.sympify(0)], [sp.Rational(2,3), sp.Rational(2,3)], ["", sp.Rational(1,4), sp.Rational(3,4)]] , 2) # - # <a id='rk3'></a> # # ### Step 2.a.v: Kutta's Third-order Method [Back to [top](#toc)\] # $$\label{rk3}$$ # # [Kutta's third-order method](https://en.wikipedia.org/w/index.php?title=List_of_Runge%E2%80%93Kutta_methods&oldid=896594269) obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + \frac{1}{2}k_1, t_n + \frac{1}{2}\Delta t), \\ # k_3 &= \Delta tf(y_n - k_1 + 2k_2, t_n + \Delta t) \\ # y_{n+1} &= y_n + \frac{1}{6}k_1 + \frac{2}{3}k_2 + \frac{1}{6}k_3 + \mathcal{O}\big((\Delta t)^4\big). # \end{align} # with corresponding Butcher table # \begin{array}{c|ccc} # 0 & \\ # 1/2 & 1/2 & \\ # 1 & -1 & 2 & \\ \hline # & 1/6 & 2/3 & 1/6 # \end{array} # + # Step 2.a.v: Kutta's Third-order Method Butcher_dict['RK3'] = ( [[sp.sympify(0)], [sp.Rational(1,2), sp.Rational(1,2)], [sp.sympify(1), sp.sympify(-1), sp.sympify(2)], ["", sp.Rational(1,6), sp.Rational(2,3), sp.Rational(1,6)]] , 3) # - # <a id='rk3heun'></a> # # ### Step 2.a.vi: RK3 Heun's Method [Back to [top](#toc)\] # $$\label{rk3heun}$$ # # [Heun's third-order method](https://en.wikipedia.org/w/index.php?title=List_of_Runge%E2%80%93Kutta_methods&oldid=896594269) obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + \frac{1}{3}k_1, t_n + \frac{1}{3}\Delta t), \\ # k_3 &= \Delta tf(y_n + \frac{2}{3}k_2, t_n + \frac{2}{3}\Delta t) \\ # y_{n+1} &= y_n + \frac{1}{4}k_1 + \frac{3}{4}k_3 + \mathcal{O}\big((\Delta t)^4\big). 
# \end{align} # # with corresponding Butcher table # # \begin{array}{c|ccc} # 0 & \\ # 1/3 & 1/3 & \\ # 2/3 & 0 & 2/3 & \\ \hline # & 1/4 & 0 & 3/4 # \end{array} # # + # Step 2.a.vi: RK3 Heun's Method Butcher_dict['RK3 Heun'] = ( [[sp.sympify(0)], [sp.Rational(1,3), sp.Rational(1,3)], [sp.Rational(2,3), sp.sympify(0), sp.Rational(2,3)], ["", sp.Rational(1,4), sp.sympify(0), sp.Rational(3,4)]] , 3) # - # <a id='rk3ralston'></a> # # ### Step 2.a.vii: RK3 Ralton's Method [Back to [top](#toc)\] # $$\label{rk3ralston}$$ # # Ralston's third-order method (see [Ralston (1962)](https://www.ams.org/journals/mcom/1962-16-080/S0025-5718-1962-0150954-0/S0025-5718-1962-0150954-0.pdf), obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + \frac{1}{2}k_1, t_n + \frac{1}{2}\Delta t), \\ # k_3 &= \Delta tf(y_n + \frac{3}{4}k_2, t_n + \frac{3}{4}\Delta t) \\ # y_{n+1} &= y_n + \frac{2}{9}k_1 + \frac{1}{3}k_2 + \frac{4}{9}k_3 + \mathcal{O}\big((\Delta t)^4\big). 
# \end{align} # # with corresponding Butcher table # # \begin{array}{c|ccc} # 0 & \\ # 1/2 & 1/2 & \\ # 3/4 & 0 & 3/4 & \\ \hline # & 2/9 & 1/3 & 4/9 # \end{array} # + # Step 2.a.vii: RK3 Ralton's Method Butcher_dict['RK3 Ralston'] = ( [[0], [sp.Rational(1,2), sp.Rational(1,2)], [sp.Rational(3,4), sp.sympify(0), sp.Rational(3,4)], ["", sp.Rational(2,9), sp.Rational(1,3), sp.Rational(4,9)]] , 3) # - # <a id='ssprk3'></a> # # ### Step 2.a.viii: Strong Stability Preserving Runge-Kutta (SSPRK3) Method [Back to [top](#toc)\] # $\label{ssprk3}$ # # The [Strong Stability Preserving Runge-Kutta (SSPRK3)](https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods#Kutta's_third-order_method) method obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + k_1, t_n + \Delta t), \\ # k_3 &= \Delta tf(y_n + \frac{1}{4}k_1 + \frac{1}{4}k_2, t_n + \frac{1}{2}\Delta t) \\ # y_{n+1} &= y_n + \frac{1}{6}k_1 + \frac{1}{6}k_2 + \frac{2}{3}k_3 + \mathcal{O}\big((\Delta t)^4\big). 
# \end{align} # # with corresponding Butcher table # # \begin{array}{c|ccc} # 0 & \\ # 1 & 1 & \\ # 1/2 & 1/4 & 1/4 & \\ \hline # & 1/6 & 1/6 & 2/3 # \end{array} # # + # Step 2.a.viii: Strong Stability Preserving Runge-Kutta (SSPRK3) Method Butcher_dict['SSPRK3'] = ( [[0], [sp.sympify(1), sp.sympify(1)], [sp.Rational(1,2), sp.Rational(1,4), sp.Rational(1,4)], ["", sp.Rational(1,6), sp.Rational(1,6), sp.Rational(2,3)]] , 3) # - # <a id='rkfour'></a> # # ### Step 2.a.ix: Classic RK4 Method [Back to [top](#toc)\] # $$\label{rkfour}$$ # # The [classic RK4 method](https://en.wikipedia.org/w/index.php?title=Runge%E2%80%93Kutta_methods&oldid=894771467) obtains the solution $y(t)$ at time $t_{n+1}$ from $t_n$ via: # # \begin{align} # k_1 &= \Delta tf(y_n, t_n), \\ # k_2 &= \Delta tf(y_n + \frac{1}{2}k_1, t_n + \frac{\Delta t}{2}), \\ # k_3 &= \Delta tf(y_n + \frac{1}{2}k_2, t_n + \frac{\Delta t}{2}), \\ # k_4 &= \Delta tf(y_n + k_3, t_n + \Delta t), \\ # y_{n+1} &= y_n + \frac{1}{6}(k_1 + 2k_2 + 2k_3 + k_4) + \mathcal{O}\big((\Delta t)^5\big). # \end{align} # # with corresponding Butcher table # # $$\begin{array}{c|cccc} # 0 & \\ # 1/2 & 1/2 & \\ # 1/2 & 0 & 1/2 & \\ # 1 & 0 & 0 & 1 & \\ \hline # & 1/6 & 1/3 & 1/3 & 1/6 # \end{array} $$ # + # Step 2.a.vix: Classic RK4 Method Butcher_dict['RK4'] = ( [[sp.sympify(0)], [sp.Rational(1,2), sp.Rational(1,2)], [sp.Rational(1,2), sp.sympify(0), sp.Rational(1,2)], [sp.sympify(1), sp.sympify(0), sp.sympify(0), sp.sympify(1)], ["", sp.Rational(1,6), sp.Rational(1,3), sp.Rational(1,3), sp.Rational(1,6)]] , 4) # - # <a id='dp5'></a> # # ### Step 2.a.x: RK5 Dormand-Prince Method [Back to [top](#toc)\] # $$\label{dp5}$$ # # The fifth-order Dormand-Prince (DP) method from the RK5(4) family (see [<NAME>.; <NAME>. 
(1980)](https://www.sciencedirect.com/science/article/pii/0771050X80900133?via%3Dihub)) Butcher table is: # # $$\begin{array}{c|ccccccc} # 0 & \\ # \frac{1}{5} & \frac{1}{5} & \\ # \frac{3}{10} & \frac{3}{40} & \frac{9}{40} & \\ # \frac{4}{5} & \frac{44}{45} & \frac{-56}{15} & \frac{32}{9} & \\ # \frac{8}{9} & \frac{19372}{6561} & \frac{−25360}{2187} & \frac{64448}{6561} & \frac{−212}{729} & \\ # 1 & \frac{9017}{3168} & \frac{−355}{33} & \frac{46732}{5247} & \frac{49}{176} & \frac{−5103}{18656} & \\ # 1 & \frac{35}{384} & 0 & \frac{500}{1113} & \frac{125}{192} & \frac{−2187}{6784} & \frac{11}{84} & \\ \hline # & \frac{35}{384} & 0 & \frac{500}{1113} & \frac{125}{192} & \frac{−2187}{6784} & \frac{11}{84} & 0 # \end{array} $$ # + # Step 2.a.x: RK5 Dormand-Prince Method Butcher_dict['DP5'] = ( [[0], [sp.Rational(1,5), sp.Rational(1,5)], [sp.Rational(3,10),sp.Rational(3,40), sp.Rational(9,40)], [sp.Rational(4,5), sp.Rational(44,45), sp.Rational(-56,15), sp.Rational(32,9)], [sp.Rational(8,9), sp.Rational(19372,6561), sp.Rational(-25360,2187), sp.Rational(64448,6561), sp.Rational(-212,729)], [sp.sympify(1), sp.Rational(9017,3168), sp.Rational(-355,33), sp.Rational(46732,5247), sp.Rational(49,176), sp.Rational(-5103,18656)], [sp.sympify(1), sp.Rational(35,384), sp.sympify(0), sp.Rational(500,1113), sp.Rational(125,192), sp.Rational(-2187,6784), sp.Rational(11,84)], ["", sp.Rational(35,384), sp.sympify(0), sp.Rational(500,1113), sp.Rational(125,192), sp.Rational(-2187,6784), sp.Rational(11,84), sp.sympify(0)]] , 5) # - # <a id='dp5alt'></a> # # ### Step 2.a.xi: RK5 Dormand-Prince Method Alternative [Back to [top](#toc)\] # $$\label{dp5alt}$$ # # The fifth-order Dormand-Prince (DP) method from the RK6(5) family (see [<NAME>.; <NAME>. 
(1981)](https://www.sciencedirect.com/science/article/pii/0771050X81900103)) Butcher table is: # # $$\begin{array}{c|ccccccc} # 0 & \\ # \frac{1}{10} & \frac{1}{10} & \\ # \frac{2}{9} & \frac{-2}{81} & \frac{20}{81} & \\ # \frac{3}{7} & \frac{615}{1372} & \frac{-270}{343} & \frac{1053}{1372} & \\ # \frac{3}{5} & \frac{3243}{5500} & \frac{-54}{55} & \frac{50949}{71500} & \frac{4998}{17875} & \\ # \frac{4}{5} & \frac{-26492}{37125} & \frac{72}{55} & \frac{2808}{23375} & \frac{-24206}{37125} & \frac{338}{459} & \\ # 1 & \frac{5561}{2376} & \frac{-35}{11} & \frac{-24117}{31603} & \frac{899983}{200772} & \frac{-5225}{1836} & \frac{3925}{4056} & \\ \hline # & \frac{821}{10800} & 0 & \frac{19683}{71825} & \frac{175273}{912600} & \frac{395}{3672} & \frac{785}{2704} & \frac{3}{50} # \end{array}$$ # + # Step 2.a.xi: RK5 Dormand-Prince Method Alternative Butcher_dict['DP5alt'] = ( [[0], [sp.Rational(1,10), sp.Rational(1,10)], [sp.Rational(2,9), sp.Rational(-2, 81), sp.Rational(20, 81)], [sp.Rational(3,7), sp.Rational(615, 1372), sp.Rational(-270, 343), sp.Rational(1053, 1372)], [sp.Rational(3,5), sp.Rational(3243, 5500), sp.Rational(-54, 55), sp.Rational(50949, 71500), sp.Rational(4998, 17875)], [sp.Rational(4, 5), sp.Rational(-26492, 37125), sp.Rational(72, 55), sp.Rational(2808, 23375), sp.Rational(-24206, 37125), sp.Rational(338, 459)], [sp.sympify(1), sp.Rational(5561, 2376), sp.Rational(-35, 11), sp.Rational(-24117, 31603), sp.Rational(899983, 200772), sp.Rational(-5225, 1836), sp.Rational(3925, 4056)], ["", sp.Rational(821, 10800), sp.sympify(0), sp.Rational(19683, 71825), sp.Rational(175273, 912600), sp.Rational(395, 3672), sp.Rational(785, 2704), sp.Rational(3, 50)]] , 5) # - # <a id='ck5'></a> # # ### Step 2.a.xii: RK5 Cash-Karp Method [Back to [top](#toc)\] # $$\label{ck5}$$ # # The fifth-order Cash-Karp Method (see [<NAME>, <NAME>. 
(1980)](https://dl.acm.org/citation.cfm?doid=79505.79507)) Butcher table is: # # $$\begin{array}{c|cccccc} # 0 & \\ # \frac{1}{5} & \frac{1}{5} & \\ # \frac{3}{10} & \frac{3}{40} & \frac{9}{40} & \\ # \frac{3}{5} & \frac{3}{10} & \frac{−9}{10} & \frac{6}{5} & \\ # 1 & \frac{−11}{54} & \frac{5}{2} & \frac{−70}{27} & \frac{35}{27} & \\ # \frac{7}{8} & \frac{1631}{55296} & \frac{175}{512} & \frac{575}{13824} & \frac{44275}{110592} & \frac{253}{4096} & \\ \hline # & \frac{37}{378} & 0 & \frac{250}{621} & \frac{125}{594} & 0 & \frac{512}{1771} # \end{array}$$ # # # # + # Step 2.a.xii: RK5 Cash-Karp Method Butcher_dict['CK5'] = ( [[0], [sp.Rational(1,5), sp.Rational(1,5)], [sp.Rational(3,10),sp.Rational(3,40), sp.Rational(9,40)], [sp.Rational(3,5), sp.Rational(3,10), sp.Rational(-9,10), sp.Rational(6,5)], [sp.sympify(1), sp.Rational(-11,54), sp.Rational(5,2), sp.Rational(-70,27), sp.Rational(35,27)], [sp.Rational(7,8), sp.Rational(1631,55296), sp.Rational(175,512), sp.Rational(575,13824), sp.Rational(44275,110592), sp.Rational(253,4096)], ["",sp.Rational(37,378), sp.sympify(0), sp.Rational(250,621), sp.Rational(125,594), sp.sympify(0), sp.Rational(512,1771)]] , 5) # - # <a id='dp6'></a> # # ### Step 2.a.xiii: RK6 Dormand-Prince Method [Back to [top](#toc)\] # $$\label{dp6}$$ # # The sixth-order Dormand-Prince method (see [<NAME>.; <NAME>. 
(1981)](https://www.sciencedirect.com/science/article/pii/0771050X81900103)) Butcher Table is # # # $$\begin{array}{c|cccccccc} # 0 & \\ # \frac{1}{10} & \frac{1}{10} & \\ # \frac{2}{9} & \frac{-2}{81} & \frac{20}{81} & \\ # \frac{3}{7} & \frac{615}{1372} & \frac{-270}{343} & \frac{1053}{1372} & \\ # \frac{3}{5} & \frac{3243}{5500} & \frac{-54}{55} & \frac{50949}{71500} & \frac{4998}{17875} & \\ # \frac{4}{5} & \frac{-26492}{37125} & \frac{72}{55} & \frac{2808}{23375} & \frac{-24206}{37125} & \frac{338}{459} & \\ # 1 & \frac{5561}{2376} & \frac{-35}{11} & \frac{-24117}{31603} & \frac{899983}{200772} & \frac{-5225}{1836} & \frac{3925}{4056} & \\ # 1 & \frac{465467}{266112} & \frac{-2945}{1232} & \frac{-5610201}{14158144} & \frac{10513573}{3212352} & \frac{-424325}{205632} & \frac{376225}{454272} & 0 & \\ \hline # & \frac{61}{864} & 0 & \frac{98415}{321776} & \frac{16807}{146016} & \frac{1375}{7344} & \frac{1375}{5408} & \frac{-37}{1120} & \frac{1}{10} # \end{array}$$ # # # + # Step 2.a.xiii: RK6 Dormand-Prince Method Butcher_dict['DP6'] = ( [[0], [sp.Rational(1,10), sp.Rational(1,10)], [sp.Rational(2,9), sp.Rational(-2, 81), sp.Rational(20, 81)], [sp.Rational(3,7), sp.Rational(615, 1372), sp.Rational(-270, 343), sp.Rational(1053, 1372)], [sp.Rational(3,5), sp.Rational(3243, 5500), sp.Rational(-54, 55), sp.Rational(50949, 71500), sp.Rational(4998, 17875)], [sp.Rational(4, 5), sp.Rational(-26492, 37125), sp.Rational(72, 55), sp.Rational(2808, 23375), sp.Rational(-24206, 37125), sp.Rational(338, 459)], [sp.sympify(1), sp.Rational(5561, 2376), sp.Rational(-35, 11), sp.Rational(-24117, 31603), sp.Rational(899983, 200772), sp.Rational(-5225, 1836), sp.Rational(3925, 4056)], [sp.sympify(1), sp.Rational(465467, 266112), sp.Rational(-2945, 1232), sp.Rational(-5610201, 14158144), sp.Rational(10513573, 3212352), sp.Rational(-424325, 205632), sp.Rational(376225, 454272), sp.sympify(0)], ["", sp.Rational(61, 864), sp.sympify(0), sp.Rational(98415, 321776), sp.Rational(16807, 
146016), sp.Rational(1375, 7344), sp.Rational(1375, 5408), sp.Rational(-37, 1120), sp.Rational(1,10)]] , 6) # - # <a id='l6'></a> # # ### Step 2.a.xiv: RK6 Luther's Method [Back to [top](#toc)\] # $$\label{l6}$$ # # Luther's sixth-order method (see [<NAME> (1968)](http://www.ams.org/journals/mcom/1968-22-102/S0025-5718-68-99876-1/S0025-5718-68-99876-1.pdf)) Butcher table is: # $$\begin{array}{c|ccccccc} # 0 & \\ # 1 & 1 & \\ # \frac{1}{2} & \frac{3}{8} & \frac{1}{8} & \\ # \frac{2}{3} & \frac{8}{27} & \frac{2}{27} & \frac{8}{27} & \\ # \frac{(7-q)}{14} & \frac{(-21 + 9q)}{392} & \frac{(-56 + 8q)}{392} & \frac{(336 - 48q)}{392} & \frac{(-63 + 3q)}{392} & \\ # \frac{(7+q)}{14} & \frac{(-1155 - 255q)}{1960} & \frac{(-280 - 40q)}{1960} & \frac{320q}{1960} & \frac{(63 + 363q)}{1960} & \frac{(2352 + 392q)}{1960} & \\ # 1 & \frac{(330 + 105q)}{180} & \frac{2}{3} & \frac{(-200 + 280q)}{180} & \frac{(126 - 189q)}{180} & \frac{(-686 - 126q)}{180} & \frac{(490 - 70q)}{180} & \\ \hline # & \frac{1}{20} & 0 & \frac{16}{45} & 0 & \frac{49}{180} & \frac{49}{180} & \frac{1}{20} # \end{array}$$ # # where $q = \sqrt{21}$. 
# + # Step 2.a.xiv: RK6 Luther's Method q = sp.sqrt(21) Butcher_dict['L6'] = ( [[0], [sp.sympify(1), sp.sympify(1)], [sp.Rational(1,2), sp.Rational(3,8), sp.Rational(1,8)], [sp.Rational(2,3), sp.Rational(8,27), sp.Rational(2,27), sp.Rational(8,27)], [(7 - q)/14, (-21 + 9*q)/392, (-56 + 8*q)/392, (336 -48*q)/392, (-63 + 3*q)/392], [(7 + q)/14, (-1155 - 255*q)/1960, (-280 - 40*q)/1960, (-320*q)/1960, (63 + 363*q)/1960, (2352 + 392*q)/1960], [sp.sympify(1), ( 330 + 105*q)/180, sp.Rational(2,3), (-200 + 280*q)/180, (126 - 189*q)/180, (-686 - 126*q)/180, (490 - 70*q)/180], ["", sp.Rational(1, 20), sp.sympify(0), sp.Rational(16, 45), sp.sympify(0), sp.Rational(49, 180), sp.Rational(49, 180), sp.Rational(1, 20)]] , 6) # - # <a id='dp8'></a> # # ### Step 2.a.xv: RK8 Dormand-Prince Method [Back to [top](#toc)\] # $$\label{dp8}$$ # # The eighth-order Dormand-Prince Method (see [<NAME>.; <NAME>. (1981)](https://www.sciencedirect.com/science/article/pii/0771050X81900103)) Butcher table is: # # $$\begin{array}{c|ccccccccc} # 0 & \\ # \frac{1}{18} & \frac{1}{18} & \\ # \frac{1}{12} & \frac{1}{48} & \frac{1}{16} & \\ # \frac{1}{8} & \frac{1}{32} & 0 & \frac{3}{32} & \\ # \frac{5}{16} & \frac{5}{16} & 0 & \frac{-75}{64} & \frac{75}{64} & \\ # \frac{3}{8} & \frac{3}{80} & 0 & 0 & \frac{3}{16} & \frac{3}{20} & \\ # \frac{59}{400} & \frac{29443841}{614563906} & 0 & 0 & \frac{77736538}{692538347} & \frac{-28693883}{1125000000} & \frac{23124283}{1800000000} & \\ # \frac{93}{200} & \frac{16016141}{946692911} & 0 & 0 & \frac{61564180}{158732637} & \frac{22789713}{633445777} & \frac{545815736}{2771057229} & \frac{-180193667}{1043307555} & \\ # \frac{5490023248}{9719169821} & \frac{39632708}{573591083} & 0 & 0 & \frac{-433636366}{683701615} & \frac{-421739975}{2616292301} & \frac{100302831}{723423059} & \frac{790204164}{839813087} & \frac{800635310}{3783071287} & \\ # \frac{13}{20} & \frac{246121993}{1340847787} & 0 & 0 & \frac{-37695042795}{15268766246} & \frac{-309121744}{1061227803} & 
\frac{-12992083}{490766935} & \frac{6005943493}{2108947869} & \frac{393006217}{1396673457} & \frac{123872331}{1001029789} & \\ # \frac{1201146811}{1299019798} & \frac{-1028468189}{846180014} & 0 & 0 & \frac{8478235783}{508512852} & \frac{1311729495}{1432422823} & \frac{-10304129995}{1701304382} & \frac{-48777925059}{3047939560} & \frac{15336726248}{1032824649} & \frac{-45442868181}{3398467696} & \frac{3065993473}{597172653} & \\ # 1 & \frac{185892177}{718116043} & 0 & 0 & \frac{-3185094517}{667107341} & \frac{-477755414}{1098053517} & \frac{-703635378}{230739211} & \frac{5731566787}{1027545527} & \frac{5232866602}{850066563} & \frac{-4093664535}{808688257} & \frac{3962137247}{1805957418} & \frac{65686358}{487910083} & \\ # 1 & \frac{403863854}{491063109} & 0 & 0 & \frac{-5068492393}{434740067} & \frac{-411421997}{543043805} & \frac{652783627}{914296604} & \frac{11173962825}{925320556} & \frac{-13158990841}{6184727034} & \frac{3936647629}{1978049680} & \frac{-160528059}{685178525} & \frac{248638103}{1413531060} & 0 & \\ # & \frac{14005451}{335480064} & 0 & 0 & 0 & 0 & \frac{-59238493}{1068277825} & \frac{181606767}{758867731} & \frac{561292985}{797845732} & \frac{-1041891430}{1371343529} & \frac{760417239}{1151165299} & \frac{118820643}{751138087} & \frac{-528747749}{2220607170} & \frac{1}{4} # \end{array}$$ # # # + # Step 2.a.xv: RK8 Dormand-Prince Method Butcher_dict['DP8']=( [[0], [sp.Rational(1, 18), sp.Rational(1, 18)], [sp.Rational(1, 12), sp.Rational(1, 48), sp.Rational(1, 16)], [sp.Rational(1, 8), sp.Rational(1, 32), sp.sympify(0), sp.Rational(3, 32)], [sp.Rational(5, 16), sp.Rational(5, 16), sp.sympify(0), sp.Rational(-75, 64), sp.Rational(75, 64)], [sp.Rational(3, 8), sp.Rational(3, 80), sp.sympify(0), sp.sympify(0), sp.Rational(3, 16), sp.Rational(3, 20)], [sp.Rational(59, 400), sp.Rational(29443841, 614563906), sp.sympify(0), sp.sympify(0), sp.Rational(77736538, 692538347), sp.Rational(-28693883, 1125000000), sp.Rational(23124283, 1800000000)], 
[sp.Rational(93, 200), sp.Rational(16016141, 946692911), sp.sympify(0), sp.sympify(0), sp.Rational(61564180, 158732637), sp.Rational(22789713, 633445777), sp.Rational(545815736, 2771057229), sp.Rational(-180193667, 1043307555)], [sp.Rational(5490023248, 9719169821), sp.Rational(39632708, 573591083), sp.sympify(0), sp.sympify(0), sp.Rational(-433636366, 683701615), sp.Rational(-421739975, 2616292301), sp.Rational(100302831, 723423059), sp.Rational(790204164, 839813087), sp.Rational(800635310, 3783071287)], [sp.Rational(13, 20), sp.Rational(246121993, 1340847787), sp.sympify(0), sp.sympify(0), sp.Rational(-37695042795, 15268766246), sp.Rational(-309121744, 1061227803), sp.Rational(-12992083, 490766935), sp.Rational(6005943493, 2108947869), sp.Rational(393006217, 1396673457), sp.Rational(123872331, 1001029789)], [sp.Rational(1201146811, 1299019798), sp.Rational(-1028468189, 846180014), sp.sympify(0), sp.sympify(0), sp.Rational(8478235783, 508512852), sp.Rational(1311729495, 1432422823), sp.Rational(-10304129995, 1701304382), sp.Rational(-48777925059, 3047939560), sp.Rational(15336726248, 1032824649), sp.Rational(-45442868181, 3398467696), sp.Rational(3065993473, 597172653)], [sp.sympify(1), sp.Rational(185892177, 718116043), sp.sympify(0), sp.sympify(0), sp.Rational(-3185094517, 667107341), sp.Rational(-477755414, 1098053517), sp.Rational(-703635378, 230739211), sp.Rational(5731566787, 1027545527), sp.Rational(5232866602, 850066563), sp.Rational(-4093664535, 808688257), sp.Rational(3962137247, 1805957418), sp.Rational(65686358, 487910083)], [sp.sympify(1), sp.Rational(403863854, 491063109), sp.sympify(0), sp.sympify(0), sp.Rational(-5068492393, 434740067), sp.Rational(-411421997, 543043805), sp.Rational(652783627, 914296604), sp.Rational(11173962825, 925320556), sp.Rational(-13158990841, 6184727034), sp.Rational(3936647629, 1978049680), sp.Rational(-160528059, 685178525), sp.Rational(248638103, 1413531060), sp.sympify(0)], ["", sp.Rational(14005451, 335480064), 
sp.sympify(0), sp.sympify(0), sp.sympify(0), sp.sympify(0), sp.Rational(-59238493, 1068277825), sp.Rational(181606767, 758867731), sp.Rational(561292985, 797845732), sp.Rational(-1041891430, 1371343529), sp.Rational(760417239, 1151165299), sp.Rational(118820643, 751138087), sp.Rational(-528747749, 2220607170), sp.Rational(1, 4)]] , 8) # - # <a id='code_validation'></a> # # # Step 3: Code validation against `MoLtimestepping.RK_Butcher_Table_Dictionary` NRPy+ module [Back to [top](#toc)\] # $$\label{code_validation}$$ # # As a code validation check, we verify agreement in the dictionary of Butcher tables between # 1. this tutorial and # 2. the NRPy+ [MoLtimestepping.RK_Butcher_Table_Dictionary](../edit/MoLtimestepping/RK_Butcher_Table_Dictionary.py) module. # # We analyze all key/value entries in the dictionary for consistency. # Step 3: Code validation against MoLtimestepping.RK_Butcher_Table_Dictionary NRPy+ module import sys from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict as B_dict valid = True for key, value in Butcher_dict.items(): if Butcher_dict[key] != B_dict[key]: valid = False print(key) if valid == True and len(Butcher_dict.items()) == len(B_dict.items()): print("The dictionaries match!") else: print("ERROR: Dictionaries don't match!") sys.exit(1) # <a id='latex_pdf_output'></a> # # # Step 4: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] # $$\label{latex_pdf_output}$$ # # The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename # [Tutorial-RK_Butcher_Table_Dictionary.pdf](Tutorial-RK_Butcher_Table_Dictionary.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) 
# Step 4: convert this notebook to LaTeX, then compile to PDF.
# pdflatex is run three times so the table of contents and all
# cross-references resolve correctly.
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-RK_Butcher_Table_Dictionary.ipynb
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Dictionary.tex
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Dictionary.tex
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Dictionary.tex
# Clean up the LaTeX auxiliary files.
# !rm -f Tut*.out Tut*.aux Tut*.log
Tutorial-RK_Butcher_Table_Dictionary.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Gender-classification experiment: rare-label grouping and log-ratio target
# encoding of categorical features, followed by several sklearn classifiers
# and two grid searches.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

gender = pd.read_csv('Transformed Data Set - Sheet1.csv')
gender.head()

# ### Grouping the categories

gender_train, gender_test = train_test_split(gender, test_size = 0.3, random_state=42)

# +
# Tolerances are paired with feature columns positionally; zip truncates to
# the shorter of the two sequences, so only the first three columns of
# col_to_feature end up in new_dict.
col_to_feature = gender_train.columns[1:-1]
col_to_feature.tolist()
per_to_use = [0.1, 0.15, 0.2]
new_dict = dict(zip(col_to_feature, per_to_use))
# -

def find_non_rare_labels(df, variable, tolerance):
    # Return the labels of `variable` whose relative frequency in `df`
    # strictly exceeds `tolerance`.
    temp = df[variable].value_counts(normalize=True)
    labels = [i for i in temp.loc[temp >tolerance ].index.values]
    return labels

def rare_encoding(train,test, variable,tolerance):
    # Replace labels that are rare *in the training set* by 'Other', in both
    # frames (mutates train/test in place and also returns them).
    frequent_cat = find_non_rare_labels(train, variable, tolerance)
    train[variable] = np.where(train[variable].isin (frequent_cat), train[variable], 'Other')
    test[variable] = np.where(test[variable].isin (frequent_cat), test[variable], 'Other')
    return train, test

for col in new_dict:
    gender_train, gender_test= rare_encoding(gender_train, gender_test , col,new_dict[col])

def find_category_mappings(df, variable, target):
    # Map each label of `variable` to log(p(target=1) / p(target=0)) computed
    # on `df` (weight-of-evidence-style encoding).
    tmp = pd.DataFrame(df.groupby([variable])[target].mean())
    tmp['non-target'] = 1 - tmp[target]
    tmp['ratio'] = np.log(tmp[target] / tmp['non-target'])
    # +/-inf arises when a label is "pure" (all 0s or all 1s); it is replaced
    # by a value sampled from the finite ratios.  NOTE(review): np.where is
    # handed a single-element Series from .sample() here, which only works by
    # broadcasting — confirm this is intended rather than a capped constant.
    tmp['ratio'] = np.where(tmp['ratio'].isin([np.inf , -np.inf]),
                            tmp['ratio'].dropna().sample(random_state=42), tmp['ratio'])
    label_dict = tmp['ratio'].to_dict()
    return label_dict

def integer_encode(df, variable, ordinal_mapping):
    # In place: replace the labels of `variable` by their mapped numeric
    # values; labels absent from the mapping become NaN via .map().
    df[variable] = df[variable].map(ordinal_mapping)
    # test[variable] = test[variable].map(ordinal_mapping)

gender_train['Gender'] = np.where(gender_train['Gender'] == 'F', 1, 0)
gender_test['Gender'] = np.where(gender_test['Gender'] == 'F', 1, 0)

new_col = gender.columns[:-1]

for col in new_col:
    label_dict = find_category_mappings(gender_train, col, 'Gender')
    integer_encode(gender_train, col, label_dict)

gender_train

# NOTE(review): the test set is encoded with mappings computed *on the test
# set itself*, so the same label can receive different codes in train and
# test, and the test targets leak into its own features.  Reusing the train
# mappings would be the conventional approach — confirm this was deliberate
# before trusting the scores below.
for col in new_col:
    label_dict = find_category_mappings(gender_test, col, 'Gender')
    integer_encode(gender_test, col, label_dict)

gender_test

# +
# gender_test['Favorite Beverage'] = np.where(gender_test['Favorite Beverage'].isin([-np.inf, np.inf]), gender_test['Favorite Beverage'].dropna().sample(), gender_test['Favorite Beverage'] )

# +
# gender_test[gender_test['Favorite Beverage'].isin([-np.inf, np.inf])]
# -

def X_y(df, col):
    # Split a frame into features X (all columns but `col`) and target y (`col`).
    X = df.drop(col, axis=1)
    y = df[col]
    return X, y

X_train, y_train = X_y(gender_train, 'Gender')
X_test, y_test = X_y(gender_test, 'Gender')

rf = RandomForestClassifier(random_state=42)
los = LogisticRegression()
sv = SVC()
nb = BernoulliNB()
model_col = [rf, los, sv,nb]
train= []
test = []
col_model = ['random_forest', 'Logistic_Regression', 'Suport_vector', 'Naivebayes']

# Fit each model and report train/test accuracy plus the confusion matrix.
for model, col in zip(model_col, col_model):
    model.fit(X_train, y_train)
    train_score = model.score(X_train, y_train)
    test_score = model.score(X_test, y_test)
    y_pred = model.predict(X_test)
    train.append(train_score)
    test.append(test_score)
    # annot_kws = {"ha": 'right',"va": 'center'}
    # sns.heatmap(confusion_matrix(y_test, y_pred), annot=True, annot_kws=annot_kws)
    # plt.title(f'{col} , train_score:{train_score}, test_score: {test_score} ')
    # plt.show()
    print(f'{col} , train_score:{train_score}, test_score: {test_score} ')
    # print('\n')
    print(confusion_matrix(y_test, y_pred))
    print('\n\n\n')

# Grid search over classifier type + hyper-parameters inside one Pipeline.
# NOTE(review): recent sklearn's default lbfgs solver rejects penalty='l1';
# the commented-out 'liblinear' line below suggests this was encountered —
# confirm the installed sklearn version accepts this grid.
pipe = Pipeline([('classifier' , RandomForestClassifier())])
param_grid = [
    {'classifier' : [LogisticRegression()],
     'classifier__penalty' : ['l1', 'l2'],
     'classifier__C' : np.logspace(-4, 4, 20)},
#      'classifier__solver' : ['liblinear']},
    {'classifier' : [RandomForestClassifier()],
     'classifier__n_estimators' : list(range(10,101,10)),
     'classifier__max_features' : [1, 2, 3]}
]
clf = GridSearchCV(pipe, param_grid = param_grid, cv = 5, verbose=True, n_jobs=-1)
best_model = clf.fit(X_train, y_train)
best_model.best_estimator_.get_params()['classifier']
best_model.score(X_test, y_test)
best_model.score(X_train, y_train)

# Separate grid search for an RBF-kernel SVM.
Cs = [0.001, 0.01, 0.1, 1, 10]
gammas = [0.001, 0.01, 0.1, 1]
param_grid = {'C': Cs, 'gamma' : gammas}
grid_search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=3)
grid_search.get_params()
grid_search.fit(X_train, y_train)
grid_search.score(X_train,y_train)
grid_search.score(X_test,y_test)

new = ['Cool', 'Rock', 'Wine', 'Coca Cola/Pepsi']
scratch_work/trial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Logistic Regression with Differential Privacy # We start by importing the required libraries and modules and collecting the data that we need from the [Adult dataset](https://archive.ics.uci.edu/ml/datasets/adult). import diffprivlib.models as dp import numpy as np from sklearn.linear_model import LogisticRegression # + X_train = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", usecols=(0, 4, 10, 11, 12), delimiter=", ") y_train = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", usecols=14, dtype=str, delimiter=", ") # - np.unique(y_train) # Let's also collect the test data from Adult to test our models once they're trained. # + X_test = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", usecols=(0, 4, 10, 11, 12), delimiter=", ", skiprows=1) y_test = np.loadtxt("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", usecols=14, dtype=str, delimiter=", ", skiprows=1) # Must trim trailing period "." from label y_test = np.array([a[:-1] for a in y_test]) # - np.unique(y_test) # ## Logistic Regression with no privacy # To begin, let's first train a regular (non-private) logistic regression classifier, and test its accuracy. clf = LogisticRegression(solver="lbfgs") clf.fit(X_train, y_train) baseline = clf.score(X_test, y_test) print("Non-private test accuracy: %.2f%%" % (baseline * 100)) # ## Differentially private logistic regression # Using the `diffprivlib.models.LogisticRegression` module of diffprivlib, we can train a logistic regression classifier while satisfying differential privacy. # # If we don't specify any parameters, the model defaults to `epsilon = 1` and `data_norm = None`. 
# If the norm of the data is not specified at initialisation (as in this case), the norm will be calculated on the data when `.fit()` is first called and a warning will be thrown as it causes a privacy leak. To ensure no additional privacy leakage, we should specify the data norm explicitly as an argument, and choose the bounds independently of the data (i.e. using domain knowledge).
#
# Additionally, the high `data_norm` that is read from the data in this instance gives poor results, with accuracy only slightly better than random. This is as a result of the large amount of noise required to protect data spread over a large domain. By clipping the data to a smaller domain, accuracy improves markedly, as demonstrated below.

dp_clf = dp.LogisticRegression()
dp_clf.fit(X_train, y_train)

print("Differentially private test accuracy (epsilon=%.2f): %.2f%%" %
     (dp_clf.epsilon, dp_clf.score(X_test, y_test) * 100))

# By setting `epsilon = float("inf")`, we can produce the same result as the non-private logistic regression classifier.

dp_clf = dp.LogisticRegression(epsilon=float("inf"), data_norm=1e5)
dp_clf.fit(X_train, y_train)

# Agreement is measured by scoring the DP model against the *non-private
# model's predictions* (not the true labels).
print("Agreement between non-private and differentially private (epsilon=inf) classifiers: %.2f%%" %
     (dp_clf.score(X_test, clf.predict(X_test)) * 100))

# ## Tradeoff of accuracy and privacy
# We can also visualise the tradeoff between accuracy and `epsilon` using `matplotlib`.

# +
# 500 epsilon values, log-spaced over [1e-3, 10]; one DP model per value.
accuracy = []
epsilons = np.logspace(-3, 1, 500)

for eps in epsilons:
    dp_clf = dp.LogisticRegression(epsilon=eps, data_norm=100)
    dp_clf.fit(X_train, y_train)
    accuracy.append(dp_clf.score(X_test, y_test))
# -

# Let's save the results using `pickle` so we can reproduce the plot easily in the future.

# +
import pickle

# NOTE(review): the file handle from open() is never closed; a `with` block
# would be safer.
pickle.dump((epsilons, baseline, accuracy), open("lr_accuracy_500.p", "wb" ) )
# -

# Let's plot the results using `matplotlib`. The discontinuity observed near `epsilon = 10` is an artifact of the model.
# Because of the norm-clipping applied to the dataset before training (`data_norm=100`), the accuracy plateaus without reaching the non-private baseline.

# +
import matplotlib.pyplot as plt
import pickle

# Reload the saved sweep so the plot can be reproduced without rerunning it.
epsilons, baseline, accuracy = pickle.load(open("lr_accuracy_500.p", "rb"))

plt.semilogx(epsilons, accuracy, label="Differentially private")
# Horizontal reference line at the non-private accuracy.
plt.plot(epsilons, np.ones_like(epsilons) * baseline, dashes=[2,2], label="Non-private")
plt.title("Differentially private logistic regression accuracy")
plt.xlabel("epsilon")
plt.ylabel("Accuracy")
plt.ylim(0, 1)
plt.xlim(epsilons[0], epsilons[-1])
plt.legend(loc=3)
plt.show()
Chapter2/differential_privacy/logistic_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Single Spectrum Analysis (SSA) # # Illustration de la méthode [SSA](https://en.wikipedia.org/wiki/Singular_spectrum_analysis) pour les séries temporelles appliquée à la détection de points aberrants. La méthode est décrite dans [Singular Spectrum Analysis: Methodology and Comparison](http://perso.ens-lyon.fr/patrick.flandrin/hassani.pdf). Voir aussi [Automated outlier detection in Singular Spectrum Analysis](https://www.statistics.gov.hk/wsc/CPS204-P35-S.pdf). from jyquickhelper import add_notebook_menu add_notebook_menu() # %matplotlib inline # ## Une série articielle # # On introduit quelques points aberrants, pour le reste, elle suit le modèle $y_t = \frac{9}{10} y_{t-2} + \epsilon_t + a_t$ où $a_t$ est le bruit aberrant qui survient quelques fois. import numpy.random as rnd import numpy N = 2000 bruit1 = rnd.normal(size=(N,)) temps = numpy.arange(N) bruit1[:5], temps[:5] # On crée un bruit aberrant. import random bruit2 = numpy.zeros((N,)) for i in range(0, 10): h = random.randint(0, N-1) bruit2[h] = rnd.normal() + 10 serie = [] y = 10 for i in range(N//2+100): serie.append(y + bruit1[i] + 0.0004 * temps[i] + bruit2[i]) if i > 30: y = 0.9 * serie[-2] Y = numpy.array(serie[-1000:]) Y[:5] import matplotlib.pyplot as plt fig, ax = plt.subplots(1, 2, figsize=(12, 4)) ax[0].plot(numpy.arange(len(Y)), Y) ax[1].plot(numpy.arange(800, 900), Y[800:900]) ax[0].set_title("Série temporelle simulée") ax[1].set_title("Même série temporelle simulée"); # ## Autocorrélations # # L'autocorrélogramme est définie par la série $cor(Y_t, Y_{t-d})_d$. On le calcule sur la série nettoyée de sa tendance. 
from pandas import DataFrame df = DataFrame(dict(Y=Y)) df.head() from statsmodels.tsa.tsatools import detrend df["notrend"] = detrend(df.Y) df.head() ax = df.plot() ax.set_title("Sans tendance"); # L'autocorrélogramme à proprement parler. from statsmodels.tsa.stattools import acf cor = acf(df.notrend) fig, ax = plt.subplots(1, 1) ax.plot(cor) ax.set_title("Autocorrélogramme"); # Etant donnée que la série $Y_t$ dépend de $Y_{t-2}$, on observe un pic pour $cor(Y_t, Y_{t-2})_d$ et pour tous les $d$ pairs. $cor(Y_t, Y_{t-4}) \sim cor(Y_t, Y_{t-2})^2$. On enlève ces effets récursifs en calculant l'autocorrélogramme partiel qui correspond à l'estimation des coefficients d'un [modèle autorégressif](https://fr.wikipedia.org/wiki/Processus_autor%C3%A9gressif) infini. from statsmodels.tsa.stattools import pacf pcor = pacf(df.notrend) fig, ax = plt.subplots(1, 1) ax.plot(pcor[1:]) ax.set_title("Autocorrélogramme partiel"); # ## SSA # # Ou Singular Spectrum Analysis. La méthode part de la matrice des séries décalées qu'on décompose avec la méthode SVD ou [Singular Value Decomposition](https://fr.wikipedia.org/wiki/D%C3%A9composition_en_valeurs_singuli%C3%A8res). 
# +
def lagged_ts(serie, lag):
    # Build the (dim - lag + 1, lag) trajectory (Hankel-like) matrix of the
    # series: row t holds serie[t], serie[t+1], ..., serie[t+lag-1].
    dim = serie.shape[0]
    res = numpy.zeros((dim - lag + 1, lag))
    for i in range(lag):
        res[:, i] = serie[i:dim-lag+i+1]
    return res

lagged_ts(Y, 3)
# -

lag = lagged_ts(Y, 60)
lag.shape

from numpy.linalg import svd

# Decompose the trajectory matrix: lag == u @ numpy.diag(s) @ vh
# (once s is padded into a rectangular Sigma, see below).
u, s, vh = svd(lag)
u.shape, s.shape, vh.shape

# Rectangular Sigma so that u @ d @ vh has the trajectory matrix's shape
# (941 rows = len(Y) - 60 + 1).
d = numpy.zeros((941, 60))
d[:60,:60] = numpy.diag(s)
(u @ d @ vh).shape

fig, ax = plt.subplots(1,2, figsize=(12,4))
DataFrame(dict(valeur=s)).plot(kind="bar", ax=ax[0])
DataFrame(dict(valeur=s[1:15])).plot(kind="bar", ax=ax[1])
ax[0].set_title("Valeurs propres")
ax[1].set_title("Valeurs propres sans la première");

# I picture the SVD as projecting vectors onto the space spanned by the
# leading singular vectors: each added dimension is another puzzle piece,
# until the whole series is reassembled.  Which can also be seen as follows:

# Renamed from `np` (shadowed the usual numpy alias and was easy to misread).
n_plots = 12
fig, ax = plt.subplots(n_plots, 3, figsize=(14, n_plots*2))
for n in range(n_plots):
    i = n if n < 5 else n * 5 - 15
    d = numpy.zeros((941, 60))
    d[i, i] = s[i]
    X2 = u @ d @ vh
    pos = 0  # X2.shape[1] - 1
    # Series reconstructed from the single component i (full view).
    # Bug fix: the title was set on ax[n, 1] twice and ax[n, 0] never got one.
    ax[n, 0].plot(X2[:,pos])
    ax[n, 0].set_title("i=%d" % i)
    # Same single-component reconstruction, zoomed on samples 800..850.
    ax[n, 1].plot(X2[800:850,pos])
    ax[n, 1].set_title("i=%d" % i)
    # Cumulative reconstruction from components 0..i, against the raw series.
    d = numpy.zeros((941, 60))
    d[:i+1, :i+1] = numpy.diag(s[:i+1])
    X2 = u @ d @ vh
    ax[n, 2].plot(X2[800:850,pos])
    ax[n, 2].plot(Y[800:850])
    ax[n, 2].set_title("-->i=%d + 1" % i)

# ## Prediction
#
# We want to predict $Y_{t+1}$. The idea is to apply the method while taking $Y_{t+1}$ equal to $Y_t$, then to replace that placeholder with the value of the reconstructed series. One can even predict at a horizon further out than the next value.
fig, ax = plt.subplots(1, 1, figsize=(5,5)) for i in range(0, 8): ax.plot([0, 5], [i, i], 'k-') if i < 6: ax.plot([i, i], [0, 7], 'k-') if i < 4: ax.text(i + 0.1, 1.5, "Y(t-%d)" % (4-i)) ax.text(i + 0.1, 0.5, "Y(t-%d)" % (3-i)) ax.text(4.1, 1.5, "Y(t)") ax.text(4.05, 0.5, "Y(t+1)=?") plt.axis('off'); # ## Les points aberrants # # On repère les points aberrants avec l'une méthode de son choix sur la série reconstruite. d = numpy.zeros((941, 60)) for i in range(0, 30): d[i, i] = s[i] X2 = u @ d @ vh from sklearn.covariance import EllipticEnvelope env = EllipticEnvelope(support_fraction=0.9) env.fit(X2[:,:30]) # L'idéal serait d'utiliser une méthode basée sur une ACP. Le plus proche reste le modèle gaussien avec [EllipticEnvelope](http://scikit-learn.org/dev/modules/generated/sklearn.covariance.EllipticEnvelope.html#sklearn.covariance.EllipticEnvelope). # + out = env.predict(X2[:,:30]) fig, ax = plt.subplots(1, 1, figsize=(12,2)) ax.plot((1 - out)/2, "-") ax.set_title("Points aberrants d'une série temporelles."); # - # On observe des plages contiguës. Cela signifie que d'une valeur aberrante contamine des vecteurs décalées consécutifs de la série $Y$. Il ne reste plus qu'à repérer la valeur incriminée. fig, ax = plt.subplots(1, 1, figsize=(14,2)) ax.plot(X2[:,0], label="serie") ax.plot((1 - out)*5, "-", label="outlier") ax.set_title("Points aberrants sur la série reconstruite") ax.legend(); fig, ax = plt.subplots(1, 1, figsize=(14,2)) ax.plot(Y, label="serie") ax.plot((1 - out)*5, "-", label="outlier") ax.set_title("Points aberrants sur la série brute") ax.legend(); # Ce qui a l'air de correspondre à la fin des grandes plages. On recommence avec la probabilité d'être un outlier. fig, ax = plt.subplots(1, 1, figsize=(14,2)) outp = env.decision_function(X2[:,:30]) ax.plot(Y, label="serie") ax.plot(outp, "-", label="Proba not outlier") ax.set_title("Points aberrants sur la série brute") ax.legend();
_doc/notebooks/2a/timeseries_ssa.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Activity 01: Implementing k-means Clustering

# +
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score, silhouette_score
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
import math

np.random.seed(0)

# %matplotlib inline
# -

# ## Seeds Dataset: Silhouette Score

# Per the UCI Data Repository, the fields in the Seeds dataset we will be using correspond to:
#
# 1. area A,
# 2. perimeter P,
# 3. compactness C = 4*pi*A/P^2,
# 4. length of kernel,
# 5. width of kernel,
# 6. asymmetry coefficient
# 7. length of kernel groove.
# All of these parameters were real-valued continuous.

seeds = pd.read_csv('Seed_Data.csv')
seeds.head()

X = seeds[['A','P','C','LK','WK','A_Coef','LKG']]
y = seeds['target']

# See what our features look like
X.head()

# +
## Bring back the function we created earlier
def k_means(X, K):
    # Lloyd's-style k-means: returns the final centroids and labels plus the
    # full history of both, so each iteration can be visualised afterwards.
    # Keep track of history so you can see K-Means in action
    centroids_history = []
    labels_history = []
    # Randomly initialize K centroids.  NOTE(review): np.random.choice samples
    # indices *with replacement* here, so two initial centroids can coincide
    # — confirm this is acceptable for the activity.
    rand_index = np.random.choice(X.shape[0], K)
    centroids = X[rand_index]
    centroids_history.append(centroids)
    # While loop repeats process until difference between new/old centroids = 0
    while True:
        # Euclidean distances are calculated for each point relative to centroids, and then np.argmin returns
        # the index location of the minimal distance - which cluster a point is assigned to
        labels = np.argmin(cdist(X, centroids), axis=1)
        labels_history.append(labels)
        # Take mean of points within clusters to find new centroids.
        # NOTE(review): a cluster that loses all its points yields a NaN mean
        # here — assumed not to occur for this dataset.
        new_centroids = np.array([X[labels == i].mean(axis=0) for i in range(K)])
        centroids_history.append(new_centroids)
        # If old centroids and new centroids no longer change, K-Means is
        # complete and end. Otherwise continue
        if np.all(centroids == new_centroids):
            break
        centroids = new_centroids
    return centroids, labels, centroids_history, labels_history
# -

# Convert from Pandas dataframe to NumPy matrix
X_mat = X.values

# Run our Seeds matrix through the k_means function we created earlier
centroids, labels, centroids_history, labels_history = k_means(X_mat, 3)

# See what labels we got
print(labels)

# +
# See how well our implementation of K-Means did: raw scatter first,
# then the same scatter coloured by cluster assignment.
plt.scatter(X['A'], X['LK'])
plt.title('Wheat Seeds - Area vs Length of Kernel')
plt.show()
plt.scatter(X['A'], X['LK'], c=labels, cmap='tab20b')
plt.title('Wheat Seeds - Area vs Length of Kernel')
plt.show()

# +
# Calculate Silhouette Score (on the two plotted features only)
silhouette_score(X[['A','LK']], labels)
Activity01/Activity01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="8VoL10BRfG5z" colab_type="text" # Often times when working with data, you will use a dictionary just so you can use key names to make reading the code and accessing the data easier to understand. Python has another container called a `namedtuple` that is a tuple, but has names for each position of the tuple. You create one by passing a name for the tuple type and a list of field names. # # For example, `Cookie = namedtuple("Cookie", ['name', 'quantity'])` will create a container, and you can create new ones of the type using `Cookie('chocolate chip', 1)` where you can access the name using the `name` attribute, and then get the `quantity` using the quantity attribute. # + id="WlWSv4c_YJ5R" colab_type="code" colab={} from collections import namedtuple # + id="I-_mTUE2fP8C" colab_type="code" colab={} nt = namedtuple('carro', ['marca', 'cor']) # Tipo: 'carro' # Atributos: 'marca' e 'cor' # + id="hLm_Ne2TfiDx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="00764102-1626-42eb-cc8d-95ff20cc6eba" lista = [] lista.append(nt('BMW','azul')) print(lista) lista.append(nt('Honda','vermelho')) print(lista) # + id="ST39sFTUg595" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="25718340-e660-4ed4-ea9b-632998846bfb" for i in lista: print(i.marca, i.cor)
Notebooks/NamedTuple.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/aggreyeric/100-Days-Of-ML-Code/blob/master/Recognizing_Faces_in_the_Wild.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="Fs6ZaP7ES6rU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="afd859db-388e-41d2-ccbc-6dca2b66bc0d" # !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # + id="r_OOiFAuar4a" colab_type="code" colab={} import pandas import numpy as np import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = [116,10] import seaborn as sns from sklearn.model_selection import train_test_split import xgboost as xgb # + id="li7XfkRLbevl" colab_type="code" colab={} #authenticate auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # + id="tpKG6m6-dlhe" colab_type="code" colab={} #get file #https://drive.google.com/open?id=1HhOB1yRgKepqb0ty0vXo5bvzBDM0non6 #recognizing-faces-in-the-wild.zip all_data = drive.CreateFile({'id':'1HhOB1yRgKepqb0ty0vXo5bvzBDM0non6'}) all_data.GetContentFile('recognizing-faces-in-the-wild.zip') # + id="4DXq3FPXj_eU" colab_type="code" colab={} # + id="Hcekvctsj42W" colab_type="code" colab={} # + id="j06939hVjyPj" colab_type="code" colab={} # + id="JLmoLlG-jrTc" colab_type="code" colab={} # + id="2Vzc8EW0gO8d" colab_type="code" colab={} #unzip recognizing-faces-in-the-wild.zip # !unzip test # !unzip train # + id="ociU0umHhIyW" colab_type="code" colab={}
Recognizing_Faces_in_the_Wild.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Demonstration Notebook # # Train a model to recognize notes from input sounds # # ## Part 1: Data Preparation and Exploration # # By <NAME> \ # For Bnaura # # &copy; 2021 <NAME> <<EMAIL>> # # ## Contents # # 1. [Import Libraries](#lib_import) # 1. [Data Import](#data_import) # 1. [Data Exploration](#data_explore) # 1. [Data Augmentation](#data_aug) # 1. [Feature Extraction](#feat_extract) # 1. [Save Features](#feat_save) # # TO DO # - Add column to database for train=1, test=0 # - Generalize training functions to look for any files matching corresponding note tag # - Add _Male2 recordings # - Submodule repo into simple_gui # %load_ext autoreload # %autoreload 2 # ## <a id = "lib_import"></a>1. Import Libraries # + import sys import os import datetime import time import numpy as np import pandas as pd import matplotlib.pyplot as plt import ipywidgets as widgets from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn import svm from sklearn.preprocessing import LabelEncoder import xgboost as xgb import pickle import sqlite3 from scipy.io import wavfile as wav from IPython.display import Audio # Add custom modules to path module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) import util.music_util as mus from util.ml_util import feat_extract, load_training_data from util import DATA_FOLDER, SCALE # - # ## <a id = "data_import"></a>2. Data Import X, y, fs = load_training_data(SCALE) # ## <a id = "data_explore"></a> 3. 
Data Exploration

# ### Overview

# Sanity check the dimensions of X and y
# (one row per note in SCALE, one column per audio sample)
print('X is a {} with shape {}'.format(type(X), X.shape))
print('X has {} rows, and expected # of rows from notes = {}'.format(X.shape[0], len(SCALE)))
print('X sample length is {}'.format(X.shape[1]))
print('y has {} samples, and expected # of samples from notes = {}'.format(len(y), len(SCALE)))

# ### Plot Waveform

# +
# Pick a note to plot
idx_test = 1
signal_test = X[idx_test,:]
note_test = SCALE[idx_test]

# Create array of time samples, converting number of samples and dividing by sample of frequency
t_array = np.arange(X.shape[1])/fs

# Plot waveform over short time period to see sine
plt.subplot(121)
plt.xlim(0.1,0.9)
plt.xlabel('Time (s)')
plt.title('Hummed {}: {} Hz'.format(note_test, mus.note_to_freq[note_test]))
plt.plot(t_array, signal_test)

# Plot waveform over shorter time period to see sine
# (50 ms zoom so individual oscillation cycles are visible)
plt.subplot(122)
plt.xlim(0.5 ,0.55)
plt.xlabel('Time (s)')
plt.title('Hummed {}: {} Hz'.format(note_test, mus.note_to_freq[note_test]))
plt.plot(t_array, signal_test)

plt.show()
# -

# ### Plot Frequency

# +
# Take FFT, normalized by the signal length; keep only the first half of
# the spectrum (positive frequencies — the input is real-valued audio).
ftransform = np.fft.fft(signal_test)/len(signal_test)
ftransform = ftransform[range(int(len(signal_test)/2))]
tp_count = len(signal_test)
vals = np.arange(tp_count/2)
t_period = tp_count/fs

# Scale to Hz frequency values for plotting
freqs = vals/t_period

# Plot the magnitude spectrum up to twice the note's nominal frequency,
# so the fundamental peak sits mid-axis.
plt.plot(freqs, abs(ftransform))
plt.xlim((0, 2*mus.note_to_freq[note_test]))
plt.xlabel('Frequency (Hz)')
plt.title('Hummed {}: {} Hz'.format(note_test, mus.note_to_freq[note_test]))
plt.show()
# -

# ## <a id = "data_aug"></a> 4. 
Data Augmentation # # Synthesize dataset `aug_factor` times larger # + aug_factor = 20 # Initialize matrix where each row contains a noisy sample X_aug = np.empty((aug_factor*X.shape[0], X.shape[1])) y_aug = pd.DataFrame(np.empty((aug_factor*X.shape[0], 1)), columns=['note']) # - # ### 4.1 Add time shifts # Demonstrate time shift code arr1 = np.array(np.arange(1,6)) print(arr1) print(mus.add_timeshifts(arr1, samp_shift_max=4, debug=True)) # Apply time shifts to augmented training data # + # Add random time shifts noise to each element T_SHIFT_MAX = 0.1 # seconds DEBUG=False #X_aug = X_aug.apply(lambda x: mus.add_timeshifts(x, # samp_shift_max=np.int(T_SHIFT_MAX*fs), # debug=DEBUG), axis=1) for idx in range(X.shape[0]): # Initialize truth values in synthesized set y_aug.loc[aug_factor*idx:aug_factor*(idx+1)]=SCALE[idx] for sample in range(aug_factor): X_aug[sample+aug_factor*idx,:] = mus.add_timeshifts(X[idx,:], samp_shift_max=np.int(T_SHIFT_MAX*fs), debug=DEBUG) # - # ### 4.2 Add noise # Add noise to original recordings # + NOISE_AMPL = 100 # Add white noise to each element X_aug = pd.DataFrame(X_aug).apply(lambda x: mus.add_noise(x, ampl=NOISE_AMPL), axis=1).values # - # Sanity check the dimensions of the augmented training set print('X_aug has shape {}, with aug_factor = {}'.format(X_aug.shape, aug_factor)) print('Original X has shape {}'.format(X.shape)) print('y_aug has shape {}'.format(y_aug.shape)) # ### Plot example with added noise and shifts # + idx_test = 1 signal_test1 = X_aug[idx_test*aug_factor,:] signal_test2 = X_aug[idx_test*aug_factor+5,:] note_test = SCALE[idx_test] # Plot waveform over shorter time period to see sine plt.subplot(212) plt.xlim(0.5 ,0.55) plt.xlabel('Time (s)') plt.title('Examples with noise - Hummed {}: {} Hz'.format(note_test, mus.note_to_freq[note_test])) plt.plot(t_array, signal_test1) plt.plot(t_array, signal_test2) plt.show() # - # ## <a id = "feat_extract"></a> 5. 
Feature Extraction

# Extract one feature row per augmented waveform (MFCC log-fbank columns,
# judging by the column names used below).
X_feat = feat_extract(X_aug, fs, mus.note_to_freq, SCALE, debug=True)

X_feat.head()

# ### Explore extracted features

NOTE_TEST = 'C'
# Mean feature vector over all training samples whose label contains NOTE_TEST
plt.stem(X_feat[y_aug['note'].str.contains(NOTE_TEST)].mean())
#plt.bar(SCALE, X_feat[y_aug['note'].str.contains(NOTE_TEST)].mean())
plt.title('Relative power across {} note training samples'.format(NOTE_TEST))
plt.xlabel('MFCC filter #')
plt.show()

# +
NOTE1 = 'E4'
NOTE2 = 'G4'

#plt.scatter(X_feat[y_aug['note'].str.contains(NOTE1)][NOTE1],
#            X_feat[y_aug['note'].str.contains(NOTE1)][NOTE2])
#plt.scatter(X_feat[y_aug['note'].str.contains(NOTE2)][NOTE1],
#            X_feat[y_aug['note'].str.contains(NOTE2)][NOTE2])

# Scatter two MFCC log-fbank features against each other, one colour per note.
# NOTE(review): the axis labels below still say 'Relative power at <note>'
# from the commented-out per-note-column version above — they do not match
# the mfcc_logfbank_* columns actually plotted; confirm and update.
feat1 = 'mfcc_logfbank_1'
feat2 = 'mfcc_logfbank_2'
plt.scatter(X_feat[y_aug['note'].str.contains(NOTE1)][feat1],
            X_feat[y_aug['note'].str.contains(NOTE1)][feat2])
plt.scatter(X_feat[y_aug['note'].str.contains(NOTE2)][feat1],
            X_feat[y_aug['note'].str.contains(NOTE2)][feat2])
plt.ylabel('Relative power at {}'.format(NOTE2))
plt.xlabel('Relative power at {}'.format(NOTE1))
plt.legend([NOTE1, NOTE2])
plt.title('Feature separation between notes {} vs {}'.format(NOTE1, NOTE2))
plt.show()
# -

# ## <a id = "feat_save"></a> 6. 
Save Features # ### Using SQL # + FEAT_DB_NAME = os.path.join(DATA_FOLDER, 'features.db') TABLE_NAME = 'X_all' conn = sqlite3.connect(FEAT_DB_NAME) c = conn.cursor() # Create table c.execute('''CREATE TABLE IF NOT EXISTS {} ({} real, {} real, {} real, {} real, {} real, {} real)'''.format(TABLE_NAME, X_feat.columns[0], X_feat.columns[1], X_feat.columns[2], X_feat.columns[3], X_feat.columns[4], X_feat.columns[5])) # Commit the changes to db conn.commit() # Insert multiple records in a single query c.executemany('INSERT INTO {} VALUES(?,?,?,?,?,?);'.format(TABLE_NAME), X_feat.values); conn.commit() # Close the connection conn.close() # - # ### Using CSV # Split data for training and testing and save off features # + X_train, X_test, y_train, y_test = train_test_split(X_feat, y_aug, random_state=1) if not(os.path.exists(DATA_FOLDER)): os.mkdir(DATA_FOLDER) X_train.to_csv(os.path.join(DATA_FOLDER, 'X_train.csv'), index=False) X_test.to_csv(os.path.join(DATA_FOLDER, 'X_test.csv'), index=False) y_train.to_csv(os.path.join(DATA_FOLDER, 'y_train.csv'), index=False) y_test.to_csv(os.path.join(DATA_FOLDER, 'y_test.csv'), index=False) # -
0_notebooks/1_Data_Prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="1752ce061957fd1ce93d2e4db9494b7b29da7935" id="4skdndoa-7GI" # <center> # # # <img src="https://habrastorage.org/webt/jq/gc/r5/jqgcr5azmatev6zvgufn8xftehy.png"> # # # **II Escuela de Verano en Física Computacional** # # Este material está sujeto a los términos y condiciones de la licencia [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/). Se permite el uso gratuito para cualquier propósito no comercial. # # *También puede consultar la última versión de este notebook en nuestro [repositorio](https://github.com/PCPUNMSM) y los videos de clase [en nuestro canal de Youtube](https://www.youtube.com/channel/UCze8vzQLcplutz0nWDNjFCA).* # # + [markdown] _uuid="9b690f2c8129ad9445c544337375b93246c26668" id="Vm9O4SUd-7GJ" # # <center> Lista de Ejercicios # # # + [markdown] id="8c9en3VuOmmu" # # ### Problema 1: Raíces de ecuaciones # # # + [markdown] id="_SXtYxOtOmmu" # La siguiente fórmula nos describe la velocidad ascendente v de un cohete: # # $$v(t)=uln\left(\frac{m}{m-qt}\right)-gt$$ # # Donde: # # * u = módulo de la velocidad del combustible expulsado en relación con el cohete # # * m = masa inicial del cohete (en t=0) # # * g = módulo aceleración de la gravedad # # * t = tiempo # # + [markdown] id="uXLZje3vOmmv" # Dado: # # $$u= 950m/s,\quad m=2x10^5 kg, \quad q=3x10^3 kg/s, \quad g=9.8m/s^2, \quad t \in [10,25]$$ # + [markdown] id="kZtj6q2fOmmv" # a) Defina y utilice 3 métodos para hallar el tiempo que tarda el cohete en alcanzar la velocidad de 500 m/s. # # b) Resuelva analíticamente y compare resultados con los 3 métodos utilizados. # # c) Elabore gráficas y detalle su análisis. 
# + id="RgHjhJJrOmmv" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="80f60186-ea90-483b-fff4-a8a71767768d"
import numpy as np
import matplotlib.pyplot as plt

# Rocket parameters (SI units)
u = 950 #m/s
m = 2E5 #kg
q = 3E3 #kg/s
g = 9.8 #m/s^2

def v(t):
    """Upward speed of the rocket at time t: rocket-equation term minus gravity loss."""
    return u*np.log(m/(m-q*t))-g*t

def f(t):
    # Root function: f(t*) = 0 exactly when v(t*) = 500 m/s, which is the
    # time we want to find.
    return v(t)-500

# Plot v(t) to locate the root visually (it sits near t = 40 s).
t_plot = np.linspace(20,50,500)
plt.plot(t_plot,v(t_plot))
plt.grid(True)
plt.title('Velocidad de un cohete')
plt.xlabel('Tiempo (s)')
plt.ylabel('Velocidad (m/s)')
plt.show()

def dfdt(t):
    # Numerical derivative of f via the central difference.
    h = 1E-5
    return (f(t+h/2)-f(t-h/2))/h

# All three solvers stop once the step / bracketing interval is below this tolerance.
accuracy = 1E-10

# Newton-Raphson, starting from the initial guess t.
def newton_method(t):
    delta = 1.0
    while abs(delta) > accuracy:
        delta = f(t)/dfdt(t)
        t -= delta
    return t

t_newton = newton_method(40)
print(f"Según método de Newton, el cohete alcanza la velocidad de 500m/s en el tiempo {t_newton}s")

# Secant method, using the two initial guesses t and t0.
def secant_method(t,t0):
    delta = 1.0
    while abs(delta) > accuracy:
        dfdt = (f(t)-f(t0))/(t-t0)  # secant slope (deliberately shadows the helper above)
        delta = f(t)/dfdt
        t0 = t
        t -= delta
    return t

t_secant = secant_method(30,60)
print(f"Según método secante, el cohete alcanza la velocidad de 500m/s en el tiempo {t_secant}s")

# Bisection: f(t1) and f(t2) must have opposite signs.
def biseccion(f, t1,t2,accuracy):
    while abs(t1-t2) > accuracy:
        t = (t1+t2)/2
        if np.sign(f(t))==np.sign(f(t1)):
            t1 = t
        elif np.sign(f(t))==np.sign(f(t2)):
            t2 = t
        else:
            # f(t) is exactly zero: t is the root. Without this branch
            # neither endpoint moves and the loop spins forever.
            return t
    return t

t_bisec = biseccion(f,30,60,accuracy)
print(f"Según método de bisección, el cohete alcanza la velocidad de 500m/s en el tiempo {t_bisec}s")

## Analytically (via Mathematica) the root is t = 40.8159 s, so the numerical solutions above are very close.
## Shrinking `accuracy` further can tighten them, although these are already quite good.
# + [markdown] id="tXH6PyZ32A_n" # Me tomé la libertad de escribir mis propios programas (con ayuda del libro de <NAME> "Computational Physics") para practicar un poco e investigar más a fondo las ventajas y desventajas de cada uno de estos métodos. # # Analíticamente, de acuerdo con Mathematica, el resultado es $t=40.8159 s$, de manera que las soluciones aquí dadas se aproximan bastante. # Hacer el valor de la precisión (accuracy) más pequeño puede ayudar a encontrar soluciones más cercanas, aunque estas son bastante buenas. # Lo que hicimos fue definir la función $f(t)=v(t)-500$ y hallar la raíz mínimo, ya que esto da el tiempo $t_0$ tal que $f(t_0)=v(t_0)-500=0$, o bien, $v(t_0)=500$, que es lo que queremos (ignoro unidades por simplicidad). # # El método de bisección (o búsqueda binaria), aquí utilizado, es un método robusto. Uno específica un intervalo en el que desea encontrar la solución a una ecuación. En este caso tuvimos que graficar para saber grosso modo en qué región se encuentra dicha solución. Supongámos que nuestros puntos iniciales $t_1$ y $t_2$ se encuentran a una distancia $\delta$; la distancia se disminuye a la mitad a cada paso de la búsqueda binaria, por lo que después de $N$ pasos, la distancia es $\delta/2^N$. Si tenemos, por ejemplo, puntos a una distancia inicial de $\delta=10^{10}$ con una precisión de $10^{-10}$, el número de pasos necesario para encontrar la raíz es $67$, que no requiere mucho tiempo de cómputo. Este método tiene sus desventajas, por ejemplo, si entre los puntos iniciales hay un número par de raíces. Por otro lado, a veces no es posible graficar para buscar un punto que a grosso modo sea cercano a la solución. # # El método de Newton requiere un solo valor inicial. Basado en este valor, usa la pendiente en esa posición para extrapolar y hallar un segundo punto que usualmente será más cercano a la solución que el primero (si uno tiene mala suerte, puede que sea más lejano). 
El método de Newton tiene convergencia cuadrática, lo que implica que converge extremadamente rápido. Las desventajas son que requiere que conozcamos la derivada de la función (podemos usar derivación numérica) y que no siempre converge (esto depende del valor de la derivada).
#
# El método de la secante arregla algunos problemas que se tienen en el de Newton, pues si no conocemos la derivada simplemente podemos calcularla numéricamente usando la secante (diferencia finita) en lugar de la tangente (derivada). Son formas similares; el método de la secante también converge con rapidez y tiene las mismas desventajas. También puede generalizarse a la solución de ecuaciones simultáneas no lineales, aunque la generalización puede ser complicada.

# + [markdown] id="B5oZylATOmmw"
# ### Problema 2: Interpolación

# + [markdown] id="9DnOghRsOmmw"
# Dadas las funciones:
#
# 1. $f (x) = sin(x^2) $
#
# 2. $f (x) = e^{sin (x)}$
#
# 3. $f (x) = \frac{0.2}{(x-3.2)^2 + 0.04}$
#
# a) Estudiar el error en dos métodos de interpolación (de su elección) con 10,20 y 50 datos (uniformemente distribuidos) en el intervalo [0.0, 5.0] y compare los resultados.
#
# b) Hallar los órdenes de convergencia, elabore su gráfico con el eje x en base 2 y detallar su análisis.
#
# c) Añade una perturbación sinusoidal ($\delta g(x)=sin(x)$) del orden de $10^{-4},10^{-5}, 10^{-6}$ con distribución homogénea, grafice y detalle su análisis, ¿cuál es el error que generan con respecto a aproximación hecha en "2.a)"? 
# + id="VIAJguU-Ommx" # import numpy as np # import numpy.polynomial.polynomial as poly # import matplotlib.pyplot as plt # import sympy as sym # # Interpolacion de Lagrange # def f1(x): # return np.sin(x**2) # def f2(x): # return np.exp(np.sin(x)) # def f3(x): # return 0.2/((x-3.2)**2+0.04) # def lagrange(f,n): # # n es número de puntos y f la función # xi = np.linspace(0.0,5.0,n) # fi = f(xi) # # PROCEDIMIENTO # n = len(xi) # x = sym.Symbol('x') #variable simbólica # polinomio = 0 # divisorL = np.zeros(n, dtype = float) # for i in range(0,n,1): # # Termino de Lagrange # numerador = 1 # denominador = 1 # for j in range(0,n,1): # if (j!=i): # numerador = numerador*(x-xi[j]) # denominador = denominador*(xi[i]-xi[j]) # terminoLi = numerador/denominador # polinomio = polinomio + terminoLi*fi[i] # divisorL[i] = denominador # # simplifica el polinomio # #Dado un polinomio, expand() lo pondrá en una forma canónica de una suma de monomios. # polisimple = polinomio.expand() # # para evaluación numérica # px = sym.lambdify(x,polisimple) # # Puntos para la gráfica # muestras = 101 # a = np.min(xi) # b = np.max(xi) # pxi = np.linspace(a,b,muestras) # pfi = px(pxi) # # SALIDA # print(f'Polinomio de Lagrange para función {f.__name__} con {n} datos: ') # print(polisimple) # # Gráfica # plt.plot(xi,fi,'o', label = 'Puntos') # plt.plot(pxi,pfi, label = 'Polinomio') # plt.legend() # plt.xlabel('xi') # plt.ylabel('fi') # plt.grid() # plt.title('Interpolación Lagrange') # plt.show() # for n in [10,20,50]: # for func in [f1,f2,f3]: # lagrange(func,n) # + [markdown] id="CPiet4s9Ommx" # ### Problema 3: Derivación numérica # + [markdown] id="0XsAD08oOmmy" # En 1928, <NAME> propuso una teoría muy exitosa del decaímiento $\alpha$, según la cual la partícula $α$ se mueve libremente dentro del núcleo y se emite después de atravesar la barrera potencial entre ella y el núcleo hijo. 
En otras palabras, la partícula $α$, de carga $Z_2$, cuya energía es $E$, está atrapada en un pozo potencial de radio R por la barrera potencial: # # $$V(r)=\frac{Z_1Z_2e^2}{4\pi\epsilon_0 r}$$ # # para $r>R$. # # $Z_1 =Z-2, \quad Z_2=2$, donde Z es la carga del núcleo de radio R. # # $r$: es el radio fuera del núcleo ($r>R$ y con A, número de masa) donde: # # $$R = 1.5\times 10^{-15}\,A^{1/3}\,{\rm m}=2.0\times 10^{-15}\,Z_1^{\,1/3}\,{\rm m}$$ # + [markdown] id="yLRW88_oOmmy" # a) Calcule la fuerza que experimenta la partícula y analice $\alpha$ para $r\in [R,3R]$, utilizando los tres esquemas de diferencias finitas. # # b) Comparar los esquemas con la solución analítica, calcular los errores, órdenes de convergencia y detallar su análisis. # + id="g4rSazy2Ommy" colab={"base_uri": "https://localhost:8080/", "height": 855} outputId="ad9a580d-4bd0-4cfb-b71d-7b076d7e3413" import numpy as np import matplotlib.pyplot as plt # Sistema internacional epsilon0 = 8.85418782E-12 e = 1.60217662E-19 Z = 20 # Número de protones Z2 = 2 Z1 = Z-2 R = 2E-15*Z1**(1/3) N = 2 h0 = R*1*10**(-N) # Este valor de h0 quiere decir que por cada valor de r, # para evaluar la derivada estudiaremos dicho valor + # 1/10^n del valor de R def V(r): return Z1*Z2*e**2/(4*np.pi*epsilon0*r) def derivative(f,a,method='centrada',h=h0): if method == 'centrada': return (f(a + h) - f(a - h))/(2*h) elif method == 'adelante': return (f(a + h) - f(a))/h elif method == 'atrás': return (f(a) - f(a - h))/h else: raise ValueError("Los métodos pueden ser 'centrada', 'adelante' o 'atrás'.") # Derivada analítica de potencial V para calcular los errores def DV(r): return -Z1*Z2*e**2/(4*np.pi*epsilon0*r**2) # Nuestro dominio va de R a 3R r = np.linspace(R,3*R,100) DV_c = derivative(V,r) DV_ad = derivative(V,r,method='adelante') DV_at = derivative(V,r,method='atrás') plt.plot(r,DV_c) plt.plot(r,DV_ad) plt.plot(r,DV_at) plt.plot(r,DV(r)) plt.legend(['DF centrada','DF hacia adelante','DF hacia atrás','Analítica']) 
plt.title('Modelo de Gamow')
plt.ylabel('Fuerza')
plt.xlabel('Distancia')
plt.grid()
plt.show()

# Truncation error of each scheme: analytic derivative minus numerical one.
c_err = DV(r)-DV_c
ad_err = DV(r)-DV_ad
at_err = DV(r)-DV_at

plt.plot(r,c_err)
plt.plot(r,ad_err)
plt.plot(r,at_err)
plt.legend(['DF centrada','DF hacia adelante','DF hacia atrás'])
plt.title('Errores de truncamiento para distintas DFs')
# Fixed axis labels: y is the error itself (previously labelled with the
# asymptotic order '$O(h^2)$', which only holds for the central scheme),
# and x is the distance r, not an angle θ (label left over from another plot).
plt.ylabel('Error')
plt.xlabel('Distancia')
plt.grid()
plt.show()

h_l=[]      # step sizes tried
errc_l=[]   # max |error|, central difference
errad_l=[]  # max |error|, forward difference
errat_l=[]  # max |error|, backward difference

for i in range(10,10000,100):
    a = R
    b = 3*R
    h = 1/i*R                # step size
    N = round((b-a)/h)       # rounded: number of sample points for this step
    r = np.linspace(a,b,N)
    DV_c=derivative(V,r, h=h)
    DV_ad=derivative(V,r, h=h,method='adelante')
    DV_at=derivative(V,r, h=h,method='atrás')
    # Worst-case absolute error over the whole domain [a,b] per iteration
    c_err=np.max(np.abs(DV(r)-DV_c))
    ad_err=np.max(np.abs(DV(r)-DV_ad))
    at_err=np.max(np.abs(DV(r)-DV_at))
    errc_l.append(c_err)
    errad_l.append(ad_err)
    errat_l.append(at_err)
    h_l.append(h)

plt.loglog(h_l,errc_l)
plt.loglog(h_l,errad_l)
plt.loglog(h_l,errat_l)
plt.legend(['DF centrada','DF hacia adelante','DF hacia atrás'])
plt.title('log(Error absoluto) vs log(tamaño de paso)')
plt.xlabel('log(h)')
plt.ylabel('$log(e_{abs})$')
plt.grid(True, which="both", ls="-")
plt.show()

# + [markdown] id="gGkoo_Go0vYI"
# La diferencia central es similar a la diferencia hacia atrás y hacia adelante salvo por el hecho de que los dos puntos están posicionados simétricamente alrededor del valor del cual queremos conocer la derivada. El método de derivada central es tanto más adecuado. Por ejemplo, si $f(x)$ y $f'''(x)$ son del orden de la unidad, el valor ideal de $h$ será típicamente de $10^{-5}$, pero el error mismo será del orden de $10^{-10}$, bastante bueno para la mayoría de nuestros propósitos. 
Este comportamiento se observa en las gráficas: para el mismo valor de $h$ los órdenes de convergencia para la diferencia hacia atrás y hacia adelante son prácticamente iguales, pero para la diferencia central es, en el peor de los casos, un par de órdenes de magnitud mejor. Claro que una aproximación a orden superior en Taylor daría mejores resultados, pero ya que buscamos un balance entre precisión y tiempo de cómputo, una diferencia central es el mejor acercamiento a este orden. Es evidente que cuando $h$ es grande, ambos métodos fallarán bastante. Lo importante es notar cómo la diferencia central es una mejor elección independientemente del valor de $h$. En la gráfica de los errores de truncamiento observamos que para valores grandes de $r$ los tres métodos son buenos; sin embargo, cuando $r$ es cercano a $R$, los métodos de diferencia hacia atrás y hacia adelante muestran errores de orden cuadrático relativamente grandes. Esto puede ser más evidente en otras funciones y, como se ve en este caso, principalmente cerca de divergencias. # + [markdown] id="ZSeLKbwTOmmz" # ### Problema 4: Integración numérica # + [markdown] id="a5xZrZd5Ommz" # La carga a partir de la densidad radial de carga lineal, $\lambda$, sobre el área de la sección transversal del cilindro, está definida por: # # $$q_{total}=\int_A \lambda(r)dA(r)$$ # # donde, dA es el diferencial de área de la base del cilindro: # # $$q_{total}=\int_A \lambda(r)(2\pi r dr)$$ # # Sabemos que la densidad tiene la forma: # # $$\lambda(r)=\lambda_0 \left(1-\frac{r^2}{a^2} \right)$$ # # Y el campo eléctrico: # # $$\vec{E}=\frac{kq_{total}}{d^2}\hat r$$ # # d: distancia de la superficie al espacio a evaluar el campo eléctrico. ($d\gt r$) # # # a) Determina la distribución de la carga y campo eléctrico (a distancias $d \in \{ r+r/4,r+r/2\}$ ) en función al radio utilizando 3 métodos de integración. # # b) Obtener la solución analítica, comparar los errores de distintos métodos y detallar análisis. 
# # # + id="ov3gBG-bOmmz" colab={"base_uri": "https://localhost:8080/"} outputId="26c66add-4886-42b2-b052-20b9775cd956" import numpy as np import matplotlib.pyplot as plt import random as random l_0 = 1 k = 1 a = 1 # radio del cilindro # Hago 1 estos valores ya que no los proporcionaron x1 = 0 # límites de integración desde 0 hasta el radio del cilindro x2 = a # que elegimos como 1 def l(r): return l_0*(1-r**2/a**2) # Integrando def f(r): return l(r)*2*np.pi*r #montecarlo def montecarlo(f,b): n = 100 #número de subintervalos del intervalo [a, b] N = 100 #número de muestras x = np.zeros(n+1) #valores de x h = (x2-x1)/n #tamaño del paso total = 0 random.seed(55) for i in range(n): x[i+1] = x[i] + h #actualizando el valor de x f_mean = 0 #initialización for k in range(N): x_k = random.uniform(x[i],x[i+1]) #número aleatorio dentro del subintervalo f_mean += f(x_k) sub_area = (h/N)*f_mean #promedio para el sub-intervalo total += sub_area #promedio del intervalo return total # método del trapezoide def int_trapezoide(f,n,x2): N = 2**(n-1) # num de divisiones h = (x2-x1)/N # tamaño de divisiones if n == 1: return 0.5*h*(f(x1)+f(x2)) else: I = 1/2*int_trapezoide(f,n-1,x2) for k in range(1,N,2): I += h*f(x1+k*h) return I # método de simpson def int_simpson(f,n,x2): N = 2**(n-1) #num de divisiones h = (x2-x1)/N #tamaño de las divisiones def t(n): N = 2**(n-1) h = (x2-x1)/N #tamaño de las divisiones t = 0 for k in range(1,N,2): t += 2/3*f(x1+k*h) return t def s(n): if n == 1: return 1/3*(f(x1)+f(x2)) else: return s(n-1)+t(n-1) return h*(s(n)+2*t(n)) # tomamos k=1 y el radio del cilindro como r=1 def E_1(d,method): if method == 'trapezoide': dist_carga = int_trapezoide(f,8,x2) elif method == 'simpson': dist_carga = int_simpson(f,8,x2) elif method == 'montecarlo': dist_carga = montecarlo(f,x2) return k*dist_carga/d**2 print(f'La carga segun método trapezoide es {int_trapezoide(f,8,x2)}') print(f'La carga segun método simpson es {int_simpson(f,8,x2)}') print(f'La carga segun 
método montecarlo es {montecarlo(f,x2)}\n') # No entendí muy bien pero supongo que aquí se refieren a que # r es el radio del cilindro? # Distancia d = [1+1/4,1+1/2] # Sol analítica para distancia 1 y distancia 2 sol_analitica = [1.00531,0.698132] for method in ['trapezoide','simpson','montecarlo']: for i,val in enumerate(d): print(f'El campo elécrico a una distancia {val} del origen es {E_1(val,method)} usando el método {method}') print (f'Error = 1-sol. obtenida/sol.analítica={abs(1-E_1(val,method)/sol_analitica[i])}\n') # + [markdown] id="eL4s02nYDJh_" # La regla del trapezoide permite calcular integrales aproximando las funciones a orden lineal en intervalos pequeños (en este caso definidos por la distancia $h$). Esto explica su nombre. Es una aproximación no muy buena porque para funciones que cambian rápidamente en intervalos pequeños puede dar errores grandes. El método de Simpson, por su parte, simplemente mejora esta idea al considerar funciones cuadráticas y no sólo lineales entre los intervalos definidos por el programa. En ambos casos se puede hacer el cálculo más preciso al incrementar el número de divisiones. Comparando los programas arriba vemos que el método de Simpson es ligeramente más complicado que la regla del trapezoide, pero no por mucho. # # Ha de tenerse en cuenta que nuestras integrales son sólo aproximaciones. En primer lugar, está un error de redondeo; sin embargo, es más importante el error de aproximación. El error de la regla del trapezoide lo da la fórmula de Euler-Maclaurin, que es de orden cuadrático (y el siguiente orden es cuádrico porque los términos con $h^3$ se cancelan). Hay poco interés en hacer $h$ tan pequeño que el error de aproximación se vuelva despreciable a comparación del error de redondeo; esto sólo hace lento al programa sin mejorar la precisión. Es por eso que tenemos que ser cuidadosos al escoger $h$. 
Para la regla de Simpson, se llega a que el error es del orden de $h^4$, que es significativamente mejor que el caso del trapezoide. Hay casos aislados en los que la regla del trapezoide puede ser mejor que la de Simpson, por lo que es útil conocer ambos métodos.
#
# El método de Monte Carlo, por su parte, utiliza el concepto de aleatoriedad. Se toma un punto al azar, que puede caer por debajo o por encima de la curva. Si se hace esto con muchos puntos (digamos $N$) y $k$ de ellos caen debajo de la curva dentro de un rectángulo de área $A$, la integral queda aproximada por $I \approx kA/N$. La principal desventaja de Monte Carlo es que no da resultados muy precisos, pues requiere un gran poder computacional para ello. Sin embargo, es útil en funciones patológicas en las que aproximar a orden lineal o cuadrático no da ninguna ventaja. Para ser más específicos, el error varía con $N$ como $N^{-1/2}$, de modo que la tasa de convergencia es bastante baja.
#
# Para este caso particular ya que es una función sencilla de integrar, vemos que con poco esfuerzo (i.e. pocos pasos para cada método) se llegan a resultados prácticamente iguales al analítico y entre sí. (El integrando va como $r-r^3/a^2$.) El error es más pequeño para el método Simpson —orden $10^{-7}$—, como es de esperarse por lo ya argumentado. Los errores de la regla del trapezoide y Monte Carlo son bastante similares —orden $10^{-5}$—.

# + [markdown] id="lLzG05ZVOmmz"
# ### Problema 5:

# + [markdown] id="nv5Tao_AOmm0"
# Considerar un aro horizontal sin rozamiento de radio R. En este aro enhebro tres objetos con masas idénticas m, y, entre los objetos, tres muelles idénticos, cada uno con una constante de fuerza k. 
# + [markdown] id="f8OO4YhlOmm0" # <p align="center"> # <img src="https://habrastorage.org/webt/nz/qz/g4/nzqzg42gjarvpx3yk-fy0qgn20k.png" width=55% /> # </p> # # # + [markdown] id="VtlNTfl3Omm0" # Sabemos que, la energía cinética es: # # $$T=\frac{1}{2}mR^2(\dot{\phi_1^2}+\dot{\phi_2^2}+\dot{\phi_3^2})$$ # # y la energía potencial: # # $$U=\frac{1}{2}kR^2 [(\phi_1-\phi_2)^2+(\phi_2-\phi_3)^2+(\phi_3-\phi_1)^2]$$ # + [markdown] id="NrIEZnEIOmm0" # Resultado: Sistema de EDOs # # $$m\frac{d^2\phi_1}{dt^2}=-k(\phi_1-\phi_2)-k(\phi_1-\phi_3)$$ # # $$m\frac{d^2\phi_2}{dt^2}=-k(\phi_2-\phi_3)-k(\phi_2-\phi_1)$$ # # $$m\frac{d^2\phi_3}{dt^2}=-k(\phi_3-\phi_1)-k(\phi_3-\phi_2)$$ # # + [markdown] id="59ruLNuoOmm1" # a) Encontrar las ecuaciones de movimiento para cada masa usando las ecuaciones de Euler-Lagrange. *(ejemplo en PCP_sesión5.ipynb)* # # b) Resolver numéricamente el sistema de ecuaciones con los métodos de Euler y RK de orden 4. Para los datos: $\phi_1=\phi_2/2,\quad \phi_2=\phi_3/2,\quad \phi_3=\pi/10 \quad $ y $\quad \phi_1'=1,\quad \phi_2'=1,\quad \phi_3'=1, \quad R=m=k=1$ # # c) Mostrar los resultados y explicar la dinámica del sistema en base al resultado de la simulación. 
# + id="hKbNsZRpOmm1" colab={"base_uri": "https://localhost:8080/", "height": 573} outputId="40fff081-f089-46e8-9a23-fb53f0bdbb7d" import numpy as np steps = 300 # número de pasos de tiempo calculados endtime = 6.0 # fin de la simulación R=m=k=1 #########################################3 #EULER # crear matrices 1D de pasos de longitud +1 para tiempo (t), posición (x), velocidad (v) t = np.zeros(steps+1) phi1 = np.zeros(steps+1) phi2 = np.zeros(steps+1) phi3 = np.zeros(steps+1) v1 = np.zeros(steps+1) v2 = np.zeros(steps+1) v3 = np.zeros(steps+1) # inicializar variables en el tiempo = 0.0 phi3[0] = np.pi/10.0 phi2[0] = phi3[0]/2 phi1[0] = phi2[0]/2 v1[0] = 1.0 v2[0] = 1.0 v3[0] = 1.0 t[0] = 0.0 dt = endtime / float(steps) for i in range (steps): t[i+1] = t[i] + dt phi1[i+1] = phi1[i] + v1[i]*dt v1[i+1] = v1[i] +(-k/m*(phi1[i]-phi2[i])-k/m*(phi1[i]-phi3[i]))*dt phi2[i+1] = phi2[i] + v2[i]*dt v2[i+1] = v2[i] +(-k/m*(phi2[i]-phi3[i])-k/m*(phi2[i]-phi1[i]))*dt phi3[i+1] = phi3[i] + v3[i]*dt v3[i+1] = v3[i] +(-k/m*(phi3[i]-phi1[i])-k/m*(phi3[i]-phi2[i]))*dt plt.plot(t, phi1) plt.plot(t,phi2) plt.plot(t,phi3) plt.legend(['phi1','phi2','phi3']) plt.title('Método de Euler') plt.xlabel('Tiempo') plt.ylabel('Ángulo') plt.show() ######################################### # <NAME> # cambio el nombre de las variables phi_i a x,y,z para no confundirme def f(r,t): x,y,z,X,Y,Z = tuple(r) fx = X fy = Y fz = Z fX = -k/m*(x-y)-k/m*(x-z) fY = -k/m*(y-x)-k/m*(y-z) fZ = -k/m*(z-x)-k/m*(z-y) return np.array([fx,fy,fz,fX,fY,fZ],float) N = 200 # no. 
de puntos a = 0.0 # tiempo inicial b = 6.0 # tiempo final h = (b-a)/N # divisiones # inicializar variables en el tiempo = 0.0 r = np.array([np.pi/(10*4),np.pi/(10*2),np.pi/10,1.0,1.0,1.0],float) phi1points = [] phi2points = [] phi3points = [] tpoints = np.arange(a,b,h) for t in tpoints: phi1points.append(r[0]) phi2points.append(r[1]) phi3points.append(r[2]) k1 = h*f(r,t) k2 = h*f(r+1/2*k1,t+1/2*h) k3 = h*f(r+1/2*k2,t+1/2*h) k4 = h*f(r+k3,t+h) r += 1/6*(k1+2*k2+2*k3+k4) plt.plot(tpoints,phi1points) plt.plot(tpoints,phi2points) plt.plot(tpoints,phi3points) plt.legend(['phi1','phi2','phi3']) plt.title('Método RK 4') plt.xlabel('Tiempo') plt.ylabel('Ángulo') plt.show() # + [markdown] id="SGPFLO9SwXK9" # Las ecuaciones de Euler Lagrange son # $$\frac{\partial L}{\partial \phi_i}=\frac{d}{dt}\left(\frac{\partial L}{\partial \dot{\phi_i}}\right),$$ # donde el lagrangiano está dado por # $$L=T-U=\frac{1}{2}mR^2(\dot{\phi_1^2}+\dot{\phi_2^2}+\dot{\phi_3^2})-\frac{1}{2}kR^2 [(\phi_1-\phi_2)^2+(\phi_2-\phi_3)^2+(\phi_3-\phi_1)^2].$$ # # Observamos que la energía cinética es puramente función de la velocidad angular puesto que el radio es fijo. # # Sólo la energía cinética depende de $\dot{\phi_i}$ y sólo la energía potencial depende de $\phi_i$, de donde se obtiene fácilmente que # $$\frac{\partial L}{\partial \phi_1}=-kR^2[(\phi_1-\phi_2)-(\phi_3-\phi_1)]=-kR^2(\phi_1-\phi_2)-kR^2(\phi_1-\phi_3),$$ # y análogamente para las otras dos variables. 
Asimismo, # $$\frac{d}{dt}\left(\frac{\partial L}{\partial \dot{\phi_1}}\right)=mR^2\frac{d}{dt}(\dot\phi_1)=mR^2\frac{d^2 \phi_1}{dt^2}.$$ # # Por tanto, la ecuación de movimiento es # $$m\frac{d^2\phi_1}{dt^2}=-k(\phi_1-\phi_2)-k(\phi_1-\phi_3),$$ # y equivalentemente para $\phi_2$ y $\phi_3$: # $$m\frac{d^2\phi_2}{dt^2}=-k(\phi_2-\phi_3)-k(\phi_2-\phi_1),$$ # $$m\frac{d^2\phi_3}{dt^2}=-k(\phi_3-\phi_1)-k(\phi_3-\phi_2).$$ # # Estas ecuaciones describen la dinámica de la desviación de los ángulos $\phi_i$ a partir de la posición de equilibrio. Inicialmente, perturbamos el sistema de manera que # $$\phi_3(t=0)=\pi/10, \quad \phi_2(t=0)=\pi/20, \quad \phi_1(t=0)=\pi/40,$$ # i.e., que el objeto más desviado de su posición de equilibrio es el tercero, mientras que la desviación de los otros dos es igual la mitad y un cuarto respectivamente. # # También consideramos que las velocidades iniciales son # $\dot{\phi}_i(t=0)=1$ para los tres objetos. Lo que ambos métodos indican gráficamente es que los tres objetos que conforman el sistema oscilan periódicamente alrededor de una posición de equilibrio. La amplitud del tercero es la más grande, y esto es consecuencia de que sufre la mayor desviación inicial (el sistema de ecuaciones es de hecho simétrico para las tres variables angulares, de modo que habríamos obtenido resultados análogos permutando los valores iniciales). Identificando que $\theta +2\pi$ equivale a $\theta$, cada cierto tiempo el sistema regresa a la posición inicial, lo cual es resultado de la linealidad (pequeños desplazamientos desde la posición de equilibrio). # # Si pensamos la solución general como combinación lineal lineal de modos tenemos: # # a) La primera solución es una simple rotación, es decir, no hay oscilaciones. # b )En la segunda solución, dos masas oscilan en fase y la tercera con fase opuesta. # c) En la tercera solución una masa es estacionaria mientras que las otras dos masas oscilan con fase opuesta. 
# + [markdown] id="RA2FtkarOmm1" # Problema 6: (Opcional) # # De las sesiones brindadas, su propia experiencia y preferencias: Definir un problema a resolver con el enfoque numérico, explique a detalle cada consideración que realice y explique sus conclusiones. # # El problema puede ser la implementación de un modelo estándar discutido en libros de física (i.e. obtener el gráfico típico de estructura de ionización del átomo de Hidrógeno.), proyecto que esté desarrollando en su formación o complementar cálculos de las sesiones. # + id="C3YysC_JOmm1" colab={"base_uri": "https://localhost:8080/", "height": 397} outputId="73c43880-3f32-4e74-b4e0-ae88daf42875" from cmath import exp,sqrt,sin,pi import matplotlib.pyplot as plt from numpy import linspace,empty #d=n*pi/alpha slit_sep = 20 #mu m alpha = pi/slit_sep num_slits = 10 wavelength = 0.5 #mu m f = 100 #cm de manera que si x se mide en cm x*u/lambda*f es adimensional #x en cm, u en mu m, lambda en mu m, f en cm #x va de -10cm a 10cm def q(u): #u se mide en mu m return sin(alpha*u)**2 def integrand(u,x): return sqrt(q(u))*exp(1j*2*pi*x*u/(wavelength*f)) # regla del trapezoide def int_trapezoid(f,x,n): a = -(num_slits*slit_sep)/2 #numero de rendijas*separacion de rendijas/2 b = (num_slits*slit_sep)/2 N = 2**(n-1) h = (b-a)/N if n == 1: return 0.5*h*(f(a,x)+f(b,x)) else: I = 1/2*int_trapezoid(f,x,n-1) for k in range(1,N,2): I += h*f(a+k*h,x) return I def I(x): return abs(int_trapezoid(integrand,x,12))**2 x_vals = linspace(-5,5,500) I_vals = list(map(I,x_vals)) plt.title('Intensidad en la pantalla como función de la distancia') plt.xlabel('x') plt.ylabel('I(x)') plt.plot(x_vals,I_vals) plt.grid(True) plt.show() I_array = empty([100, 500], float) for k in range(100): I_array[k, :] = I_vals plt.imshow(I_array,vmax=3000) plt.gray() plt.show() # + [markdown] id="_AN8L4vRGuvi" # Para este ejercicio libre decidí resolver el problema 5.19 del libro de <NAME> http://www-personal.umich.edu/~mejn/cp/exercises.html # Quisiera 
mencionar que estuve leyendo este libro a la par del curso y algunas cosas las tomé de ahí para resolver estos ejercicios. Por eso tal vez perciban cierta nomenclatura diferente a lo que se vio en clase. # # En este problema se estudia la intensidad de luz difractada sobre una pantalla como función de la distancia. # En resumen, el problema da una función (intensidad de difracción) y pide lo siguiente: # # "Use su función en un programa para calcular y graficar la intensidad del patrón de difracción producido por una rejilla que tiene diez rendijas en total, si la luz incidente tiene una longitud de onda # $\lambda = 500 nm$. Suponga que la lente tiene una distancia focal de 1 metro y la pantalla mide $10 cm$ (...) Cree una visualización de cómo se vería el patrón de difracción en la pantalla usando la función imshow" # + [markdown] id="CMbTnQJKOmm2" # **Por último, debe de rellenar la siguiente encuesta de satisfacción:** https://forms.gle/4WU3ovvP7gJMj8GA7 (ingresar con el correo que se inscribió). # + [markdown] id="LmD7y2aCOmm2" # Enviar sus respuestas a nuestro correo: <EMAIL> # # **OJO: El correo debe de tener como asunto:** # **"Ejercicios_IIEVFC"** # # *Caso contrario su solución puede quedar sin evaluar debido a la gran cantidad de correos.* # + [markdown] id="6ucKfX5Q-7Ga" # # ## Donaciones # <br> # <center> # Puedes donar una vez en el siguiente enlace (Ko-Fi): # # <br> # # *Click en la imagen.* # # <a href="https://ko-fi.com/rcrdphysics"> # <img src="https://habrastorage.org/webt/8r/ml/xf/8rmlxfpdzukegpxa62cxlfvgkqe.png" width=20% /> # # </center>
soluciones.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <h1>Product Add</h1> # # Script to add a new product to list of them. Location is /home/wcmckee/sellcoffee/products. # # Better to have a file for products or a folder? Might be easier to have a folder. # Each product has rsa key - public and private key. # # Make json restful api that has product list. Append new product to it whenever it's updated. # import os import time import datetime import json #from passlib.hash import pbkdf2_sha256 #import crypt, getpass, spwd #from Crypto.PublicKey import RSA yearz = time.strftime("%y", time.gmtime()) monthz = time.strftime("%m", time.gmtime()) dayz = time.strftime("%d", time.gmtime()) datez = yearz + monthz + dayz prodirz = ('/home/wcmckee/sellcoffee/products/') prolis = os.listdir(prodirz) print(prolis) prodoadd = input('Name of product to add: ') prodirc = (prodirz + prodoadd) os.mkdir(prodirc) # + opmeta = open(prodirc + '/' + prodoadd + '-amount', 'w') opmeta.write('0') opmeta.close() # - opforc = open(prodirc + prodoadd + 'index.json', 'w') dicprod = dict() dicprod # + #id for each item. Starts at 1000 and up to 9999 #to make id it gets len of items in products folder and plus #1000. totprods = len(prolis) newids = totprods + 1000 opmeta = open(prodirc + '/' + prodoadd + '-id', 'w') opmeta.write(str(newids)) opmeta.close() # - #switching to folders for each product. Inside folder has rsa #and rsa.public. 
# + dicprod.update({'name' : prodoadd}) dicprod.update({'id' : str(newids)}) dicprod.update({'datecreate' : datez}) # - tigm = time.strftime("%H %M %S", time.gmtime()) timech =tigm.replace(' ', '') dicprod.update({'timecreate' : timech}) dicprod prodoaddz = dict() prodoaddz.update({prodoadd : dicprod}) prodoaddz json.dumps(prodoaddz) timech ti # + #opnewp = open(prodirz + prodoadd, 'w') ##opnewp.write('0') # + #new_key = RSA.generate(2048, e=65537) #public_key = new_key.publickey().exportKey("PEM") #private_key = new_key.exportKey("PEM") #print private_key #sapriv = open(prodirc + '/' + prodoadd, 'w') #sapriv.write(private_key) #sapriv.close() #print public_key #papriv = open(prodirc + '/' + prodoadd + '.pub', 'w') #papriv.write(public_key) #papriv.close() # - #dict that has all the product details. When it was first #created. Price. #amount sold. This value will be zero when a product is created #but add 1 with productsell script. #Store amount sold data in dict #or just a file called productname-amountsold. # + #opnewp.close() # + #for prol in prolis: # print prol # adprod = open(prodirz + prol + '.meta', 'w') # adprod.write( # - # + #adprod = open(pro # -
.ipynb_checkpoints/productadd-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Player Analysis # + import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # %matplotlib inline # - # ## Data Cleaning and Exploration # + df1 = pd.read_csv("matches.csv") df2 = pd.read_csv("deliveries.csv") df1.rename(columns={"id" : 'match_id'}, inplace=True) matches= pd.merge(df1, df2, on='match_id', how='outer') # - matches.columns # + matches = matches[['match_id', 'season','team1', 'team2', 'toss_winner','toss_decision','winner', 'inning', 'batting_team', 'bowling_team', 'over', 'ball', 'batsman','non_striker', 'bowler','wide_runs','bye_runs', 'legbye_runs','noball_runs', 'batsman_runs', 'extra_runs', 'total_runs', 'player_dismissed', 'dismissal_kind']] matches.shape # - matches.head() # ### Replacing DD with DC dicti = {"Delhi Daredevils":"Delhi Capitals"} matches["team1"]=matches["team1"].replace(dicti) matches["team2"]=matches["team2"].replace(dicti) matches["toss_winner"]=matches["toss_winner"].replace(dicti) matches["batting_team"]=matches["batting_team"].replace(dicti) matches["bowling_team"]=matches["bowling_team"].replace(dicti) matches["winner"]=matches["winner"].replace(dicti) # ## Taking in Consideration only DC VS KXIP matches # + KXIP_DC=matches[np.logical_or(np.logical_and(matches['team1']=='Delhi Capitals',matches['team2']=='Kings XI Punjab'), np.logical_and(matches['team2']=='Delhi Capitals',matches['team1']=='Kings XI Punjab'))] KXIP_DC.head() # - KXIP_DC.match_id.nunique() # ## KL Rahul's strike rate in first 6 overs # + rahul = KXIP_DC[(KXIP_DC["batting_team"]=="Kings XI Punjab") & (KXIP_DC["batsman"]=="KL Rahul")] rahul = rahul[rahul["over"]<=6] rahul = rahul[rahul["wide_runs"]==0] rahul.iloc[30:,4:].head() # - rahul.over.unique() rahul.season.unique() rahul.match_id.unique() # + 
strike =rahul.groupby(["season",'match_id','batsman']).agg({"batsman_runs":"sum","ball":"count"}) print("Runs scored by KL Rahul against DC :") strike # + strike_rate = pd.DataFrame((strike['batsman_runs']/strike["ball"]) * 100) strike_rate.columns = ["strike rate"] print("Strike Rate of KL Rahul while batting against DC :") strike_rate # - sns.distplot(strike_rate["strike rate"]) print("Average strike rate of Rahul against DC :",round(strike_rate['strike rate'].mean())) # - Overall # + rahul_overall = matches[(matches["batting_team"]=="Kings XI Punjab") & (matches["batsman"]=="KL Rahul")] rahul_overall = rahul_overall[rahul_overall["over"]<=6] rahul_overall = rahul_overall[rahul_overall["wide_runs"]==0] rahul_overall.iloc[30:,4:].head() # - rahul_overall.match_id.nunique() rahul_overall.over.unique() # + strike_overall =rahul_overall.groupby(["season",'match_id','batsman']).agg({"batsman_runs":"sum","ball":"count"}) print("Runs scored by KL Rahul :") strike_overall # + strike_rate_overall = pd.DataFrame((strike_overall['batsman_runs']/strike_overall["ball"]) * 100) strike_rate_overall.columns = ["strike rate"] print("Strike Rate of KL Rahul (overall) :") strike_rate_overall # - sns.distplot(strike_rate_overall["strike rate"]) print("Average strike rate of Rahul :",round(strike_rate_overall['strike rate'].mean())) # ## Confidence Interval strike_rate_overall.describe().T # + mean = strike_rate_overall['strike rate'].mean() sd = strike_rate_overall['strike rate'].std() n = len(strike_rate_overall['strike rate']) n # - tstar = 2.064 se = sd/np.sqrt(n) se # + lcb = mean - tstar * se ucb = mean + tstar * se lcb = round(lcb) ucb = round(ucb) print("95% Confidence Interval for the strike rate with which Rahul is expected to score today :{}".format((lcb, ucb))) # - # ## Iyer 's contribuion to DC's total score # - Against KXIP Delhi = KXIP_DC[KXIP_DC["batting_team"]=='Delhi Capitals'] # + total_runs =pd.DataFrame(Delhi.groupby(["match_id"])["total_runs"].sum()) 
total_runs.reset_index(level=0, inplace=True) total_runs # - iyer = Delhi[Delhi["batsman"]=="<NAME>"] iyer = iyer[iyer["wide_runs"]==0] # + iyer_score = pd.DataFrame(iyer.groupby(["match_id"])["batsman_runs"].sum()) iyer_score.columns = ["Iyer's runs"] iyer_score.reset_index(level=0, inplace=True) iyer_score # + final = pd.merge(iyer_score, total_runs, on='match_id', how='inner') final # + final["contribution"] = round((final["Iyer's runs"]/final["total_runs"]) * 100) final # - sns.distplot(final["contribution"]) print("Average contribution of Iyer in DC's inning against KXIP :{}%".format(round(final["contribution"].mean()))) # - Overall Delhi_overall = matches[matches["batting_team"]=='Delhi Capitals'] cond1 = Delhi_overall["season"] == 2018 cond2 = Delhi_overall["season"] == 2019 recent_overall = Delhi_overall[cond1 | cond2] recent_overall.head() # + total_runs12 =pd.DataFrame(recent_overall.groupby(["match_id"])["total_runs"].sum()) total_runs12.reset_index(level=0, inplace=True) total_runs12 # - iyer_overall = recent_overall[recent_overall["batsman"]=="<NAME>"] iyer_overall = iyer_overall[iyer_overall["wide_runs"]==0] # + iyer_score_overall = pd.DataFrame(iyer_overall.groupby(["match_id"])["batsman_runs"].sum()) iyer_score_overall.columns = ["Iyer's runs"] iyer_score_overall.reset_index(level=0, inplace=True) iyer_score_overall # + final_overall = pd.merge(total_runs12,iyer_score_overall , on='match_id', how='inner') final_overall # + final_overall["contribution"] = round((final_overall["Iyer's runs"]/final_overall["total_runs"]) * 100) final_overall # - sns.distplot(final_overall["contribution"]) sns.boxplot(final_overall["contribution"]) print("Average contribution of Iyer in DC's inning overall in recent years :{}%".format(round(final_overall["contribution"].mean()))) # ## wickets KXIP will take excluding Shami's wicket # + punjab = KXIP_DC[KXIP_DC["bowling_team"]=="Kings XI Punjab"] punjab = punjab[(punjab["over"]>5) & (punjab["over"]<=20)] punjab = 
punjab[punjab["season"]==2019] punjab.iloc[:,2:].head() # - except_shami = punjab[punjab["bowler"]!='<NAME>'] except_shami = except_shami[except_shami["dismissal_kind"] !="run out"] except_shami.match_id.nunique() punjab.bowler.nunique() except_shami.bowler.nunique() # + wickets = pd.DataFrame(except_shami.groupby(["season","match_id"])["player_dismissed"].count()) wickets.columns = ['except Shami'] wickets # + shami = punjab[punjab["bowler"]=="<NAME>"] shami = shami[shami["dismissal_kind"] !="run out"] wickets["shami_wicket"] = shami.groupby(["season","match_id"])["dismissal_kind"].count() wickets # - print("Average contribution of bowlers of KXIP (except Shami) in 6-20th over :{} wickets".format(round(wickets["except Shami"].mean()))) # We only have two matches of KXIP when Shami is also in playing XI so this might not help us in predicting wickets . # # So to have an better idea of posiblity of wickets that other bowlers can take except Shami , we need to analyse their individual performances against DC batsman in recent years. # Since, Delhi's team playing XI has completely changed in recent 2-3 years . 
So it might be not usefull to analyse their complete overall performance that's why considering only last 3 year's performance abcd = matches[matches["batting_team"]=="Delhi Capitals"] abcd = abcd[(abcd["season"]==2017) | (abcd["season"]==2018) | (abcd["season"]==2019)] # - <NAME> # + maxwell = abcd[(abcd["bowler"]=="<NAME>")] maxwell = maxwell[maxwell["dismissal_kind"] !="run out"] maxwell.head() # - maxwell.bowler.unique() maxwell.match_id.nunique() # + Max = pd.DataFrame(maxwell.groupby("match_id")["dismissal_kind"].count()) Max.columns = ["wickets taken"] Max # - # - <NAME> # + ashwin = abcd[(abcd["bowler"]=="<NAME>")] ashwin = ashwin[ashwin["dismissal_kind"] !="run out"] ashwin.head() # - ashwin.bowler.unique() ashwin.match_id.nunique() # + ash = pd.DataFrame(ashwin.groupby("match_id")["dismissal_kind"].count()) ash.columns = ["wickets taken"] ash # - # So taking in consideration all the information regarding bowlers of KXIP which have played IPL in past recent years also (against DC) , we can se that on an average generally 4-5 wickets out of total wickets are taken against DC batsman by bowlers other then Shami. # ## Gayle Contribution # Since Gayle is a part of KXIP from last year only , so considering his performance of last 4 years to get an better idea. 
gayle = matches[matches["batsman"]=="<NAME>"] gayle = gayle[(gayle["season"]==2016) | (gayle["season"]==2017) | (gayle["season"]==2018) | (gayle["season"]==2019)] # + gayle = gayle[gayle["wide_runs"]==0] gayle.head() # - gayle.match_id.nunique() # + strike_rate = gayle.groupby(["season","match_id","batsman"]).agg({ "batsman_runs":"sum","ball" : 'count'}) print("Runs scored by Gayle overall :") strike_rate.head() # + strike_rate["strike rate"] = pd.DataFrame(strike['batsman_runs']/strike["ball"] * 100) print("Strike Rate of Gayle while batting :") strike_rate # - sns.distplot(strike_rate['strike rate']) sns.boxplot(strike_rate['strike rate']) print("On an average strike rate of Gayle :",round(strike_rate['strike rate'].mean())) # ### Confidence Interval strike_rate.describe().iloc[:,2:].T # + mean = strike_rate['strike rate'].mean() sd = strike_rate['strike rate'].std() n = len(strike_rate['strike rate']) n # - tstar = 2.064 se = sd/np.sqrt(n) se # + lcb = mean - tstar * se ucb = mean + tstar * se lcb = round(lcb) ucb = round(ucb) print("95% Confidence Interval for the strike rate with which Gayle can score today against DC :{}".format((lcb, ucb))) # - # - Gayle Dismissal count = gayle.match_id.nunique() # + dismiss = pd.DataFrame(gayle.groupby("match_id")["dismissal_kind"].count()) total = dismiss["dismissal_kind"].sum() # + gayle_dismissal_percent = total/count print("Out of last {} matches Gayle has been dismissed in {} innings".format(count , total)) print("Dismissal percentage of Gayle getting out is {}%".format(round(gayle_dismissal_percent *100))) # - # So taking in consideration all the above information regarding Gayle's performance in recent years and also his current form at present we can predict Gayle to score with a strike rate of 130+ and major chances are that he would be out.
KXIP VS DC/Player Analysis KXIP VS DC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import folium from folium.plugins import MarkerCluster from geopy.exc import GeocoderTimedOut from geopy.geocoders import Nominatim # %matplotlib inline # - # ### Merge NDC Deliveries with NDC Partners to get Partners, Addresses, and Total Diaper Deliveries to be used in a map #import NDC deliveries csv deliveries = pd.read_csv('../data/Nash Diaper Stastistics - 2020 Deliveries.csv') print(deliveries.shape) deliveries.head() #import NDC deliveries csv partners = pd.read_csv('../data/Nash Diaper Stastistics - 2020 Partner Agencies.csv') print(partners.shape) partners.head() partners.info() # + # find longitude, latitude for addresses # declare an empty list to store # latitude and longitude of values # of city column longitude = [] latitude = [] # function to find the coordinate # of a given address def findGeocode(address): # try and catch is used to overcome # the exception thrown by geolocator # using geocodertimedout try: # Specify the user_agent as your # app name it should not be none geolocator = Nominatim(user_agent="paul") return geolocator.geocode(address) except GeocoderTimedOut: return findGeocode(address) # each value from address column # will be fetched and sent to # function find_geocode for i in (partners['Address']): if findGeocode(i) != None: loc = findGeocode(i) # coordinates returned from # function is stored into # two separate list latitude.append(loc.latitude) longitude.append(loc.longitude) # if coordinate for a city not # found, insert "NaN" indicating # missing value else: latitude.append('NaN') longitude.append('NaN') #Showing the output produced as dataframe. 
#filter_none #brightness_4 # now add this column to dataframe partners["Longitude"] = longitude partners["Latitude"] = latitude partners.head() # - #merge the two files to get addresses for partners ndc_deliveries = pd.merge(deliveries, partners, left_on = 'Partners', right_on = 'Partner', how = 'left') ndc_deliveries.head() ndc_deliveries.info() #check for null addresses ndc_deliveries[ndc_deliveries['Address'].isnull()] #only keep certain columns ndc_deliveries = ndc_deliveries[['Partners', 'Address', 'Total', 'Longitude', 'Latitude']].copy() ndc_deliveries.head() #group by Partners and add values in total column ndc_deliveries = ndc_deliveries.groupby(['Partners','Address', 'Longitude', 'Latitude'], as_index=False)['Total'].sum() ndc_deliveries #drop rows with null addresses ndc_deliveries=ndc_deliveries.dropna()#dropna(subset=['Address', 'Longitude', 'Latitude']) ndc_deliveries.info() #export as csv ndc_deliveries=ndc_deliveries.to_csv('ndc_deliveries.csv', index = False)
notebooks/ndc_map_prep.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"is_executing": false, "name": "#%%\n"} # create some x and y date from matplotlib import pyplot as plt x_list = [-4.0, -2.0, 3.0, 4.0, 5.0, 6.0] y_list = list() m = 2 b = 4 for x in x_list: y = (m*x) + b y_list.append(y) plt.plot(x_list, y_list) # + pycharm={"is_executing": false} def lin_model_single_ele(m, x, b): """ Returns a single y for a given x using a line """ return (m*x) + b x_list = [1, 2, 3, 4, 5, 6] y_observed = [2.11, 3.87, 6.01, 7.93, 9.99, 12.12] y_predicted = [] m = 2 b = 4 for x in x_list: y = lin_model_single_ele(m, x, b) y_predicted.append(y) print(y_predicted[0]) # + pycharm={"is_executing": false, "name": "#%%\n"} def calculate_residual(y_pred, y_obs): """ Returns the residual of either a point or array """ return y_pred-y_obs residuals = [] for i in range(0, len(y_predicted)): residual = calculate_residual(y_predicted[i], y_observed[i]) residuals.append(residual) print(residuals) # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} from statistics import mean def calculate_ssr(y_pred, y_obs, deci=4): """ Calculates the Sum of Squared Residuals using the observed and predicted y""" res = [calculate_residual(a_i, b_i) for a_i, b_i in zip(y_pred, y_obs)] return round(sum([d * d for d in res]), deci) def calculate_tss(y_obs, deci=4): y_mean = mean(y_obs) delta = [y - y_mean for y in y_obs] return round(sum([d * d for d in delta]), deci) def calculate_rsquared(ssr, tss, deci=4): return round(1-(ssr/tss), 4) # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} tss = calculate_tss(y_observed) ssr = calculate_ssr(y_observed, y_predicted) r2 = calculate_rsquared(ssr, tss) print('SSE: %s' % ssr) print('R^2: %s' % r2) # + jupyter={"outputs_hidden": false} 
pycharm={"is_executing": false, "name": "#%%\n"} def fit_line(x_input, y_observed, m_max=5, c_max=5, print_output=True): best_model = { 'm': None, 'c': None, 'SSR': None, 'R^2': None } assert len(x_input) == len(y_observed), 'Input vectors have differing lengths' tss = calculate_tss(y_observed) for m in range(1, m_max): for c in range(0, c_max): y_model = list() for x_i in x_input: y_model.append(lin_model_single_ele(m, x_i, c)) ssr = calculate_ssr(y_observed, y_model) r2 = calculate_rsquared(ssr, tss) if print_output: print('#####') print('using: m=%s and c=%s' % (m, c)) print('actual: %s' % y_observed) print('model: %s' % y_model) print('SSR: %s' % ssr) print('R^2: %s' % r2) print('#####') if best_model['SSR'] is None or best_model['SSR'] > ssr: best_model['m'] = m best_model['c'] = c best_model['SSR'] = ssr best_model['R^2'] = r2 return best_model # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} my_model = fit_line(x_list, y_observed, print_output=False) print('best model: %s' % my_model) # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} from sklearn import linear_model import numpy as np lm = linear_model.LinearRegression() X = np.array(x_list).reshape(-1, 1) Y = np.array(y_observed).reshape(-1, 1) model = lm.fit(X, Y) print(model.coef_) print(model.intercept_) print(model.score(X, Y)) # + pycharm={"is_executing": false, "name": "#%%\n"} # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} tss = calculate_tss(y_observed) ssr = calculate_ssr(y_observed, y_predicted) r2 = calculate_rsquared(ssr, tss) print('SSE: %s' % ssr) print('R^2: %s' % r2) # + pycharm={"is_executing": false, "name": "#%%\n"} def fit_line(x_input, y_observed, m_max=5, c_max=5, print_output=True): best_model = { 'm': None, 'c': None, 'SSE': None, 'R^2': None } assert len(x_input) == len(y_observed), 'Input vectors have differing lengths' tss = calculate_tss(y_observed) for m in range(1, 
m_max): for c in range(0, c_max): y_model = list() for x_i in x_input: y_i = (m*x_i) + c y_model.append(y_i) ssr = calculate_ssr(y_observed, y_model) r2 = calculate_rsquared(ssr, tss) if print_output: print('#####') print('using: m=%s and c=%s' % (m, c)) print('actual: %s' % y_observed) print('model: %s' % y_model) print('SSE: %s' % ssr) print('R^2: %s' % r2) print('#####') if best_model['SSE'] is None or best_model['SSE'] > ssr: best_model['m'] = m best_model['c'] = c best_model['SSE'] = ssr best_model['R^2'] = r2 return best_model # + pycharm={"is_executing": false, "name": "#%%\n"} my_model = fit_line(x_list, y_observed, print_output=False) print('best model: %s' % my_model) # + pycharm={"is_executing": false, "name": "#%%\n"} from sklearn import linear_model import numpy as np lm = linear_model.LinearRegression() X = np.array(x_list).reshape(-1, 1) Y = np.array(y_observed).reshape(-1, 1) model = lm.fit(X, Y) print(model.score(X, Y)) # + jupyter={"outputs_hidden": false} pycharm={"is_executing": false, "name": "#%%\n"} import pandas as pd # load data path_to_data = './data/spady_aerobic_performance.csv' ceph_data = pd.read_csv(path_to_data, delimiter=',') # get mass and mantle length mass = ceph_data['Mass'].values.reshape(-1,1) ml = ceph_data['ML'].values.reshape(-1,1) # create linear model lm = linear_model.LinearRegression() model = lm.fit(mass, ml) print('R-squared: %s' % round(model.score(mass, ml),3)) print('-- m: %s' % round(model.coef_[0][0],2)) print('-- b: %s' % round(model.intercept_[0],1))
ch6-regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="eT9w7TLcaEG4" # # Outliers (valores atípicos) # + [markdown] id="bzt9VMSeaEG5" # Mientras estamos desarrollando nuestro análisis de datos, ya sea con un objetivo puro de análisis o para realizar un preprocesamiento de datos antes de utilizar nuestros algoritmos de predicción, podemos encontrarnos algunos valores que, debido a su naturaleza, deberemos tener en consideración para que no afecten nuestro estudio. Dentro de este grupo destacan los valores nulos, que acabamos de ver en el notebook anterior, y los valores atípicos (o *outliers*), que los veremos a continuación. # # En este caso, a diferencia de los valores nulos, donde es más común referirse a ellos como nulos en lugar de missings, en el caso de los valores atípicos se suele optar por el termino anglosajón "outliers". # - # ## Conociendo a los outliers # # Según Wikipedia: # # >En estadística, un valor atípico (en inglés *outlier*) es una observación numéricamente distante del resto de los datos, haciendo que las estadísticas derivadas de los conjuntos de datos que incluyen este tipo de valores serán frecuentemente engañosas. # # # La definición anterior sugiere que el valor atípico es algo que es diferente de la multitud, del resto de datos. Pero, si bien es cierto que a menudo se dice que cada uno tenemos que ser uno mismo, en este caso puede que no sea tna bueno salirse de lo normal. # # <br> # # Comencemos por algo sencillo. Fíjate en la siguiente lista de valores, ¿ves algo diferente? valores = [15, 16, 19, 18, 54, 17, 17, 11, 19] valores # Efectivamente, hay un valor que se sale de lo común. Si nos fijamos, todos los datos están entre 15 y 20... Bueno, ¡¡todos menos el 54!! Se trata de un outlier. 
# ## Datos y outliers # # Ahora que ya sabemos que es un outlier, nos pueden venir muchas preguntas a la cabeza como, por ejemplo, "¿cómo se ha metido ese valor ahí?". # # Un proyecto de análisis de datos siempre comienza con la obtención de datos a analizar, y es aquí cuando estos truhanes aprovechan para colarse en nuestros datos. Son tan pillos que sería casi imposible detectarlos en este punto, ya que pueden aprovechar un fallo durante la recopilación de los datos o, simplemente, puede que sean así por naturaleza, de modo que indiquen cierta variación en nuestros datos. # # Pero dejemos de hablar y veamos datos, quiero ver datos. En este caso, vamos a utilizar como ejemplo un conjunto de datos de fútbol que... No, siempre fútbol, no. Mejor un ejemplo con jugadores de cricket. Vamos a suponer que estamos trabajando como analistas deportivos y queremos estudiar el desempeño del equipo indio de cricket, que lo haremos a partir de los puntos de cada jugador (cuyos nombres son totalmente reales): import pandas as pd scores = pd.DataFrame([{"Player": "Player1", "Score": 500}, {"Player": "Player2", "Score": 350}, {"Player": "Player3", "Score": 10}, {"Player": "Player4", "Score": 450}, {"Player": "Player5", "Score": 300}]) scores # Si nos fijamos en los datos, podemos observar que todos los jugadores salvo "Player3" han conseguido puntuaciones de 300 o mayores, mientras que "Player3" solo ha logrado 10, lo que puede significar que o bien nos hemos equivocado al apuntar su puntuación o bien es que este jugador debería plantearse cambiar de deporte. # # Ahora que sabemos que los valores atípicos pueden ser un error o simplemente una variación, ¿cómo decidimos si son importantes o no? Bueno, es bastante simple: si son el resultado de un error, podemos ignorarlos; pero si es solo una variación en los datos, deberíamos pensar un poco más. Antes de tratar de entender si ignorar los valores atípicos o no, debemos conocer las formas de identificarlos. 
# ## Identificando valores atípicos # # En vista de lo anterior, podríamos pensar que esto es pan comido, echo un vistazo a los datos y saco los que se salgan un poco y ya, como acabamos de hacer para el ejemplo del cricket. # # Bueno... Pues no. Ahora estábamos utilizando un conjunto de datos de 5 registros y 2 columnas, pero normalmente tendremos más, mucho más. Imagínate que te plantas con un conjunto de datos de +500 columnas y +10 mil filas, ¿también podrías encontrar los outliers manualmente a simple vista? A ver, poder podrías, pero echarías un buen rato, así que mejor utilizar métodos gráficos o estadísticos que nos faciliten el trabajo. En este notebook discutiremos algunos de ellos. # # <br> # # Para ello, comenzaremos con un dataset de los precios de las casas de Boston, el cual está incluido en la librería ``sklearn``, que en el futuro será una de nuestras mejores amigas, cuando nos pongamos con el ``feature engineering`` y veamos los algoritmos de aprendizaje. # Entonces, comencemos. # + from sklearn.datasets import load_boston boston = load_boston() x = boston.data columns = boston.feature_names #Creamos el DataFrame: boston_df = pd.DataFrame(boston.data) boston_df.columns = columns print("Filas: %s, Columnas: %s" %(boston_df.shape)) boston_df.head() # - # Las características que mostramos en el conjunto de datos se utilizarán para buscar cualquier valor atípico. Mirando los datos anteriores, parece que solo tenemos valores numéricos, es decir, no necesitamos hacer ningún formateo de datos. (Música épica). # # Podemos diferenciar dos tipos de análisis para encontrar los valores atípicos: univariante (análisis de outliers de una variable) y multivariante (análisis de outliers de dos o más variables). Para simplificar las cosas, comenzaremos con el método básico de detección de valores atípicos y avanzaremos lentamente hacia métodos más avanzados. 
# ## Análisis gráfico # # En este apartado veremos cómo detectar outliers de forma visual, para lo que utilizaremos ciertas representaciones gráficas. No te preocupes si no las entiendes todavía, al final de este bloque (Bloque 1 - Data Analysis) veremos un montón de formas de representar los datos. Además, para matar el gusanillo, en 2 notebooks veremos una introducción al análisis exploratorio, para lo que introduciremos ciertas visualizaciones. Pero ahora, centrémonos en los outliers: # # # ### Diagrama de caja (boxplot) # # Según Wikipedia: # # >También conocido como diagrama de caja y bigote, box plot, box-plot o boxplot. Es un método estandarizado para representar gráficamente una serie de datos numéricos a través de sus cuartiles. De esta manera, el diagrama de caja muestra a simple vista la mediana y los cuartiles de los datos, pudiendo también representar los valores atípicos de estos como puntos individuales. # # La definición anterior sugiere que si hay un valor atípico, se trazará como un punto en el diagrama de caja, agrupando en cajas el resto de la población. # # Veámoslo con un ejemplo. Para ello, utilizaremos la librería ``seaborn``, que será oficialmente presentada en futuros notebooks: # + import seaborn as sns # Por ejemplo, representemos la columnas "DIS" sns.boxplot(x=boston_df['DIS']) # - # Como podemos observar, el diagrama de caja anterior muestra tres puntos entre 10 y 12. Estos son los valores atípicos, ya que no están incluidos en el cuadro de otra observación, es decir, no están cerca de los cuartiles. # # # De este modo, estamos analizando los valores atípicos univariantes, es decir, estamos usando la columna ``DIS`` solo para verificar sus valores atípicos, sin tener en cuenta a nadie más. Sin embargo, también podemos hacer análisis de valores atípicos multivariantes. # # ¿Y cómo hacemos esto? ¿Podemos hacerlo con el diagrama de caja? Bueno, la respuesta más correcta sería depende. 
Si tuviera valores categóricos, podríamos usarlos con cualquier variable continua y hacer un análisis de valores atípicos multivariante. Lamentablemente, como no tenemos variables categóricas (recordemos que son todas numéricas), mejor olvidarnos de usar el diagrama de caja para este análisis de valores atípicos multivariante. # ## Gráfico de dispersión (scatter plot) # # Según Wikipedia: # # > Un diagrama de dispersión, gráfica de dispersión o gráfico de burbujas es un tipo de diagrama matemático que utiliza las coordenadas cartesianas para mostrar los valores de dos variables para un conjunto de datos. Los datos se muestran como una colección de puntos, cada uno con el valor de una variable que determina la posición en el eje horizontal y el valor de la otra variable que determina la posición en el eje vertical. # # Como sugiere la definición, el diagrama de dispersión es la colección de puntos que muestra valores para dos variables. Podemos intentar dibujar un diagrama de dispersión para dos variables de nuestro conjunto de datos de vivienda. # # Veamos un ejemplo con las columnas ``INDUS`` y ``TAX``: # + import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(16,8)) ax.scatter(boston_df['INDUS'], boston_df['TAX']) ax.set_xlabel('Proporción de acres comerciales no minoristas por ciudad') ax.set_ylabel('Tasa de impuesto a la propiedad de valor total por 10 000 $') plt.show() # - # Observando este gráfico, podemos ver que la mayoría de los puntos de datos se encuentran en el lado inferior izquierdo. Sin embargo, también vemos que hay alguno que se diferencia del resto aislándose hacia arriba a la derecha. # ## Análisis matemático # # Hasta ahora, hemos visto cómo detectar outliers de la manera sencilla, con gráficos. Sin embargo, la más útil vendrá por la parte matemática, ya que nos permitirá obtener programáticamente qué datos son más propensos a ser outliers y, posteriormente, aplicarles algún tratamiento. 
# # ### Z score (unidad tipificada) # # Según Wikipedia: # # > El término unidad tipificada, variable centrada reducida, variable estandarizada o normalizada se utiliza en estadística para comparar datos procedentes de diferentes muestras o poblaciones y se define como el número de desviaciones típicas que un valor dado toma con respecto a la media de su muestra o población. # # La intuición detrás del Z-score es describir cualquier punto de datos encontrando su relación con la desviación estándar y la media del grupo de puntos de datos. Lo que representa el valor obtenido a través de la unidad tipificada es el equivalente en una distribución normal, es decir, una distribución de media 0 y desviación estándar igual a 1. # # # Entonces, ¿cómo nos puede ayudar esto a identificar los valores atípicos? Bueno, dado que al calcular el Z-score estamos escalando y centrando los datos, podríamos obtener los puntos de datos que estén demasiado lejos de cero. Estos puntos se tratarán como valores atípicos. En la mayoría de los casos, se utiliza un umbral de 3 o -3, es decir, si el valor del Z-score es mayor o menor que 3 o -3 respectivamente, ese punto de datos se identificará como valor atípico. # # Para implementarlo en nuestros códigos, utilizaremos una función definida en la biblioteca ``scipy``: # + from scipy import stats import numpy as np z = np.abs(stats.zscore(boston_df)) print(z) # - # Solamente con lo que estamos viendo aquí sería difícil sacar a ojo cuáles son los outliers. Para ello, tendremos que aplicar un filtro, que será el umbral que hemos comentado anteriormente cuando decíamos que se consideraría outlier si estuviera fuera del rango [-3, 3]. Como hemos calculado el valor absoluto, simplemente tendremos que quedarnos con los datos mayores que 3 para encontrar los outliers. 
# # Hemos visto diferentes formas de atacar este problema de filtrado, pero en este caso utilizaremos la función ``where`` de NumPy: umbral = 3 print(np.where(z > umbral)) # Lo que nos devuelve este filtro es una tupla con 2 arrays que hacer referencia a la posición de cada uno de los outliers, donde el primer array indica el número de fila, y el segundo, el de columna: print(z[55][1]) # Así que el registro número 55 de la columna 1 (``ZN``) es un outlier. Y así con el resto de los valores cuyas posiciones hemos sacado anteriormente. # ## IQR-score (Rango intercuartílico) # El diagrama de caja usa el método basado en el Rango intercuartílico para mostrar los datos y valores atípicos. Sin embargo, para obtener una lista de valores atípicos identificados, necesitaremos usar la fórmula matemática y recuperar los datos atípicos. # Según Wikipedia: # # > El rango intercuartílico es una medida de variabilidad adecuada cuando la medida de posición central empleada ha sido la mediana. Se define como la diferencia entre el tercer cuartil (Q3) y el primer cuartil (Q1), es decir: RQ = Q3 - Q1. A la mitad del rango intercuartil se le conoce como desviación cuartil (DQ), y es afectada muy poco por cuentas extremas. Esto lo hace una buena medida de dispersión para distribuciones sesgadas: DQ = RQ/2= (Q3 - Q1)/2. # > # > Se usa para construir los diagramas de caja y bigote (box plots) que sirven para visualizar la variabilidad de una variable y comparar distribuciones de la misma variable; además de ubicar valores extremos. # > # > Es una medida de dispersión similar a la desviación típica o varianza, pero es mucho más robusta ante outliers. # # # El IQR es algo similar al Z-score en términos de encontrar la distribución de datos y luego mantener un umbral para identificar el valor atípico. # Podemos combinar el diagrama de caja con IQR y usarlo para encontrar la lista de valores atípicos como hicimos con el cálculo de la unidad tipificada. 
# # En primer lugar, calcularemos el IQR:

Q1 = boston_df.quantile(0.25)
Q3 = boston_df.quantile(0.75)
IQR = Q3 - Q1
print(IQR)

# Como ahora tenemos los valores de IQR, podemos pasar a detectar los outliers. Para ello, aplicaremos al DataFrame una máscara que nos filtrará los valores que se salgan del intervalo definido por **[Q1 - 1.5 IQR, Q3 + 1.5 IQR]**.

(boston_df < (Q1 - 1.5 * IQR)) | (boston_df > (Q3 + 1.5 * IQR))

# Ahora que sabemos cómo detectar los valores atípicos, es importante comprender si es necesario eliminarlos o corregirlos.
#
# A continuación, veremos algunos métodos para eliminar los valores atípicos y, si es necesario, imputar nuevos valores.

# ## Trabajando con Outliers
#
# Cuando, al realizar nuestro análisis de datos, detectamos un outlier, nos enfrentamos a una difícil decisión (que será la misma que en el caso de los nulos), ¿cómo debemos tratarlo?, ¿lo eliminamos o lo corregimos? Antes de hablar de esto, veremos algunos métodos para eliminar los valores atípicos.
#
# ### Z-score
#
# En el apartado anterior, hemos visto cómo se pueden detectar los valores atípicos utilizando el Z-score, pero ahora queremos eliminar o filtrar los valores atípicos y obtener los datos limpios. Esto se puede hacer de forma muy sencilla, apoyándonos en lo que hemos realizado anteriormente, pues solo será cosa de un filtro (aunque un tanto complejo):

boston_df[(z < 3).all(axis=1)]

# Si nos fijamos, el resultado que nos devuelve esta operación es un DataFrame con 415 filas, es decir, más de 90 filas de diferencia con el dataset original. Pero ¿qué ha pasado?
#
# Fijémonos en la sentencia de filtro:

# + jupyter={"outputs_hidden": true}
(z < 3).all(axis=1)
# -

# Lo que estamos haciendo aquí es simplemente calcular qué valores se salen del umbral ``(z < 3)``. Y, después, nos quedamos únicamente con aquellas filas (``axis=1``) que cumplan todo a ``True`` (con el método ``all()``).
De este modo, si aplicamos esta máscara sobre nuestro DataFrame, nos devolverá otro eliminando cualquier fila que tenga al menos un outlier según el criterio del Z-score. # ### IQR-score # # Al igual que hemos visto con el Z-score, podemos usar el IQR-score calculado previamente para filtrar los valores atípicos manteniendo solo los valores válidos: mask_2 = ~((boston_df < (Q1 - 1.5 * IQR)) |(boston_df > (Q3 + 1.5 * IQR))).any(axis=1) boston_df[mask_2].shape # Como podemos observar, ahora se nos ha quedado un DataFrame mucho más reducido, ya que este criterio es mucho menos permisivo. # # Si queremos entender qué estamos haciendo en la máscara, podemos analizarla en base a lo visto en el apartado anterior. En él, habíamos dicho que consideraríamos como outlier todo aquello que estuviera fuera del rango [Q1 - 1.5 IQR, Q3 + 1.5 IQR]. Por ello, consideramos ambas opciones con un or para detectar que un valor es un outlier. Del mismo modo que antes, hacemos la agrupación por filas para comprobar que hay o no al menos un outlier en esa fila. # Hasta aquí, estaríamos obteniendo las filas con algún outlier, es decir, tendríamos un ``True`` por cada fila con outliers. Sin embargo, como lo que nos interesa es quitar los outliers, metemos la condición con una negación, haciendo que nos quedemos con aquellas columnas que no tengan ningún outlier. # Finalmente, en cuanto a si un outlier debe ser eliminado o reemplazado es una cosa algo más compleja. # # Básicamente, los datos incorrectos o calculados eróneamente, pueden identificarse como valores atípicos y deben descartarse, pero, al mismo tiempo, es posible que necesitemos corregirlos también, ya que puden cambiar el nivel de datos, es decir, podrían llegar a causar problemas cuando modelemos los datos. # # Por ejemplo, 5 personas reciben un salario de 10K, 20K, 30K, 40K y 50K y, de repente, una de las personas comienza a recibir un salario de 100K. 
En este caso, poniéndonos en el papel del empleador, hemos realizado un estudio sobre los salarios y nos encontramos cone esto. En este caso, la nueva actualización salarial puede verse como sesgada y es posible que deba aumentar el salario de otro empleado también para mantener el equilibrio. Por lo tanto, puede haber varias razones por las que necesitemos comprender y corregir los valores atípicos. # ## Ejercicio 1 # # 1. Tenemos un grupo de características de diferentes coches definidos según el fichero "coches.csv". Fíjate en los caballos de vapor (columna ``hp``), ¿observas alguna cosa extraña? # 2. Identifica los outliers de forma gráfica # 3. ¿Podrías señalar si existe algún valor atípico en la relación del tiempo que tarda en recorrer 1/4 milla (``qsec``) y el volumen del depósito del coche (``disp``)? # 4. Identifica, mediante el criterio del rango intercuartílico, los outliers que hemos visto en el apartado 2. # 5. Crea una copia del DataFrame de los coches y elimina aquellos registros con outliers. ¿Ha cambiado la forma de nuestro DataFrame? # 6. Crea otro DataFrame de coches en el que sustituyas los outliers por el máximo o el mínimo del resto de valores en función de si se queda fuera del margen por encima o por debajo. # 7. EXTRA: ¿Podrías repetir los apartados 4, 5 y 6, pero con el criterio de Z-score? # + jupyter={"outputs_hidden": true} df = pd.read_csv("coches.csv") df['hp'] # - # 2. sns.boxplot(x=df['hp']) # 3. fig, ax = plt.subplots(figsize=(16,8)) ax.scatter(df['qsec'], df['disp']) ax.set_xlabel('Tiempo 1/4 milla') ax.set_ylabel('Volumen depósito') plt.show() # 4. Q1 = df["hp"].quantile(0.25) Q3 = df["hp"].quantile(0.75) IQR = Q3-Q1 print(Q1) print(Q3) print(IQR) umbral_inf = Q1 - 1.5*IQR umbral_sup = Q3 + 1.5*IQR print(umbral_inf, umbral_sup) df[(df['hp'] > umbral_sup) | (df['hp'] < umbral_inf)] # 5. df[~((df['hp'] > umbral_sup) | (df['hp'] < umbral_inf))].shape # + # 6. 
# Build the outlier mask once and keep the clean subset to obtain the
# replacement values (max/min of the non-outlier 'hp' values).
outlier_mask = (df['hp'] > umbral_sup) | (df['hp'] < umbral_inf)
df_sin_o = df[~outlier_mask]
max_hp = df_sin_o['hp'].max()
min_hp = df_sin_o['hp'].min()

# The exercise asks for ANOTHER DataFrame ("Crea otro DataFrame"), so work
# on a copy instead of mutating `df` in place as the previous solution did.
df_cap = df.copy()
df_cap.loc[df_cap['hp'] > umbral_sup, ['hp']] = max_hp
df_cap.loc[df_cap['hp'] < umbral_inf, ['hp']] = min_hp
df_cap.loc[[28, 31]]
# -
Bloque 2 - Data_Analyst/02_Manejo de datos/02_Missings y outliers/04_RESU_3_Outliers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # EX. Functions # ## 1. Argument-matching examples. # First, define the following six functions (either interactively or in a module file that can be imported): def f1(a, b): print(a, b) # Normal args def f2(a, *b): print(a, b) # Positional var args def f3(a, **b): print(a, b) # Keyword var args def f4(a, *b, **c): print(a, b, c) # Mixed modes def f5(a, b=2, c=3): print(a, b, c) # Default values def f6(a, b=2, *c): print(a, b, c) # Defaults and positional var args # Now, test the following calls interactively.. # case a) f1(1, 2) f1(b=2, a=1) # case b) f2(1, 2, 3) f3(1, x=2, y=3) f4(1, 2, 3, x=2, y=3) # case c) f5(1) f5(1, 4) # case d) f6(1) f6(1, 3, 4) # Try to explain each result. # # ## Case a) # def f1(a, b): print(a, b) # # f1(1,2) --> 일반적인 argument를 넣어주면 a = 1, b = 2로 assign된다. # 또 Python은 keyward argument를 지원하기 때문에 f1(b=2, a=1)을 호출하면 # Keyward argument로 인식하여 print(a, b)에 a = 1, b = 2로 assign되기 때문에 case a)의 두 호출 결과가 같다. # # ## Case b) # def f2(a, \*b): print(a, b) # def f3(a, \*\*b): print(a, b) # def f4(a, \*b, \*\*c): print(a, b, c) # # Python의 함수 argument로 '\*'을 넣으면 **tuple 형태**로 묶여 여러개의 인자를 받아온다. 또 '\*\*'을 넣으면 **dictionary 형태**로 묶여 여러개의 인자를 받아올 수 있고, key와 value 쌍으로 argument를 넣어줘야 한다. # # f2(1, 2, 3) 실행 결과는 1 (2, 3)으로 2와 3이 '\*b'의 인자로 받아져 tuple로 묶였고 # f3(1, x=2, y=3) 실행 결과는 1, {'x': 2, ;'y': 3}으로 '\*\*b'의 인자로 받아져 dictionary 로 묶였다. # # argument로 tuple과 dictionary를 모두 사용할 수 있는데 f4()의 경우가 예시이다. # f4(1, 2, 3, x=2, y=3) 실행 결과는 1 (2,3) {'x': 2, 'y': 3}으로 각각 '\*b'와, '\*\*c'의 인자로 받아져 tuple, dictionary로 각각 묶였다. # # # Case c) # def f5(a, b=2, c=3): print(a, b ,c) # # 이 경우는 argument를 선언과 동시에 초기화 해주었는데, # 'b'와 'c' argument에는 값을 넣어주지 않아도 f5(1) 예시처럼 초기화 된 값이 출력되게 되며 f5(1, 4)를 호출하면 'a'에는 1이 assign되고, 'b'는 초기 값 2가 아닌 4로 assign되게 된다. 
# # # Case d) # def f6(a, b=2, *c) # # f6의 argument 중 'b'는 2로 초기화 해주었고 '\*c'로 tuple을 받아온다. # # 원래 tuple을 생성할 때는 빈 tuple을 생성할 수 없는 것이 python의 원칙인데, 인자로 받는 경우는 빈 tuple이 출력되는 것을 확인할 수 있었다. # # f6(1)을 호출하면 argument 'a'에 1이 assign되고 'b'는 초기화된 2가 출력되고, \*c에 들어오는 것은 없어서 빈 tuple이 출력된다. # # f6(1,3,4)를 호출하면 'a'에 1이 assign, 'b'는 3이 assign되고, \*c에는 4가 tuple의 # 형태로 들어가 원소 1개 짜리 tuple이 만들어진다. # ## 2. Arguments. # Write a function called ```adder``` in a Python module file. The function should accept two arguments and return the sum (or concatenation) of the two. Then, call the ```adder``` function with a variety of object types (two strings, two lists, two floating points). # # Python 모듈 파일에 Adder라는 함수를 쓰십시오. 함수는 두 개의 주장을 수용하고 두 개의 합계를 반환해야 한다. 그런 다음 다양한 객체 유형(줄 2개, 목록 2개, 부동점 2개)으로 애드더 함수를 호출한다. # + def adder(arg1, arg2): # two arguments return arg1 + arg2 # return the sum # test code here print(adder("Park" , "YoungJun")) # two strings print(adder([6,4,3,2] , ["y", "j"])) # two lists print(adder(25.3 , 24.7)) # two floating points # - # ## 3. Variable arguments. # Generalize the adder function you wrote in the above to compute the sum of an arbitrary number of arguments, and change the calls to pass more or fewer than two arguments. # # Hints: a slice such as ```S[:0]``` returns an empty sequence of the same type as S, and the type builtin function can test types # # # 임의 수의 인수의 합계를 계산 # + def adder_s(*arg): for i in arg[:1]: sum = i for k in arg[1:]: sum = sum + k return sum def adder(*arg1): r_val = arg1[0] for i in range(1,len(arg1)): r_val = r_val +arg1[i] return r_val print(adder_s("park", "young", "jun")) print(adder_s([5, 6, 7] , ["y", "j"])) print(adder_s(22.3 , 17.8)) print(adder_s(1,2,3,4)) print(adder_s('hello ','my name ','is ','hyoseop')) print(adder_s([1,2], [3,4], [5])) print(adder_s(12.3, 15.8)) # - # What happens if you pass in arguments of different types? 
# +
# print(adder_s("park", 0, "jun")
# NOTE(review): the line above is missing its closing parenthesis, which is
# what raises SyntaxError. With the parenthesis fixed, mixing argument
# types raises a TypeError instead (e.g. "park" + 0), since '+' is not
# defined between str and int.
# -

# What about passing in dictionaries?

# +
# print(adder_s({'a': 1, 'b': 2}, {'c': 3, 'd': 4}))
# print(adder_s(x=5, y=6))
# A dict can be passed positionally through a '*' parameter, but the
# function then tries to add the dicts with '+', which dicts do not
# support, so a TypeError is raised. Keyword arguments (x=5, y=6) are
# rejected outright because the function declares no '**' parameter.
# -

# ## 4. Dictionary.
# Write a function called ```add_dict(dict1, dict2)``` that computes the union of two dictionaries. It should return a new dictionary containing all the items in both its arguments (which are assumed to be dictionaries). If the same key appears in both arguments, feel free to pick a value from either.
#

# +
def add_dict(dict1, dict2):
    """Return a NEW dict containing the union of dict1 and dict2.

    On duplicate keys, dict2's value wins (update() overwrites).
    The inputs are left untouched: the previous version aliased dict1
    (sum_d = dict1) and therefore mutated the caller's dictionary as a
    side effect, even though the exercise asks for a new dictionary.
    """
    sum_d = dict(dict1)   # copy first so dict1 is not mutated
    sum_d.update(dict2)   # merge; dict2 overwrites on key clashes
    return sum_d

dict1 = {'key1' : 3, 'key2' : 5}
dict2 = {'key1' : 9, 'key3' : 4}
print(add_dict(dict1, dict2))  # key1 takes dict2's value, 9

# Alternative solution: dict unpacking (later values win, same as update)
def add_dict2(dict1, dict2):
    return {**dict1, **dict2}

print(add_dict({'a':1,'b':2},{'b':3, 'd':4}))
# -

# What happens if you pass lists instead of dictionaries?

# +
# list1 = ['a', 'b', 'c']  # declared as lists instead of dictionaries
# list2 = [1, 2, 3]
# print(add_dict(list1, list2))
# Lists are not mappings, so building/merging fails: passing lists raises
# an error (TypeError) rather than concatenating them.
# -

# How could you generalize(rewrite) your function to handle this case, too? (Hint: see the ```type``` built-in function used earlier.)
# +
def add_dict_g(dict1, dict2):
    """Generalised union: concatenate two lists or merge two dicts.

    Uses type() to inspect the arguments, as the exercise hint suggests.
    Returns a new object; the previous version aliased dict1
    (sum_dict = dict1) and mutated the caller's dictionary.
    """
    if type(dict1) == list:          # both arguments are lists
        if type(dict2) == list:
            sum_list = dict1 + dict2
            return sum_list          # return the list concatenation
    sum_dict = dict(dict1)           # otherwise treat as dicts (copy first)
    sum_dict.update(dict2)
    return sum_dict                  # return the dict union

list1 = [1, 7, 5]
list2 = ['p', 'y', 'j']
print(">>> Sum of lists")
print(add_dict_g(list1, list2))

dict1 = {'key1' : 1, 'key2' : 2}
dict2 = {'key1' : 3, 'key3' : 4}
dict3 = {'key4' : 6, 'key5' : 7}
print("\n>>> Sum of Dictionary")
print(add_dict_g(dict1, dict2))
# -

# Type Markdown and LaTeX: α²

# > If α² is passed as the argument, it is treated as two α arguments,
# i.e. add_dict_g(α, α): when α is a list the output is list * 2, and
# when α is a dictionary the argument α appears only once in the result.

# ## 5. Computing factorials.
# N!, is computed as N*(N-1)*(N-2)*...1. For instance, 6! is 6*5*4*3*2*1, or 720. Write a recursive function ```fact1(N)``` and an iterative function ```fact2(N)```.

# +
def fact1(N):
    """Recursive factorial.

    Base case N <= 1 returns 1, so fact1(0) == 1 == fact2(0); the
    previous version only stopped at N == 1 and recursed forever for 0.
    """
    if N <= 1:
        return 1
    return N * fact1(N - 1)

def fact2(N):
    """Iterative factorial (returns 1 for N == 0, the empty product)."""
    factorial = 1
    for i in range(1, N + 1):
        factorial *= i
    return factorial

print(">>> fact1(N) result")
print(fact1(1))
print(fact1(6))
print(">>> fact2(N) result")
print(fact2(1))
print(fact2(6))
practice/python/EX3_functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Karar Ağaçları (Decision Trees) # + # https://erdincuzun.com/makine_ogrenmesi/decision-tree-karar-agaci-id3-algoritmasi-classification-siniflama/ # https://en.wikipedia.org/wiki/Decision_tree # - import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Veri oku # https://rdrr.io/cran/rpart/man/kyphosis.html df = pd.read_csv('https://raw.githubusercontent.com/yasarkucukefe/YBS462/main/data/kyphosis.csv') df.head() # null değerler? df.info() # EDA sns.pairplot(df, hue='Kyphosis') # Train-test split (eğitim ve test verilerinin ayrılması) from sklearn.model_selection import train_test_split X = df.drop('Kyphosis',axis=1) y = df['Kyphosis'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30) # Decision Trees Model (Karar Ağaçları Modeli) from sklearn.tree import DecisionTreeClassifier dtree = DecisionTreeClassifier() dtree.fit(X_train, y_train) # Tahminler tahminler = dtree.predict(X_test) tahminler # Modelin değerlendirilmesi from sklearn.metrics import classification_report, confusion_matrix print(classification_report(y_test, tahminler)) y_test[y_test=='absent'].size / y_test.size print(confusion_matrix(y_test, tahminler)) from sklearn import tree plt.figure(figsize=(18,12)) tree.plot_tree(dtree, fontsize=10, feature_names=list(df.columns[1:])) plt.show() # ## Random Forests from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=50) # n_estimators=100 (default) rf.fit(X_train, y_train) rf_tahminler = rf.predict(X_test) # print(confusion_matrix(y_test, rf_tahminler)) # ## Uygulama # Veri df = pd.read_csv("https://raw.githubusercontent.com/yasarkucukefe/YBS462/main/data/loan_data.csv") df.info() df.head() df['not.fully.paid'].unique() 
df['purpose'].unique()

# String-valued columns cannot be fed to a model directly; they must
# first be expanded into indicator (dummy) columns.
df_final = pd.get_dummies(df, columns=['purpose'], drop_first=True)

df_final.head()

df_final.info()

# +
# Target variable: not.fully.paid
# -

# Separate features from the target, then hold out 30% of rows for testing.
X = df_final.drop(columns='not.fully.paid')
y = df_final['not.fully.paid']

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
hafta-5/.ipynb_checkpoints/ders5-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # cuSignal Cheat Sheets sample code # # (c) 2020 NVIDIA, Blazing SQL # # Distributed under Apache License 2.0 # # Imports # + import cusignal import cupy as cp from cupy.fft import fft, fftshift import scipy import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # - # # cuSignal spectral analysis # + fs = 10e3 noise_power = 0.001 * fs / 2 data = cp.repeat(cp.array([0,0,2,1,4,0,0,1,0,0,0,0,1]), 10) fs = 10e3 N = 1e5 amp = 2 * cp.sqrt(2) noise_power = 0.01 * fs / 2 time = cp.arange(N) / float(fs) mod = 500*cp.cos(2*cp.pi*0.25*time) carrier = amp * cp.sin(2*cp.pi*3e3*time + mod) noise = cp.random.normal(scale=cp.sqrt(noise_power), size=time.shape) noise *= cp.exp(-time/5) x = carrier + noise y = cp.random.normal(scale=0.1*cp.sqrt(noise_power), size=len(x)) # - # #### cusignal.acoustics.cepstrum.complex_cepstrum() cusignal.complex_cepstrum(x) # #### cusignal.acoustics.cepstrum.real_cepstrum() cusignal.real_cepstrum(x) # #### cusignal.spectral_analysis.spectral.coherence() f, Cxy = cusignal.coherence(x, y, fs, nperseg=100) plt.semilogy(cp.asnumpy(f), cp.asnumpy(Cxy)) plt.xlabel('frequency [Hz]') plt.ylabel('Coherence') # #### cusignal.spectral_analysis.spectral.csd() f, Cxy = cusignal.csd(x, y, fs, nperseg=100, window=('kaiser', 5)) plt.semilogy(cp.asnumpy(f), cp.asnumpy(Cxy)) plt.xlabel('frequency [Hz]') plt.ylabel('CSD') # #### cusignal.spectral_analysis.spectral.lombscargle() # + x_signal = cp.linspace(0.01, 10, len(x)) x_period = cp.linspace(0.01, 10, len(x)) periodogram = cusignal.lombscargle(x_signal, y, x_period) plt.semilogy(cp.asnumpy(x_signal), cp.asnumpy(x)) plt.semilogy(cp.asnumpy(x_period), cp.asnumpy(periodogram)) plt.xlabel('frequency [Hz]') # - # #### cusignal.spectral_analysis.spectral.periodogram() f, Pxx_den = 
cusignal.periodogram(x, fs) plt.semilogy(cp.asnumpy(f), cp.asnumpy(Pxx_den)) plt.ylim([1e-7, 1e2]) plt.xlabel('frequency [Hz]') plt.ylabel('PSD [V**2/Hz]') f, Pxx_spec = cusignal.periodogram(x, fs, 'flattop', scaling='spectrum') plt.figure() plt.semilogy(cp.asnumpy(f), cp.asnumpy(cp.sqrt(Pxx_spec))) plt.ylim([1e-4, 1e1]) plt.xlabel('frequency [Hz]') plt.ylabel('Linear spectrum [V RMS]') # #### cusignal.spectral_analysis.spectral.spectrogram() # + f, t, Sxx = cusignal.spectrogram(x, fs) plt.pcolormesh(cp.asnumpy(t), cp.asnumpy(f), cp.asnumpy(Sxx)) plt.ylabel('Frequency [Hz]') plt.xlabel('Time [sec]') plt.savefig('Spectrogram.png', dpi=150) # - f, t, Sxx = cusignal.spectrogram(x, fs, return_onesided=False) plt.pcolormesh(cp.asnumpy(t), cp.asnumpy(cp.fft.fftshift(f)), cp.asnumpy(cp.fft.fftshift(Sxx, axes=0))) plt.ylabel('Frequency [Hz]') plt.xlabel('Time [sec]') # #### cusignal.spectral_analysis.spectral.stft() f, t, Zxx = cusignal.stft(x, fs, nperseg=1000) plt.pcolormesh(cp.asnumpy(t), cp.asnumpy(f), cp.asnumpy(cp.abs(Zxx)), \ vmin=0, vmax=amp) plt.title('STFT Magnitude') plt.ylabel('Frequency [Hz]') plt.xlabel('Time [sec]') # #### cusignal.spectral_analysis.spectral.vectorstrength() cusignal.vectorstrength(data, 10) # #### cusignal.spectral_analysis.spectral.welch() f, Pxx_den = cusignal.welch(x, fs, nperseg=1024) plt.semilogy(cp.asnumpy(f), cp.asnumpy(Pxx_den)) plt.ylim([0.5e-3, 1]) plt.xlabel('frequency [Hz]') plt.ylabel('PSD [V**2/Hz]')
cheatsheets/cuSignal/cusignal_SpectralAnalysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="uYAGzO1NvOey" # ## Getting Data from API's with Python # + [markdown] colab_type="text" id="8a6TyXl8u_5U" # **GW Libraries and Academic Innovation** # # March 2, 2020 # # <NAME> # # + [markdown] colab_type="text" id="GsYYtH6Nu9w9" # ### Workshop goals # # This workshop will cover a couple of basic use cases for retrieving data from RESTful API's with Python. # # By the conclusion of this workshop, you will have worked through the following: # # * Understanding the REST framework for data retrieval # * Obtaining and using an API key # * Constructing a query with parameters in Python using the `requests` library # * Writing a `for` loop to retrieve multiple pages of results # * Parsing a JSON response # * Exporting data in CSV format # # # + [markdown] colab_type="text" id="dPd4C14Lwi8v" # ### Tips for using this Google Colab notebook # # When working in a Google Colaboratory notebook, Shift-Return (Shift-Enter) runs the cell you're on. You can also run the cell using the "Play" button at the left edge of the cell. # # There are many other keyboard shortcuts. You can access the list via the menu bar, at Tools-->Command palette. In fact, you can even customize your keyboard shortcuts using Tools-->Keyboard shortcuts. # # (If you're working in an Anaconda/Jupyter notebook: # - Control-Enter (Ctrl-Return) runs the cell you're on. You can also run the cell using the "Play" button in the toolbar. Esc, then A inserts a cell above where you are. # - Esc, then B inserts a cell below where you are. # - More shortcuts under Help --> Keyboard Shortcuts) # # You will probably get some errors in working through this notebook. That's okay, you can just go back and change the cell and re-run it. 
# # The notebook auto-saves as you work, just like gmail and most Google apps. # + [markdown] colab_type="text" id="Z-b1a19vznby" # ### What is an API? # # An **A**pplication **P**rogramming **I**nterface is a generic term for functionality that allows one computer application to talk to another. In contrast to a graphical user interface (GUI), which allows an end user to interact with an application via visual symbols (*e.g.* icons) and manual operations (*e.g.* mouse clicks), an API allows a user to interact with the application by writing code. # # # You can think of API's as the glue that holds together the various modules and libraries of code that make up a given system, whether we're talking about a single piece of software or the entire World Wide Web. # # ------------------------- # # ### What is REST? # # **R**epresentational **S**tate **T**ransfer refers to a common set of principles implemented by services that communicate via the web. Most RESTful API's use **HTTP** to provide access. Via HTTP and its core methods, you code can communicate with a web service jthe way your browser does when you visit a web site. We'll see how to write code to do just that in this workshop. # + [markdown] colab_type="text" id="_xFhpp8t-U5B" # ### Using an API to find and rerieve news headlines # + [markdown] colab_type="text" id="pa21rcXp9QnQ" # We're going to use a couple of libraries for making API calls and processing the data these calls return. They are not part of the standard Python distribution, but they're pre-installed for Google Colaboratory notebooks. If you're running a Jupyter notebook locally on your computer via the Anaconda distribution of Python, they are pre-installed there as well. If not, you can install them yourself by running these commands inline in your notebook: # # `!pip install pandas` # # `!pip intall requests` # # You can also install them at the command line by using the above commands *without* the prefixed exclamation point. 
# + [markdown] colab_type="text" id="kwgwE_f1_lyu" # **Run the following cell by pressing _Shift_ plus _Enter_.** # + colab={} colab_type="code" id="y4EXmTSuvMeY" import pandas as pd import requests # + [markdown] colab_type="text" id="NbQta595MLrS" # **News API** https://newsapi.org/docs/ provides content scraped from a variety of news organizations websites in single stream. Accounts for developers and non-commercial users are free and provide access to the metadata (though not the full article content) for news stories up to 1 month old. # + [markdown] colab_type="text" id="Aq-7zGGf_Cy_" # **Step 1** # # Register for an API key. Many, though not all, web API's require users to authenticate with some kind of credentials. An API key is a simple way to do this, by passing a unique identifier to the service along with each of your HTTP requests. # # To register for an API key with News API, visit this link and fill out and submit the form: https://newsapi.org/register # + [markdown] colab_type="text" id="_CqFzDQq_eoh" # Once you have your API key, let's **assign it to a variable.** You will use this variable in constructing your requests to this API. # # # # + colab={} colab_type="code" id="AWkAKXOEu3_K" NEWS_API_KEY = '' # + [markdown] colab_type="text" id="MlZmSTJS__8O" # **Step 2** # # We also need a URL to point our code to. Because we'll be using a GET request (more on that in a minute), our URL will include additional parameters that specify the data we're looking for. But the first part of the URL will remain the same for all our requests to this API. # # This part of the URL is described in the [News API documentation](https://newsapi.org/docs/endpoints). The part after the `.org` is called an **endpoint**. API's may provide multiple endpoints for different types of data. The News API endpoint we're using allows us to search all content collected by the service over the past 30 days. 
# # + colab={} colab_type="code" id="j0Qeuy_eNhXM" NEWS_BASE_URL = 'https://newsapi.org/v2/everything' # + [markdown] colab_type="text" id="nF5iHahrBh-9" # **Step 3** # # Now that we have our endpoint, we are ready to construct our query. Let's say we're looking for stories about a topic in the news a lot lately: impeachment. You can feel free to substitute your own topic of interest. # # Let's take a moment to review the [API documentation](https://newsapi.org/docs/endpoints/everything) for the `/everything` endpoint. # # There are a couple of points to pay attention to: # # 1. This endpoint accepts GET requests. GET is the simplest method defined by HTTP. GET requests have a couple of features: # # a. All of your data (as the requestor) must passed as part of the URL. Other HTTP methods, like POST, allow you to include a body of data with your request, like a form or a file, but we won't be using those here. # # b. GET requests are considered _safe_ because you can use them only to retreive data, not to make any changes to the data on the server from which you are making the request. # # 2. The documentation defines a set of **request parameters** and a **response object**. These are the meat of the API. The request parameters specify fields that your application, _as the requestor_ can use to define your request. The response object is structured data that the API will return in response to your request. # # # + [markdown] colab_type="text" id="mSXsr2KEEtVm" # **Step 3.a** # # We'll start with a simple query: everything in the newsbank that matches the keyword "impeachment." # # As you saw in the documentation, our requests will pass our query parameters to the API as part of the URL. But since we're using the `requests` Python library to make our requests, we don't have to construct the whole URL ourselves; the library will do it for us, as long as we pass it a dictionary (a set of key-value pairs.) 
# # **Define a dictionary called `params` that has two items:** # * Your query, which should be a string value assigned to the key `q` # * Your API key, assigned to the key `apiKey` # # Since you already stored your API key in a variable, you can refer back to it inside your dictionary by that variable name. # + colab={} colab_type="code" id="fU87D2f0Cs5t" params = {'q': 'impeachment', 'apiKey': NEWS_API_KEY} # + [markdown] colab_type="text" id="fkNVp8NrINAD" # **Step 3.b** # We'll use the `get()` method, defined by the `requests` module, to make a GET request. # # **Call `requests.get`, passing it as arguments the following:** # * the variable that holds your base URL # * your dictionary of parameters as a keyword argument, _i.e._, `params=your_custom_params`. # # **Assign your call to `requests.get` to a new variable. This variable will hold the response object from the API.** # # Note: `requests` HTTP methods also support passing a custom HTTP request header as an argument to the `headers` keyword. Some API's require custom header values, but we're not covering that today. # + colab={} colab_type="code" id="5bwffhi1DGrC" news_data_response = requests.get(NEWS_BASE_URL, params=params) # + [markdown] colab_type="text" id="ypaWfwbXJv2P" # The response object that `requests` returns includes the data from our GET request. It also includes other useful information among its properties and methods. # # If we inspect the response itself, we can see the HTTP status. `200` = a successful request. # + colab={} colab_type="code" id="ovGbzrDwDPzU" news_data_response # + [markdown] colab_type="text" id="5WqhGINbKk80" # We can see the actual URL of our request by accessing the `url` property of the `request` property of our response. This can be useful for troubleshooting. 
# + colab={} colab_type="code" id="WqaLMosGDP_V" news_data_response.request.url # + [markdown] colab_type="text" id="Z8qyi2lSK-GN" # **Step 3.c** # # **Access the JSON data in the response object by calling the latter's `json()` method. # Store the data in a new variable.** # # Note that this method requires no arguments. # + colab={} colab_type="code" id="nL7sY70ZDP8S" news_data = news_data_response.json() # + [markdown] colab_type="text" id="tXpzleT4Lnwb" # Per the documentation, the API's response contains a field called `totalResults` that records how many results were found. Because the data is in JSON format, the `json()` method converts the data to a native Python object -- in this case, a dictionary. # # + colab={} colab_type="code" id="AjPmdTBJEBea" news_data['totalResults'] # + [markdown] colab_type="text" id="-of4ID1wMdnQ" # So let's look at some of these results. The article-level metadata for these news stories are stored in a field whose key is `articles`. # + colab={} colab_type="code" id="Ak74lGfdMc1A" news_data['articles'] # + [markdown] colab_type="text" id="u0XGGVgRM3dT" # How would you describe the structure of the `articles` object that's nested within our response object? # + [markdown] colab_type="text" id="T8NbaXegNBNM" # How many articles are there in this list? # + colab={} colab_type="code" id="TGNh4xcvNGih" len(news_data['articles']) # + [markdown] colab_type="text" id="KYMvNDlMNEZX" # Out of 35,000, we only retrieved the first 20. The maximum number this API will return per request is 100. We can refactor our request code to take advantage of the `page` and `pageSize` parameters to get more results in sequence. # # # **Step 4.a** # # **Calculate the total number of pages we'we should expect to have in this result set, given the total number of results and a page size of 100.** # # Note that we'll need an integer for the number of total pages, so we'll need to round up. 
# + colab={} colab_type="code" id="UnxKmaCVNFSB"
# Ceiling division. (The previous expression, `news_data['totalResults'] // 20 + 1`,
# over-counted by one page whenever totalResults was an exact multiple of the page
# size -- e.g. 40 results would report 3 pages instead of 2.)
PAGE_SIZE = 20  # News API's default pageSize
(news_data['totalResults'] + PAGE_SIZE - 1) // PAGE_SIZE

# + [markdown] colab_type="text" id="dFM3m_MGPCzd"
# **Step 4.b**
#
# **Modify your request parameters dictionary if necessary.**
#
# In this case, we're keeping the `pageSize` parameter the same, for the purposes of
# demonstration. But to be more efficient, we could set this to the maximum value.
#
# But let's add another parameter, just to see how this works. The `domains` parameter
# restricts the results to articles from a specific news website.

# + colab={} colab_type="code" id="hjZZQ5GnQA4C"
params.update({'domains': 'cnn.com'})

# + [markdown] colab_type="text" id="_HejQEqmQNXP"
# **Step 4.c**
#
# **Set up a `for` loop to make as many requests as we need, incrementing the `page`
# parameter of our request each time.**
#
# For the sake of this exercise, let's limit our number of queries to 5 so that it
# doesn't take too long.
#
# **Within the loop, we'll need to store our data within a list so that we can access it
# afterwards.**

# + colab={} colab_type="code" id="0V4MynoQSMIH"
# Make sure to define your list outside of the for loop, so that it doesn't get
# overwritten each time.
data_all_pages = []
# Request pages 1 through 5.
for n in range(1, 6):
    # Each time through, we update the page parameter.
    params['page'] = n
    response = requests.get(NEWS_BASE_URL, params=params)
    # Error checking: if for some reason our request returns a status other than 200,
    # we print the status message and exit the loop.
    if response.status_code != 200:
        print(response.text)
        break
    data_this_page = response.json()
    # We are just saving the article-level metadata. We use list.extend (not append)
    # to avoid creating a nested list.
    data_all_pages.extend(data_this_page['articles'])

# + [markdown] colab_type="text" id="XTLlq_pxUfzc"
# We should have retrieved 100 results. Did the code above work as planned?
# + colab={} colab_type="code" id="9HcKBDpbT8e-"
len(data_all_pages)

# + [markdown] colab_type="text" id="UFhwuqGVUl-V"
# **Step 5**
#
# **Convert the list of results to CSV format.**
#
# Lucky for us, the JSON object returned by the News API is not very complex. With one
# fairly minor adjustment, the list of articles can be flattened into a table structure
# for export to a spreadsheet.
#
# As a rule of thumb, a list of dictionaries in Python can be converted into a 2-D table
# provided the following are true:
#
# 1. Each dictionary has the same keys. (The number of columns in the table will
# correspond to the number of unique keys in the data.)
#
# 2. No dictionary entry is nested. In other words, the dictionary items have only
# scalar values.
#
# In our case, only one metadata item is nested at the article level. The `source` key
# has a dictionary for its value.

# + [markdown] colab_type="text" id="FoDl65YhW_Z8"
# **Step 5.a**
#
# **Flatten the `source` dictionary by creating key-value pairs at the article level.**

# + colab={} colab_type="code" id="1e8jiYwmUlOX"
# Each article dict is mutated in place, so we can iterate over the list directly --
# no index bookkeeping needed (the original used `enumerate` plus re-indexing).
for article in data_all_pages:
    # Remove the nested source dictionary from the article...
    source = article.pop('source')
    # ...and promote its fields to top-level, article-level keys.
    article['source_name'] = source['name']
    article['source_id'] = source['id']

# + [markdown] colab_type="text" id="BmG8d8T_YiJC"
# **Step 5.b**
#
# **Use the `pandas` library to create a DataFrame out of this list of dictionaries.**
#
# There are other methods for creating tables for export, but pandas provides a
# particularly convenient way of doing that.

# + colab={} colab_type="code" id="C61m4KouZrm9"
# The from_records method on the DataFrame class in pandas will create a table out of a
# list of dicts, using the dictionary keys as the column headers.
news_dataframe = pd.DataFrame.from_records(data_all_pages)

# + [markdown] colab_type="text" id="w8AdFoSCZ-5q"
# **Step 5.c**
#
# **Use the `to_csv` method on your DataFrame to save it as a CSV file.**
#
# If you don't pass the `index=False` argument to the `to_csv` function, your CSV will
# have a numerical index as the first column.

# + colab={} colab_type="code" id="3MvW6nkeZ5tR"
news_dataframe.to_csv('news_api_results_first_100.csv', index=False)

# + [markdown] colab_type="text" id="GNqKB3hob8th"
# If you're using Google Colab, you can download your file by expanding the panel to the
# left of your notebook. Then click on the **Files** tab. You should see your file
# listed there. Right click on it and select **Download**.

# + [markdown] colab_type="text" id="Pk3cmAc20EGR"
# ### Using an API to get stock price data

# + [markdown] colab_type="text" id="D_fsrsr3Fb5S"
# **Alpha Vantage** https://www.alphavantage.co/documentation/ provides a free API for
# retrieving daily stock price data in either JSON or CSV format.
#
# We'll use this API to get some time-series data about the prices of Tesla stock.
# + [markdown] colab_type="text" id="meTlhhDd1dtw" # **Step 1: Create an account at Alpha Vantage to get your API key.** # # You'll need to enter your name and email address at the following link: https://www.alphavantage.co/support/#api-key # + [markdown] colab_type="text" id="UHEiki0012ov" # **Step 2: Create variables to store your API key and the Alpha Vantage endpoint.** # # + colab={} colab_type="code" id="2ti7Dcl1ANaj" ALPHA_VANTAGE_KEY = '' # + colab={} colab_type="code" id="LheeNqpbFgMc" AV_URL = 'https://www.alphavantage.co/query' # + [markdown] colab_type="text" id="gC8ZRUOQ2FYd" # **Step 3: Create your dict of parameters.** # # As we did with the News API, we'll be making a GET request. Alpha Vantage has only one endpoint, and you specify the kind of data you want by using the `function` parameter in your request. # # We want the intraday stock price data, so we should that parameter to `TIME_SERIES_INTRADAY`. We also need to pass it the ticker symbol for our commodity as a value on the parameter `symbol`. There are a few additional parameters we can set to make sure we get a decent amount of data back. # # + colab={} colab_type="code" id="zYGmvX4mHFZh" params = {'function': 'TIME_SERIES_INTRADAY', 'symbol': 'TSLA', 'interval': '60min', 'outputsize': 'full', 'apikey': ALPHA_VANTAGE_KEY} # + [markdown] colab_type="text" id="B-Nx76CH3RF5" # **Step 4: Make your request and store the response in a variable.** # + colab={} colab_type="code" id="GSzyVKYrB7J9" av_response = requests.get(AV_URL, params=params) tesla_time_series = av_response.json() # + [markdown] colab_type="text" id="PYg8cKAh3szc" # The Alpha Vantage API, like News API, returns JSON by default. But let's make sure that we got back something useable. # # What do you notice about the structure of the data from this API? # # How does it differ from what we go from News API? 
# + [markdown] colab_type="text" id="WkO6uiWC4Wln" # **Step 5.a** # # Since this data is quantitative, it would be especially helpful to have it in tabular form. Once again, we can use the `DataFrame` class within the `pandas` library to flatten a nested native Python data structure into a table. # # The command this time is a little different, because we're not dealing with a list as the outermost Python element. # + colab={} colab_type="code" id="uPzB7npfH2vn" # The orient='index' keyword argument is necessary to tell pandas that the keys of our outermost dictionary should be the index values of the table tesla_table = pd.DataFrame.from_dict(tesla_time_series['Time Series (60min)'], orient='index') # + [markdown] colab_type="text" id="zjxmENT8-UJ0" # At this point, we could can export our table as a CSV file, just as we did with the News API data. # # Or we might want to use pandas to do some analysis, since pandas has a lot of powerful functions for data visualization and analysis. # # But first we need to do a little cleanup. # + [markdown] colab_type="text" id="JekudBg_6U__" # **Step 5.b** # # The index of our table is a time series. But pandas doesn't necessarily know that yet, since the data we retrieved from the API were purely strings. # # **Convert the DataFrame's index values to datetime objects.** # + colab={} colab_type="code" id="dt7yj4gGJxZ4" tesla_table.index = pd.to_datetime(tesla_table.index) # + [markdown] colab_type="text" id="Cix9CjOm_CDi" # **Step 5.b** # # **Convert the values in the rest of the table to a numeric type (float).** # + colab={} colab_type="code" id="zuSjEgcS_NcZ" tesla_table_numeric = tesla_table.astype('float') # + [markdown] colab_type="text" id="02_LcVfp_Q3w" # **Step 5.c** # # **Finally, clean up the column names to remove the initial number.** # # We can do this using the Python string function `split`. And we can access the column labels of a DataFrame by using the `columns` property. 
# By passing a new list to `DataFrame.columns`, we can replace the existing labels with
# new ones.

# + colab={} colab_type="code" id="jVVg_RmxKzua"
def clean_column(column_name):
    """Strip the leading number from an Alpha Vantage column label.

    The API labels its columns like '1. open' or '5. adjusted close'; everything
    after the first run of whitespace is the real name. Using maxsplit=1 keeps
    multi-word names intact (the previous version returned only the first word,
    so '5. adjusted close' would have become just 'adjusted').
    """
    return column_name.split(maxsplit=1)[1]

tesla_table_numeric.columns = [clean_column(c) for c in tesla_table_numeric.columns]

# + [markdown] colab_type="text" id="8mzXldDK_5I1"
# **Step 6**
#
# **Use `pandas` built-in plotting functions to visualize the data.**
#
# `pandas` uses the `pyplot` module from the `matplotlib` library to produce charts and
# graphs. If you want to customize your charts and graphs, you can run
# `from matplotlib import pyplot`, which will give you many more options.

# + colab={} colab_type="code" id="QMb0ONXc7ZKU"
# Here we produce a simple line chart of the highest stock price for each period
# measured. Because our index is a time series, pandas automatically makes that the
# X axis.
tesla_table_numeric.plot(y='high')

# + [markdown] colab_type="text" id="_9nbmG3MA7Ev"
# **Step 7**
#
# **Export your data as a CSV file.**
#
# Note that our table has about 1,500 rows, so the CSV file will be fairly large. In
# these cases, if you're working in Colab, it's better to save the file to your Google
# Drive first before downloading; downloading directly from the **Files** panel in Colab
# isn't efficient for large files. If you click the `MOUNT DRIVE` icon on the Files
# panel, it will insert some code into your notebook that you can run to enable access
# to your Drive. From there, you can save the file to your Drive instead.
#
# If you are running this from a Jupyter notebook, you don't need to worry about that
# step.

# + colab={} colab_type="code" id="KSbnF3BJ7a5d"
tesla_table_numeric.to_csv('tesla_time_series.csv')

# + [markdown] colab_type="text" id="WRHxQu8oBJM_"
# ### Tips for working with API's in Python
#
# In this workshop, we covered the basics of using Python to retrieve data from API's.
# This approach may cover many of your use cases.
But here are a few tips for further exploration: # # # # * If you need to make many requests in tandem -- _e.g._, you want to request stock price data for a long list of commodities -- you can sometimes speed up your code dramatically by using the asynchronous functionality available in the most recent releases of Python. # * The [`aiohttp` library](https://aiohttp.readthedocs.io/en/stable/) provides functions that, in conjunction with the `asyncio` library that ships with Python 3.7+, allow you to make asynchronous HTTP requests. For an introduction to asynchronous programming in Python that covers the latest developments, see "[Async IO in Python: A Complete Walkthrough](https://realpython.com/async-io-python/)." # * Though many API's return data in JSON, some use XML instead. Unlike JSON, XML can't be converted to Python data types in one step; it's necessary to parse the XML first and extract the elements you want. The [lxml](https://lxml.de/) library provides robust functionality for doing that. # # * Before using any API, make sure you read the documentation and the terms of service. # * Many API's require you to limit your requests to a certain number per specified time period (_e.g._, per second, per hour, per day.) Violating these limits can sometimes cause the provider to shut off access to your account/IP address. # * Some API's place restrictions on what you can do with the data. For instance, they may make it available only for research purposes, or only non-commercial uses, etc. # # # + colab={} colab_type="code" id="ll3aIwOpHs4L"
python-for-apis/python_api_workshop.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Interactive TFX (0.22) pipeline: ingest CSV data, compute statistics, infer and
# validate a schema, transform, train, evaluate against the last blessed model, and
# push the trained model to a serving directory.

# + colab={} colab_type="code" id="fDtXa-zQdQ6w" tags=["outputPrepend"]
# !pip install tfx==0.22.0

# + colab={} colab_type="code" id="i5TEWazMdQ60"
import os
import sys
import tensorflow as tf
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.utils.dsl_utils import external_input
from tfx.components import CsvExampleGen
from tfx.proto import pusher_pb2, trainer_pb2, example_gen_pb2

# + colab={} colab_type="code" id="qlLclF5TdQ62" outputId="1a1f5914-645c-4fa0-abb9-768e8e2e5c30" tags=[]
# All component artifacts and metadata are written under ../tfx.
context = InteractiveContext(pipeline_root='../tfx')

# + colab={} colab_type="code" id="eBe2ycnpdQ65" outputId="b07e3d22-5548-48ab-e2ae-451583334d84" tags=[]
base_dir = os.getcwd()
#data_dir = "../data/consumer_complaints_with_narrative.csv"
data_dir = "../data"
#examples = external_input(data_dir)
#example_gen = CsvExampleGen(input=examples)

# 90/10 train/eval split by hash bucket.
output = example_gen_pb2.Output(
    split_config=example_gen_pb2.SplitConfig(
        splits=[
            example_gen_pb2.SplitConfig.Split(
                name="train",
                hash_buckets=9
            ),
            example_gen_pb2.SplitConfig.Split(name="eval", hash_buckets=1),
        ]
    )
)
examples = external_input(data_dir)
example_gen = CsvExampleGen(input=examples, output_config=output)
# -

context.run(example_gen)

# + colab={} colab_type="code" id="CnFyEhTWdQ67" outputId="a9edd0de-9101-4a59-f1c8-0402056e59d2"
# Compute dataset statistics over the ingested examples.
from tfx.components import StatisticsGen

statistics_gen = StatisticsGen(
    examples=example_gen.outputs['examples'])
context.run(statistics_gen)

# + colab={} colab_type="code" id="dUIpEwKmdQ69" outputId="4e472c96-75e1-4df5-808a-d96ecfe40f01" tags=[]
context.show(statistics_gen.outputs['statistics'])

# + colab={} colab_type="code" id="aftvB9midQ6_" outputId="4f069209-2fdc-4ccd-e086-88a5c7291465"
# Infer a schema from the computed statistics.
from tfx.components import SchemaGen

schema_gen = SchemaGen(
    statistics=statistics_gen.outputs['statistics'],
    infer_feature_shape=True)
context.run(schema_gen)

# + colab={} colab_type="code" id="zaZ8sb8vdQ7B" outputId="82b20272-de97-47bf-907f-5975e61072b8"
context.show(schema_gen.outputs['schema'])

# + colab={} colab_type="code" id="RqX5ay4GdQ7D" outputId="9cf3df64-0bb8-4876-cf59-649fab196e3d"
# Check the statistics against the schema and surface any anomalies.
from tfx.components import ExampleValidator

example_validator = ExampleValidator(
    statistics=statistics_gen.outputs['statistics'],
    schema=schema_gen.outputs['schema'])
context.run(example_validator)
context.show(example_validator.outputs['anomalies'])

# + colab={} colab_type="code" id="rEpmhz4rdQ7F"
# The preprocessing_fn lives in the shared project module file.
transform_file = os.path.join(base_dir, '../components/module.py')

# + colab={} colab_type="code" id="uQPUwTD7dQ7H" outputId="32162f73-8540-434f-c670-4c04c55021f1" tags=["outputPrepend"]
from tfx.components import Transform

transform = Transform(
    examples=example_gen.outputs['examples'],
    schema=schema_gen.outputs['schema'],
    module_file=transform_file)
context.run(transform)

# + colab={} colab_type="code" id="HcTmORm3dQ7I"
# The same module file also defines run_fn for the Trainer.
trainer_file = os.path.join(base_dir, '../components/module.py')

# + colab={} colab_type="code" id="fW5DqNOLdQ7K" outputId="fb8c3d01-6871-4cbc-ff19-1a7b01caa152" tags=[]
from tfx.components import Trainer
from tfx.proto import trainer_pb2
from tfx.components.base import executor_spec
from tfx.components.trainer.executor import GenericExecutor

TRAINING_STEPS = 1000
EVALUATION_STEPS = 100

# GenericExecutor runs the module file's run_fn (Keras-style) rather than the
# default Estimator-based trainer_fn.
trainer = Trainer(
    module_file=trainer_file,
    custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),
    examples=transform.outputs['transformed_examples'],
    schema=schema_gen.outputs['schema'],
    transform_graph=transform.outputs['transform_graph'],
    train_args=trainer_pb2.TrainArgs(num_steps=TRAINING_STEPS),
    eval_args=trainer_pb2.EvalArgs(num_steps=EVALUATION_STEPS))
context.run(trainer)

# + [markdown] colab_type="text" heading_collapsed=true id="Onn4P7bNdQ7M"
# ### Load TensorBoard

# + colab={} colab_type="code" hidden=true id="K4u1dIUedQ7M"
# Path to the trained-model artifact produced by the Trainer run above.
model_artifact_dir = trainer.outputs['model'].get()[0].uri

# + colab={} colab_type="code" hidden=true id="ADmH1NOZdQ7O" outputId="b54e0475-8171-45d0-e957-e8940275ebed" tags=[]
log_dir = os.path.join(model_artifact_dir, 'logs/')
# %load_ext tensorboard
# %tensorboard --logdir {log_dir}

# + [markdown] colab_type="text" id="2Ehf-8OJdQ7Q"
# ### Evaluate the model

# + colab={} colab_type="code" id="blIfohW7dQ7T" outputId="dac90e1b-b3bf-4021-e6c3-c869452be203"
# Resolve the latest blessed model (if any) to use as the evaluation baseline.
from tfx.components import ResolverNode
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing

model_resolver = ResolverNode(
    instance_name='latest_blessed_model_resolver',
    resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
    model=Channel(type=Model),
    model_blessing=Channel(type=ModelBlessing))
context.run(model_resolver)

# + colab={} colab_type="code" id="sYxrTXWrdQ7V"
# nb it always blesses on first run even if below threshold
import tensorflow_model_analysis as tfma

# Evaluate overall and sliced by 'product'; the AUC thresholds gate blessing.
eval_config = tfma.EvalConfig(
    model_specs=[tfma.ModelSpec(label_key='consumer_disputed')],
    slicing_specs=[tfma.SlicingSpec(),
                   tfma.SlicingSpec(feature_keys=['product'])],
    metrics_specs=[
        tfma.MetricsSpec(metrics=[
            tfma.MetricConfig(class_name='BinaryAccuracy'),
            tfma.MetricConfig(class_name='ExampleCount'),
            tfma.MetricConfig(class_name='AUC')
        ],
        thresholds={
            'AUC': tfma.config.MetricThreshold(
                value_threshold=tfma.GenericValueThreshold(
                    lower_bound={'value': 0.65}),
                change_threshold=tfma.GenericChangeThreshold(
                    direction=tfma.MetricDirection.HIGHER_IS_BETTER,
                    absolute={'value': 0.01}))}
        )])

# + colab={} colab_type="code" id="siVCVXLAdQ7W" outputId="20c4b344-26e1-4ba4-9b27-8333f9c6a24f" tags=[]
from tfx.components import Evaluator

evaluator = Evaluator(
    examples=example_gen.outputs['examples'],
    model=trainer.outputs['model'],
    baseline_model=model_resolver.outputs['model'],
    eval_config=eval_config)
context.run(evaluator)

# + colab={} colab_type="code" id="jJRVrAjpdQ7Y"
#NB TFMA visualizations will not run in Jupyter Lab
import tensorflow_model_analysis as tfma

# Get the TFMA output result path and load the result.
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
tfma_result = tfma.load_eval_result(PATH_TO_RESULT)

# + colab={"referenced_widgets": ["f75a07bbbbe1453db00ddd65160ab9c9"]} colab_type="code" id="kUZgwfFbdQ7a" outputId="5ffae360-ecfd-4c68-9ffc-b4446dcab863"
tfma.view.render_slicing_metrics(tfma_result)

# + colab={} colab_type="code" id="AHa-VNwMdQ7c" outputId="6a363363-e2bd-4543-c4e7-c314c7220366"
# Push the model to the serving directory only if the Evaluator blessed it.
from tfx.components.pusher.component import Pusher
from tfx.proto import pusher_pb2

_serving_model_dir = "../interactive-pipeline/serving_model_dir"

pusher = Pusher(
    model=trainer.outputs['model'],
    model_blessing=evaluator.outputs['blessing'],
    push_destination=pusher_pb2.PushDestination(
        filesystem=pusher_pb2.PushDestination.Filesystem(
            base_directory=_serving_model_dir)))
context.run(pusher)

# + [markdown] colab_type="text" id="t-coENTudQ7e"
# ### Extra stuff

# + colab={} colab_type="code" id="2y5TnFJEdQ7e"
# !mkdir -p ../tfx-9Apr/serving_model_dir

# + colab={} colab_type="code" id="t011eTKtdQ7g" tags=[]
# Enable the notebook extensions TFMA's widgets need (classic Jupyter only).
# !jupyter nbextension enable --py widgetsnbextension --sys-prefix
# !jupyter nbextension install --py --symlink tensorflow_model_analysis --sys-prefix
# !jupyter nbextension enable --py tensorflow_model_analysis --sys-prefix
# THEN REFRESH BROWSER PAGE!

# + colab={} colab_type="code" id="0mnTmEQDdQ7i"
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
print(tfma.load_validation_result(PATH_TO_RESULT))

# + colab={} colab_type="code" id="LxX5QChUdQ7j"
# Show data sliced by product
tfma.view.render_slicing_metrics(
    tfma_result,
    slicing_column='product')

# + colab={} colab_type="code" id="BOobZFeJdQ7m"
# fairness indicators direct from pipeline
# https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/fairness_indicators/examples/Fairness_Indicators_Lineage_Case_Study.ipynb

# + colab={} colab_type="code" id="MH6qEFWrdQ7o"
interactive-pipeline/.ipynb_checkpoints/interactive_pipeline-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PythonData
#     language: python
#     name: pythondata
# ---

import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sts
# %matplotlib inline

# +
# Set the x-axis to a list of strings for each month.
x_axis = ["Jan", "Feb", "Mar", "April", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"]

# Set the y-axis to a list of floats as the total fare in US dollars accumulated for each month.
y_axis = [10.02, 23.24, 39.20, 35.42, 32.34, 27.04, 43.82, 10.56, 11.85, 27.90, 20.71, 20.09]
# -
# (A second, byte-identical copy of the cell above was removed: it was an accidental
# duplicate and had no effect.)

# Create the plot
plt.plot(x_axis, y_axis)

fig, ax = plt.subplots()
ax.plot(x_axis, y_axis)

fig = plt.figure()
ax = fig.add_subplot()
ax.plot(x_axis, y_axis)

# Annotated line chart: diamond markers, axis labels, title, legend, grid.
plt.plot(x_axis, y_axis, marker="D", color="green", linewidth=2, label="Boston")
plt.xlabel("Date")
plt.ylabel("Fare($)")  # was "Fare()" -- missing the dollar sign
plt.ylim(0, 45)
plt.title("PyBer Fare by Month")
plt.legend()
plt.grid()

# Vertical bar chart.
plt.bar(x_axis, y_axis, color="green", label="Boston")
plt.xlabel("Date")
plt.ylabel("Fare$")
plt.title("PyBer Fare by Month")
plt.legend()

# Horizontal bar chart; invert so January appears at the top.
plt.barh(x_axis, y_axis, color="magenta", label="Boston")
plt.gca().invert_yaxis()
plt.legend()

fig, ax = plt.subplots()
ax.bar(x_axis, y_axis)

fig, ax = plt.subplots()
ax.barh(x_axis, y_axis, color="cyan", label="Chicago")
ax.invert_yaxis()
ax.set_title("PyBer Fare by Month")
ax.set_xlabel("Fare$")
ax.set_ylabel("Date")
ax.legend()

plt.plot(x_axis, y_axis, "o")

# Scatter of fares (y) per month (x). The original labeled x as "Fare$" and y as
# "Date" -- swapped -- and titled it "PyBer by Fare Month"; both are corrected to
# match what is actually plotted.
plt.scatter(x_axis, y_axis, color="r", label="chicago")
plt.title("PyBer Fare by Month")
plt.xlabel("Date")
plt.ylabel("Fare($)")
plt.gca().invert_yaxis()

plt.scatter(x_axis, y_axis, s=y_axis)

# Scale the marker sizes up 3x and use them (the original built y_axis_larger and
# then passed s=y_axis again, leaving the list unused).
y_axis_larger = []
for data in y_axis:
    y_axis_larger.append(data * 3)
plt.scatter(x_axis, y_axis, s=y_axis_larger)

plt.scatter(x_axis, y_axis, s=[i * 5 for i in y_axis])

fig, ax = plt.subplots()
ax.scatter(y_axis, x_axis, label="Boston", color="skyblue", alpha=.75, lw=2,
           edgecolors="black", s=[i * 5 for i in y_axis])
ax.set_title("pyBer Fare by Month")
ax.set_xlabel("Fare$")
ax.set_ylabel("Date")
ax.invert_yaxis()
ax.set_xlim(0, 50)
ax.legend()

# +
# Assign 12 colors, one for each month.
colors = ["slateblue", "magenta", "lightblue", "green", "yellowgreen", "greenyellow",
          "yellow", "orange", "gold", "indianred", "tomato", "mistyrose"]
explode_values = (0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0)
plt.subplots(figsize=(8, 8))
plt.pie(y_axis, explode=explode_values, colors=colors, labels=x_axis, autopct='%.1f%%')
plt.show()

# +
fig, ax = plt.subplots(figsize=(8, 8))
ax.pie(y_axis,
       labels=x_axis,
       autopct="%.1f%%",
       explode=(0, 0, .2, 0, 0, .3, 0, 0, 0, 0, 0, 0),
       shadow=True,
       startangle=90)
plt.show()
# -
PyBer_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2
from dataset import FashionEdgesDataset
from utils import edges2mask, image2edges, vis_batch, collate_fn, tensor2numpy
import torch
from torch.utils.data import DataLoader
import torchvision
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook

# debug
from skimage.morphology import dilation, disk, square
from torch.nn.functional import interpolate
from PIL import Image
import cv2
# -

from skimage.transform import resize

# +
dataset = FashionEdgesDataset('../cp-vton/', check_corrupted=True, size=(128, 128), return_mask=True)
training_data_loader = DataLoader(dataset=dataset, batch_size=36, collate_fn=collate_fn, shuffle=False)

# Grab just the first batch for visualization.
for batch in training_data_loader:
    break

vis_batch(batch[-1], 16)
# -

vis_batch(batch[0], 16)
vis_batch(batch[1], 16)

vgg = torchvision.models.vgg19(pretrained=True).cuda()

# Extract VGG19 feature maps for every masked image in the dataset.
features = []
with torch.no_grad():  # inference only -- skip autograd bookkeeping to save GPU memory
    for batch in tqdm_notebook(training_data_loader):
        mask = batch[0]
        # Repeat the mask along dim 1 so it covers all three image channels.
        mask = torch.cat([mask, mask, mask], 1)
        image = batch[1]
        image_bin = image * mask
        feature = vgg.features(image_bin.float().cuda())
        features.append(feature.detach().cpu())

from sklearn.cluster import DBSCAN
from sklearn.manifold import TSNE

# NOTE(review): `f` is never defined in this checkpoint -- presumably a 2-D
# (n_samples, n_features) matrix built from `features`; confirm before running.
dbscan = DBSCAN(leaf_size=100)
dbscan.fit(f)

# Fraction of points DBSCAN marked as noise (label -1). In the original this cell
# appeared *before* `dbscan` was fitted and would have raised a NameError; the
# cells are reordered here so the fit happens first.
(dbscan.labels_ == -1).sum() / len(dbscan.labels_)

np.unique(dbscan.labels_)
.ipynb_checkpoints/extract_textures-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

data = {'label1': ['pig','pig', 'cat', None], 'label2': ['cow', 'dog', None, 'pig'], 'label3': ['cow', 'horse', 'pig', 'cat']}
df = pd.DataFrame(data)
df


# +
def move_to_end(lst):
    """Drop falsy entries (e.g. None) from lst and append a single None at the end."""
    new_list = [label for label in lst if label]
    return new_list + [None]


def flexible_data(df):
    '''
    Input: Dataframe with n(i, j) = j-th annotation for the i-th data instance.
    Output:
        data_dict, dictionary mapping each original label to its numeric code
                   as a float (None maps to None)
        numbered_labels, list of integer codes assigned to the unique
                   non-None labels
    '''
    # Collect every annotation in the frame, then deduplicate.
    all_data = []
    for idx, row in df.iterrows():
        all_data += list(row)
    unique_data = list(set(all_data))
    unique_data = move_to_end(unique_data)
    unique_data_ex_none = list(filter(None, unique_data))
    numbered_labels = list(range(len(unique_data_ex_none)))

    data_dict = {}
    # zip() truncates at len(numbered_labels), so the trailing None appended by
    # move_to_end() never enters this loop; it is handled explicitly below.
    # (The original code carried a dead `if name == None` branch here.)
    for name, i in zip(unique_data, numbered_labels):
        data_dict[name] = float(i)
    data_dict[None] = None
    return data_dict, numbered_labels


def convert_dataframe(df):
    """Return (converted_df, numbered_labels) where every label in df is
    replaced by its numeric code from flexible_data()."""
    data_dict, numbered_labels = flexible_data(df)
    new_data = {}
    # .items() -- .iteritems() was deprecated and removed in pandas 2.0.
    for idx, col in df.items():
        new_data[idx] = [data_dict[instance] for instance in col]
    return pd.DataFrame(new_data), numbered_labels
# -

df_new = convert_dataframe(df)
df_new[0]

df

lst = ['hi', 'bye', None, 'hello']
lst

new_list = move_to_end(lst)
new_list
notebooks/.ipynb_checkpoints/Untitled1-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.6 # language: python # name: python36 # --- # Copyright (c) Microsoft Corporation. All rights reserved. # # Licensed under the MIT License. # ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.png) # # Assess Fairness, Explore Interpretability, and Mitigate Fairness Issues # # This notebook demonstrates how to use [InterpretML](interpret.ml), [Fairlearn](fairlearn.org), and the [Responsible AI Widget's](https://github.com/microsoft/responsible-ai-widgets/) Fairness and Interpretability dashboards to understand a model trained on the Census dataset. This dataset is a classification problem - given a range of data about 32,000 individuals, predict whether their annual income is above or below fifty thousand dollars per year. # # For the purposes of this notebook, we shall treat this as a loan decision problem. We will pretend that the label indicates whether or not each individual repaid a loan in the past. We will use the data to train a predictor to predict whether previously unseen individuals will repay a loan or not. The assumption is that the model predictions are used to decide whether an individual should be offered a loan. # # We will first train a fairness-unaware predictor, load its global and local explanations, and use the interpretability and fairness dashboards to demonstrate how this model leads to unfair decisions (under a specific notion of fairness called *demographic parity*). We then mitigate unfairness by applying the `GridSearch` algorithm from `Fairlearn` package. 
#
# ## Install required packages

# %pip install --upgrade fairlearn
# %pip install --upgrade interpret-community
# %pip install --upgrade raiwidgets

# After installing packages, you must close and reopen the notebook as well as restarting the kernel.

# ## Load and preprocess the dataset
#
# For simplicity, we import the dataset from the `shap` package, which contains the data in a cleaned format. We start by importing the various modules we're going to use:

# +
from fairlearn.reductions import GridSearch
from fairlearn.reductions import DemographicParity, ErrorRate
from fairlearn.datasets import fetch_adult
from fairlearn.metrics import MetricFrame, selection_rate

from sklearn import svm, neighbors, tree
from sklearn.compose import ColumnTransformer, make_column_selector
from sklearn.preprocessing import LabelEncoder,StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

import pandas as pd
import numpy as np

# SHAP Tabular Explainer
from interpret.ext.blackbox import KernelExplainer
from interpret.ext.blackbox import MimicExplainer
from interpret.ext.glassbox import LGBMExplainableModel
# -

# We can now load and inspect the data:

dataset = fetch_adult(as_frame=True)
X_raw, y = dataset['data'], dataset['target']
X_raw["race"].value_counts().to_dict()

# We are going to treat the sex of each individual as a protected attribute (where 0 indicates female and 1 indicates male), and in this particular case we are going separate this attribute out and drop it from the main data. We then perform some standard data preprocessing steps to convert the data into a format suitable for the ML algorithms

# +
sensitive_features = X_raw[['sex','race']]

le = LabelEncoder()
y = le.fit_transform(y)
# -

# Finally, we split the data into training and test sets:

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test, sensitive_features_train, sensitive_features_test = \
    train_test_split(X_raw, y, sensitive_features,
                     test_size = 0.2, random_state=0, stratify=y)

# Work around indexing bug
X_train = X_train.reset_index(drop=True)
sensitive_features_train = sensitive_features_train.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
sensitive_features_test = sensitive_features_test.reset_index(drop=True)
# -

# ## Training a fairness-unaware predictor
#
# To show the effect of `Fairlearn` we will first train a standard ML predictor that does not incorporate fairness. For speed of demonstration, we use a simple logistic regression estimator from `sklearn`:

# +
# Numeric columns: impute missing values, then standardize.
numeric_transformer = Pipeline(
    steps=[
        ("impute", SimpleImputer()),
        ("scaler", StandardScaler()),
    ]
)
# Categorical columns: impute with the mode, then one-hot encode.
categorical_transformer = Pipeline(
    [
        ("impute", SimpleImputer(strategy="most_frequent")),
        ("ohe", OneHotEncoder(handle_unknown="ignore")),
    ]
)
preprocessor = ColumnTransformer(
    transformers=[
        ("num", numeric_transformer, make_column_selector(dtype_exclude="category")),
        ("cat", categorical_transformer, make_column_selector(dtype_include="category")),
    ]
)

model = Pipeline(
    steps=[
        ("preprocessor", preprocessor),
        (
            "classifier",
            LogisticRegression(solver="liblinear", fit_intercept=True),
        ),
    ]
)

model.fit(X_train, y_train)
# -

# ## Generate model explanations

# Using SHAP KernelExplainer
# model.steps[-1][1] returns the trained classification model
explainer = MimicExplainer(model.steps[-1][1],
                           X_train,
                           LGBMExplainableModel,
                           features=X_raw.columns,
                           classes=['Rejected', 'Approved'],
                           transformations=preprocessor)

# ### Generate global explanations
# Explain overall model predictions (global explanation)

# Explain the model based on a subset of 1000 rows
global_explanation = explainer.explain_global(X_test[:1000])

global_explanation.get_feature_importance_dict()

# ### Generate local explanations
# Explain local data points (individual instances)

# You can pass a specific data point or a group of data points to the explain_local function
# E.g., Explain the first data point in the test set
instance_num = 1
local_explanation = explainer.explain_local(X_test[:instance_num])

# +
# Get the predicted class for the explained instance; it selects which
# per-class explanation to display below.
# BUG FIX: the original used model.predict(X_test)[instance_num], i.e. the
# prediction for row 1, while explain_local above explained rows
# [0, instance_num) -- i.e. row 0 only. Predict on the same slice that was
# explained so the prediction and the explanation refer to the same instance.
prediction_value = model.predict(X_test[:instance_num])[-1]

sorted_local_importance_values = local_explanation.get_ranked_local_values()[prediction_value]
sorted_local_importance_names = local_explanation.get_ranked_local_names()[prediction_value]
# -

print('local importance values: {}'.format(sorted_local_importance_values))
print('local importance names: {}'.format(sorted_local_importance_names))

# ## Visualize model explanations

# Load the interpretability visualization dashboard
from raiwidgets import ExplanationDashboard
ExplanationDashboard(global_explanation, model, dataset=X_test[:1000], true_y=y_test[:1000])

# We can load this predictor into the Fairness dashboard, and examine how it is unfair:

# ## Assess model fairness

# Load the fairness visualization dashboard

# +
from raiwidgets import FairnessDashboard

y_pred = model.predict(X_test)

FairnessDashboard(sensitive_features=sensitive_features_test,
                  y_true=y_test,
                  y_pred=y_pred)
# -

# Looking at the disparity in accuracy, we see that males have an error rate about three times greater than the females. More interesting is the disparity in opportunitiy - males are offered loans at three times the rate of females.
#
# Despite the fact that we removed the feature from the training data, our predictor still discriminates based on sex. This demonstrates that simply ignoring a protected attribute when fitting a predictor rarely eliminates unfairness. There will generally be enough other features correlated with the removed attribute to lead to disparate impact.

# ## Mitigation with Fairlearn (GridSearch)
#
# The `GridSearch` class in `Fairlearn` implements a simplified version of the exponentiated gradient reduction of [Agarwal et al. 2018](https://arxiv.org/abs/1803.02453). The user supplies a standard ML estimator, which is treated as a blackbox. `GridSearch` works by generating a sequence of relabellings and reweightings, and trains a predictor for each.
#
# For this example, we specify demographic parity (on the protected attribute of sex) as the fairness metric. Demographic parity requires that individuals are offered the opportunity (are approved for a loan in this example) independent of membership in the protected class (i.e., females and males should be offered loans at the same rate). We are using this metric for the sake of simplicity; in general, the appropriate fairness metric will not be obvious.

# +
# Fairlearn is not yet fully compatible with Pipelines, so we have to pass the estimator only
X_train_prep = preprocessor.transform(X_train).toarray()
X_test_prep = preprocessor.transform(X_test).toarray()

sweep = GridSearch(LogisticRegression(solver="liblinear", fit_intercept=True),
                   constraints=DemographicParity(),
                   grid_size=70)
# -

# Our algorithms provide `fit()` and `predict()` methods, so they behave in a similar manner to other ML packages in Python. We do however have to specify two extra arguments to `fit()` - the column of protected attribute labels, and also the number of predictors to generate in our sweep.
#
# After `fit()` completes, we extract the full set of predictors from the `GridSearch` object.
# +
sweep.fit(X_train_prep, y_train,
          sensitive_features=sensitive_features_train.sex)

predictors = sweep.predictors_
# -

# We could load these predictors into the Fairness dashboard now. However, the plot would be somewhat confusing due to their number. In this case, we are going to remove the predictors which are dominated in the error-disparity space by others from the sweep (note that the disparity will only be calculated for the sensitive feature). In general, one might not want to do this, since there may be other considerations beyond the strict optimization of error and disparity (of the given protected attribute).

# +
accuracies, disparities = [], []

for predictor in predictors:
    # FIX: MetricFrame's arguments are keyword-only in current Fairlearn
    # releases (>= 0.7); the original positional call fails after the
    # `pip install --upgrade fairlearn` at the top of this notebook.
    accuracy_metric_frame = MetricFrame(
        metrics=accuracy_score,
        y_true=y_train,
        y_pred=predictor.predict(X_train_prep),
        sensitive_features=sensitive_features_train.sex)
    selection_rate_metric_frame = MetricFrame(
        metrics=selection_rate,
        y_true=y_train,
        y_pred=predictor.predict(X_train_prep),
        sensitive_features=sensitive_features_train.sex)
    accuracies.append(accuracy_metric_frame.overall)
    disparities.append(selection_rate_metric_frame.difference())

all_results = pd.DataFrame({"predictor": predictors, "accuracy": accuracies, "disparity": disparities})

all_models_dict = {"unmitigated": model.steps[-1][1]}
dominant_models_dict = {"unmitigated": model.steps[-1][1]}
base_name_format = "grid_{0}"
row_id = 0
for row in all_results.itertuples():
    model_name = base_name_format.format(row_id)
    all_models_dict[model_name] = row.predictor
    # A model is "dominant" if no other model achieves better accuracy at
    # equal-or-lower disparity.
    accuracy_for_lower_or_eq_disparity = all_results["accuracy"][all_results["disparity"] <= row.disparity]
    if row.accuracy >= accuracy_for_lower_or_eq_disparity.max():
        dominant_models_dict[model_name] = row.predictor
    row_id = row_id + 1
# -

# We can construct predictions for all the models, and also for the dominant models:

# +
from raiwidgets import FairnessDashboard

dashboard_all = {}
for name, predictor in all_models_dict.items():
    value = predictor.predict(X_test_prep)
    dashboard_all[name] = value

dominant_all = {}
for name, predictor in dominant_models_dict.items():
    dominant_all[name] = predictor.predict(X_test_prep)

FairnessDashboard(sensitive_features=sensitive_features_test,
                  y_true=y_test,
                  y_pred=dominant_all)
# -

# We can look at just the dominant models in the dashboard:

# We see a Pareto front forming - the set of predictors which represent optimal tradeoffs between accuracy and disparity in predictions. In the ideal case, we would have a predictor at (1,0) - perfectly accurate and without any unfairness under demographic parity (with respect to the protected attribute "sex"). The Pareto front represents the closest we can come to this ideal based on our data and choice of estimator. Note the range of the axes - the disparity axis covers more values than the accuracy, so we can reduce disparity substantially for a small loss in accuracy.
#
# By clicking on individual models on the plot, we can inspect their metrics for disparity and accuracy in greater detail. In a real example, we would then pick the model which represented the best trade-off between accuracy and disparity given the relevant business constraints.

# # AzureML integration
#
# We will now go through a brief example of the AzureML integration.
#
# The required package can be installed via:
#
# ```
# pip install azureml-contrib-fairness
# pip install azureml-interpret
# ```

# ## Connect to workspace
#
# Just like in the previous tutorials, we will need to connect to a [workspace](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.workspace(class)?view=azure-ml-py).
#
# The following code will allow you to create a workspace if you don't already have one created. You must have an Azure subscription to create a workspace:
#
# ```python
# from azureml.core import Workspace
# ws = Workspace.create(name='myworkspace',
#                       subscription_id='<azure-subscription-id>',
#                       resource_group='myresourcegroup',
#                       create_resource_group=True,
#                       location='eastus2')
# ```
#
# **If you are running this on a Notebook VM, you can import the existing workspace.**

# +
from azureml.core import Workspace

ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')
# -

# ## Registering models
#
# The fairness dashboard is designed to integrate with registered models, so we need to do this for the models we want in the Studio portal. The assumption is that the names of the models specified in the dashboard dictionary correspond to the `id`s (i.e. `<name>:<version>` pairs) of registered models in the workspace.

# Next, we register each of the models in the `dashboard_predicted` dictionary into the workspace. For this, we have to save each model to a file, and then register that file:

# +
import joblib
import os
from azureml.core import Model, Experiment, Run

os.makedirs('models', exist_ok=True)


def register_model(name, model):
    """Serialize `model` to models/<name>.pkl with joblib and register it in
    the workspace; returns the registered model id (<name>:<version>)."""
    print("Registering ", name)
    model_path = "models/{0}.pkl".format(name)
    joblib.dump(value=model, filename=model_path)
    registered_model = Model.register(model_path=model_path,
                                      model_name=name,
                                      workspace=ws)
    print("Registered ", registered_model.id)
    return registered_model.id


# NOTE(review): dashboard_all maps names to PREDICTION arrays, not fitted
# estimators, so this registers pickled predictions under each name --
# confirm that is the intent (it matches how dashboard_all_ids is used below).
# FIX: the loop variable is no longer called `model`, which previously
# clobbered the fitted pipeline of the same name.
model_name_id_mapping = dict()
for name, model_obj in dashboard_all.items():
    m_id = register_model(name, model_obj)
    model_name_id_mapping[name] = m_id
# -

# Now, produce new predictions dictionaries, with the updated names:

dashboard_all_ids = dict()
for name, y_pred in dashboard_all.items():
    dashboard_all_ids[model_name_id_mapping[name]] = y_pred

# ## Uploading a dashboard
#
# We create a _dashboard dictionary_ using Fairlearn's `metrics` package. The `_create_group_metric_set` method has arguments similar to the Dashboard constructor, except that the sensitive features are passed as a dictionary (to ensure that names are available), and we must specify the type of prediction. Note that we use the `dashboard_registered` dictionary we just created:

# +
sf = { 'sex': sensitive_features_test.sex, 'race': sensitive_features_test.race }

from fairlearn.metrics._group_metric_set import _create_group_metric_set

dash_dict_all = _create_group_metric_set(y_true=y_test,
                                         predictions=dashboard_all_ids,
                                         sensitive_features=sf,
                                         prediction_type='binary_classification')
# -

# Now, we import our `contrib` package which contains the routine to perform the upload:

from azureml.contrib.fairness import upload_dashboard_dictionary, download_dashboard_by_upload_id

# Now we can create an Experiment, then a Run, and upload our dashboard to it:

# +
exp = Experiment(ws, 'responsible-ai-loan-decision')
print(exp)

run = exp.start_logging()

try:
    dashboard_title = "Upload MultiAsset from Grid Search with Census Data Notebook"
    upload_id = upload_dashboard_dictionary(run,
                                            dash_dict_all,
                                            dashboard_name=dashboard_title)
    print("\nUploaded to id: {0}\n".format(upload_id))

    downloaded_dict = download_dashboard_by_upload_id(run, upload_id)
finally:
    run.complete()
# -

# ## Uploading explanations

# +
from azureml.interpret import ExplanationClient

client = ExplanationClient.from_run(run)
client.upload_model_explanation(global_explanation, comment = "census data global explanation")
how-to-use-azureml/responsible-ai/visualize-upload-loan-decision/rai-loan-decision.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Loading and Preprocessing Data

# In this notebook you will learn how to use TensorFlow's Data API to load and preprocess data efficiently, then you will learn about the efficient `TFRecord` binary format for storing your data.

# ## Imports

# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sklearn
import sys
import tensorflow as tf
from tensorflow import keras
import time

print("python", sys.version)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)

assert sys.version_info >= (3, 5) # Python ≥3.5 required
assert hasattr(tf, "function") # TensorFlow ≥2.0 required

# ## Code examples

# You can browse through the code examples or jump directly to the exercises.

dataset = tf.data.Dataset.from_tensor_slices(np.arange(10))
dataset

for item in dataset:
    print(item)

dataset = dataset.repeat(3).batch(7)

# + tags=["raises-exception"]
for item in dataset:
    print(item)
# -

dataset = dataset.interleave(
    lambda v: tf.data.Dataset.from_tensor_slices(v),
    cycle_length=3,
    block_length=2)

# + tags=["raises-exception"]
for item in dataset:
    print(item.numpy(), end=" ")
# -

# ## Split the California dataset to multiple CSV files

# Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set, and finally we scale it:

# +
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
    housing.data, housing.target.reshape(-1, 1), random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(
    X_train_full, y_train_full, random_state=42)

# Fit the scaler on the training set only, then apply it everywhere.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_valid_scaled = scaler.transform(X_valid)
X_test_scaled = scaler.transform(X_test)
# -

# For very large datasets that do not fit in memory, you will typically want to split it into many files first, then have TensorFlow read these files in parallel. To demonstrate this, let's start by splitting the scaled housing dataset and saving it to 20 CSV files:

def save_to_multiple_csv_files(data, name_prefix, header=None, n_parts=10):
    """Write the 2D array `data` to `n_parts` CSV files named
    datasets/housing/my_<name_prefix>_<NN>.csv and return the file paths."""
    housing_dir = os.path.join("datasets", "housing")
    os.makedirs(housing_dir, exist_ok=True)
    path_format = os.path.join(housing_dir, "my_{}_{:02d}.csv")

    filenames = []
    m = len(data)
    # Split the row indices into n_parts (roughly) equal chunks.
    for file_idx, row_indices in enumerate(np.array_split(np.arange(m), n_parts)):
        part_csv = path_format.format(name_prefix, file_idx)
        filenames.append(part_csv)
        with open(part_csv, "wt", encoding="utf-8") as f:
            if header is not None:
                f.write(header)
                f.write("\n")
            for row_idx in row_indices:
                f.write(",".join([repr(col) for col in data[row_idx]]))
                f.write("\n")
    return filenames

# +
train_data = np.c_[X_train_scaled, y_train]
valid_data = np.c_[X_valid_scaled, y_valid]
test_data = np.c_[X_test_scaled, y_test]
header_cols = ["Scaled" + name for name in housing.feature_names] + ["MedianHouseValue"]
header = ",".join(header_cols)

train_filenames = save_to_multiple_csv_files(train_data, "train", header, n_parts=20)
valid_filenames = save_to_multiple_csv_files(valid_data, "valid", header, n_parts=10)
test_filenames = save_to_multiple_csv_files(test_data, "test", header, n_parts=10)
# -

# Okay, now let's take a peek at the first few lines of one of these CSV files:

with open(train_filenames[0]) as f:
    for i in range(3):
        print(f.readline(), end="")

# ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg)

# ## Exercise 1 – Data API

# 1) Use `tf.data.Dataset.list_files()` to create a dataset that will simply list the training filenames. Iterate through its items and print them.

# 2) Use the filename dataset's `interleave()` method to create a dataset that will read from these CSV files, interleaving their lines. The first argument needs to be a function (e.g., a `lambda`) that creates a `tf.data.TextLineDataset` based on a filename, and you must also set `cycle_length=5` so that the reader interleaves data from 5 files at a time. Print the first 15 elements from this dataset to see that you do indeed get interleaved lines from multiple CSV files (you should get the first line from 5 files, then the second line from these same files, then the third lines). **Tip**: To get only the first 15 elements, you can call the dataset's `take()` method.

# 3) We do not care about the header lines, so let's skip them. You can use the `skip()` method for this. Print the first five elements of your final dataset to make sure it does not print any header lines. **Tip**: make sure to call `skip()` for each `TextLineDataset`, not for the interleave dataset.

# 4) We need to parse these CSV lines. First, experiment with the `tf.io.decode_csv()` function using the example below (e.g., look at the types, try changing or removing some field values, etc.).
# * You need to pass it the line to parse, and set the `record_defaults` argument. This must be an array containing the default value for each field, in case it is missing. This also tells TensorFlow the number of fields to expect, and the type of each field. If you do not want a default value for a given field, you must use an empty tensor of the appropriate type (e.g., `tf.constant([])` for a `float32` field, or `tf.constant([], dtype=tf.int64)` for an `int64` field).

record_defaults=[0, np.nan, tf.constant(np.nan, dtype=tf.float64), "Hello", tf.constant([])]
parsed_fields = tf.io.decode_csv('1,2,3,4,5', record_defaults)
parsed_fields

# 5) Now you are ready to create a function to parse a CSV line:
# * Create a `parse_csv_line()` function that takes a single line as argument.
# * Call `tf.io.decode_csv()` to parse that line.
# * Call `tf.stack()` to create a single tensor containing all the input features (i.e., all fields except the last one).
# * Reshape the labels field (i.e., the last field) to give it a shape of `[1]` instead of `[]` (i.e., it must not be a scalar). You can use `tf.reshape(label_field, [1])`, or call `tf.stack([label_field])`, or use `label_field[tf.newaxis]`.
# * Return a tuple with both tensors (input features and labels).
# * Try calling it on a single line from one of the CSV files.

# 6) Now create a `csv_reader_dataset()` function that takes a list of CSV filenames and returns a dataset that will provide batches of parsed and shuffled data from these files, including the features and labels, repeating the whole data once per epoch.
#
# **Tips**:
# * Copy your code from above to get a dataset that returns interleaved lines from the given CSV files. Your function will need an argument for the `filenames`, and another for the number of files read in parallel at any given time (e.g., `n_reader`).
# * The training algorithm will need to go through the dataset many times, so you should call `repeat()` on the filenames dataset. You do not need to specify a number of repetitions, as we will tell Keras the number of iterations to run later on.
# * Gradient descent works best when the data is IID (independent and identically distributed), so you should call the `shuffle()` method. It will require the shuffling buffer size, which you can add as an argument to your function (e.g., `shuffle_buffer_size`).
# * Use the `map()` method to apply the `parse_csv_line()` function to each CSV line. You can set the `num_parallel_calls` argument to the number of threads that will parse lines in parallel. This should probably be an argument of your function (e.g., `n_parse_threads`).
# * Use the `batch()` method to bundle records into batches. You will need to specify the batch size. This should probably be an argument of your function (e.g., `batch_size`).
# * Call `prefetch(1)` on your final dataset to ensure that the next batch is loaded and parsed while the rest of your computations take place in parallel (to avoid blocking for I/O).
# * Return the resulting dataset.
# * Give every argument a reasonable default value (except for the filenames).
# * Test your function by calling it with a small batch size and printing the first couple of batches.
# * For higher performance, you can replace `dataset.map(...).batch(...)` with `dataset.apply(map_and_batch(...))`, where `map_and_batch()` is an experimental function located in `tf.data.experimental`. It will be deprecated in future versions of TensorFlow when such pipeline optimizations become automatic.

# 7) Build a training set, a validation set and a test set using your `csv_reader_dataset()` function.

# 8) Build and compile a Keras model for this regression task, and use your datasets to train it, evaluate it and make predictions for the test set.
#
# **Tips**
# * Instead of passing `X_train_scaled, y_train` to the `fit()` method, pass the training dataset and specify the `steps_per_epoch` argument. This should be set to the number of instances in the training set divided by the batch size.
# * Similarly, pass the validation dataset instead of `(X_valid_scaled, y_valid)` and `y_valid`, and set the `validation_steps`.
# * For the `evaluate()` and `predict()` methods, you need to pass the test dataset, and specify the `steps` argument.
# * The `predict()` method ignores the labels in the test dataset, but if you want to be extra sure that it does not cheat, you can create a new dataset by stripping away the labels from the test set (e.g., `test_set.map(lambda X, y: X)`).

# ![Exercise solution](https://camo.githubusercontent.com/250388fde3fac9135ead9471733ee28e049f7a37/68747470733a2f2f75706c6f61642e77696b696d656469612e6f72672f77696b6970656469612f636f6d6d6f6e732f302f30362f46696c6f735f736567756e646f5f6c6f676f5f253238666c69707065642532392e6a7067)

# ## Exercise 1 – Solution

# 1) Use `tf.data.Dataset.list_files()` to create a dataset that will simply list the training filenames. Iterate through its items and print them.

# list_files() yields the filenames in a shuffled order by default.
filename_dataset = tf.data.Dataset.list_files(train_filenames)
for filename in filename_dataset:
    print(filename)

# 2) Use the filename dataset's `interleave()` method to create a dataset that will read from these CSV files, interleaving their lines. The first argument needs to be a function (e.g., a `lambda`) that creates a `tf.data.TextLineDataset` based on a filename, and you must also set `cycle_length=5` so that the reader interleaves data from 5 files at a time. Print the first 15 elements from this dataset to see that you do indeed get interleaved lines from multiple CSV files (you should get the first line from 5 files, then the second line from these same files, then the third lines). **Tip**: To get only the first 15 elements, you can call the dataset's `take()` method.

n_readers = 5
dataset = filename_dataset.interleave(
    lambda filename: tf.data.TextLineDataset(filename),
    cycle_length=n_readers)
for line in dataset.take(15):
    print(line.numpy())

# 3) We do not care about the header lines, so let's skip them. You can use the `skip()` method for this. Print the first five elements of your final dataset to make sure it does not print any header lines. **Tip**: make sure to call `skip()` for each `TextLineDataset`, not for the interleave dataset.

# skip(1) drops the per-file header line before interleaving.
dataset = filename_dataset.interleave(
    lambda filename: tf.data.TextLineDataset(filename).skip(1),
    cycle_length=n_readers)
for line in dataset.take(5):
    print(line.numpy())

# 4) We need to parse these CSV lines. First, experiment with the `tf.io.decode_csv()` function using the example below (e.g., look at the types, try removing some field values, etc.).

# Notice that field 4 is interpreted as a string.

record_defaults=[0, np.nan, tf.constant(np.nan, dtype=tf.float64), "Hello", tf.constant([])]
parsed_fields = tf.io.decode_csv('1,2,3,4,5', record_defaults)
parsed_fields

# Notice that all missing fields are replaced with their default value, when provided:

parsed_fields = tf.io.decode_csv(',,,,5', record_defaults)
parsed_fields

# The 5th field is compulsory (since we provided `tf.constant([])` as the "default value"), so we get an exception if we do not provide it:

try:
    parsed_fields = tf.io.decode_csv(',,,,', record_defaults)
except tf.errors.InvalidArgumentError as ex:
    print(ex)

# The number of fields should match exactly the number of fields in the `record_defaults`:

try:
    parsed_fields = tf.io.decode_csv('1,2,3,4,5,6,7', record_defaults)
except tf.errors.InvalidArgumentError as ex:
    print(ex)

# 5) Now you are ready to create a function to parse a CSV line:
# * Create a `parse_csv_line()` function that takes a single line as argument.
# * Call `tf.io.decode_csv()` to parse that line.
# * Call `tf.stack()` to create a single tensor containing all the input features (i.e., all fields except the last one).
# * Reshape the labels field (i.e., the last field) to give it a shape of `[1]` instead of `[]` (i.e., it must not be a scalar). You can use `tf.reshape(label_field, [1])`, or call `tf.stack([label_field])`, or use `label_field[tf.newaxis]`.
# * Return a tuple with both tensors (input features and labels).
# * Try calling it on a single line from one of the CSV files.
# +
n_inputs = X_train.shape[1]

def parse_csv_line(line, n_inputs=n_inputs):
    """Parse one CSV line into (features, label) tensors.

    Every field defaults to NaN; the last field is the label, reshaped to
    shape [1] so batches of labels have shape (batch_size, 1).
    """
    defs = [np.nan] * (n_inputs + 1)
    fields = tf.io.decode_csv(line, record_defaults=defs)
    x = tf.stack(fields[:-1])
    y = tf.stack(fields[-1:])
    return x, y
# -

parse_csv_line(b'-0.739840972632228,-0.3658395634576743,-0.784679995482575,0.07414513752253027,0.7544706668961565,0.407700592469922,-0.686992593958441,0.6019005115704453,2.0')

# 6) Now create a `csv_reader_dataset()` function that takes a list of CSV filenames and returns a dataset that will provide batches of parsed and shuffled data from these files, including the features and labels, repeating the whole data once per epoch.

def csv_reader_dataset(filenames, n_parse_threads=5, batch_size=32,
                       shuffle_buffer_size=10000, n_readers=5):
    """Build a repeating, shuffled, batched dataset from CSV `filenames`."""
    dataset = tf.data.Dataset.list_files(filenames)
    dataset = dataset.repeat()
    dataset = dataset.interleave(
        lambda filename: tf.data.TextLineDataset(filename).skip(1),
        cycle_length=n_readers)
    # BUG FIX: Dataset.shuffle() returns a NEW dataset; the original code
    # discarded the return value, so the data was never actually shuffled.
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(parse_csv_line, num_parallel_calls=n_parse_threads)
    dataset = dataset.batch(batch_size)
    return dataset.prefetch(1)

# This version uses `map_and_batch()` to get a performance boost (but remember that this feature is experimental and will eventually be deprecated, as explained earlier):

def csv_reader_dataset(filenames, batch_size=32, shuffle_buffer_size=10000,
                       n_readers=5):
    """Same as above, but fuses map+batch via tf.data.experimental.map_and_batch."""
    dataset = tf.data.Dataset.list_files(filenames)
    dataset = dataset.repeat()
    dataset = dataset.interleave(
        lambda filename: tf.data.TextLineDataset(filename).skip(1),
        cycle_length=n_readers)
    # BUG FIX: assign the shuffled dataset (the original call was a no-op).
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            parse_csv_line, batch_size,
            num_parallel_calls=tf.data.experimental.AUTOTUNE))
    return dataset.prefetch(1)

train_set = csv_reader_dataset(train_filenames, batch_size=3)
for X_batch, y_batch in train_set.take(2):
    print("X =", X_batch)
    print("y =", y_batch)
    print()

# 7) Build a training set, a validation set and a test set using your `csv_reader_dataset()` function.

batch_size = 32
train_set = csv_reader_dataset(train_filenames, batch_size)
valid_set = csv_reader_dataset(valid_filenames, batch_size)
test_set = csv_reader_dataset(test_filenames, batch_size)

# 8) Build and compile a Keras model for this regression task, and use your datasets to train it, evaluate it and make predictions for the test set.

model = keras.models.Sequential([
    keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
    keras.layers.Dense(1),
])
model.compile(loss="mse", optimizer="sgd")
model.fit(train_set, steps_per_epoch=len(X_train) // batch_size, epochs=10,
          validation_data=valid_set,
          validation_steps=len(X_valid) // batch_size)

model.evaluate(test_set, steps=len(X_test) // batch_size)

# Strip the labels so predict() cannot possibly see them.
new_set = test_set.map(lambda X, y: X)
model.predict(new_set, steps=len(X_test) // batch_size)

# ![Exercise](https://c1.staticflickr.com/9/8101/8553474140_c50cf08708_b.jpg)

# ## Exercise 2 – The `TFRecord` binary format

# ### Code examples

# You can walk through these code examples or jump down to the [actual exercise](#Actual-exercise) below.
favorite_books = [name.encode("utf-8") for name in ["Arluk", "Fahrenheit 451", "L'étranger"]] favorite_books = tf.train.BytesList(value=favorite_books) favorite_books hours_per_month = tf.train.FloatList(value=[20.5, 18.0, np.nan, 6.0, 17.5]) hours_per_month age = tf.train.Int64List(value=[42]) age coordinates = tf.train.FloatList(value=[1.2834, 103.8607]) coordinates features=tf.train.Features( feature={ "favorite_books": tf.train.Feature(bytes_list=favorite_books), "hours_per_month": tf.train.Feature(float_list=hours_per_month), "age": tf.train.Feature(int64_list=age), "coordinates": tf.train.Feature(float_list=coordinates), } ) features example = tf.train.Example(features=features) example serialized_example = example.SerializeToString() serialized_example filename = "my_reading_data.tfrecords" with tf.io.TFRecordWriter(filename) as writer: for i in range(5): # you should save different examples instead! :) writer.write(serialized_example) for serialized_example_tensor in tf.data.TFRecordDataset([filename]): print(serialized_example_tensor) # + expected_features = { "favorite_books": tf.io.VarLenFeature(dtype=tf.string), "hours_per_month": tf.io.VarLenFeature(dtype=tf.float32), "age": tf.io.FixedLenFeature([], dtype=tf.int64), "coordinates": tf.io.FixedLenFeature([2], dtype=tf.float32), } for serialized_example_tensor in tf.data.TFRecordDataset([filename]): print(tf.io.parse_single_example(serialized_example_tensor, expected_features)) # - # ## Actual exercise # TODO: add an exercise to convert CSV files to TFRecord files, and create a `TFDatasetReader` to load the data.
src/tf2_course-master/03_loading_and_preprocessing_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Developing Traffic Light Labels

# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import time
from scipy.stats import norm

# %matplotlib inline
plt.style.use('ggplot')
# -

# Frozen inference graph files. NOTE: change the path to where you saved the models.
SSD_GRAPH_FILE = './udacity_object_detection_frozen_models/ssd_mobilenet_v1_coco_11_06_2017/frozen_inference_graph.pb'
RFCN_GRAPH_FILE = './udacity_object_detection_frozen_models/rfcn_resnet101_coco_11_06_2017/frozen_inference_graph.pb'
FASTER_RCNN_GRAPH_FILE = './udacity_object_detection_frozen_models/faster_rcnn_inception_resnet_v2_atrous_coco_11_06_2017/frozen_inference_graph.pb'

# Below are utility functions. The main purpose of these is to draw the bounding boxes back onto the original image.

# +
# Colors (one for each class)
cmap = ImageColor.colormap
print("Number of colors =", len(cmap))
COLOR_LIST = sorted([c for c in cmap.keys()])

#
# Utility funcs
#

def filter_boxes(min_score, boxes, scores, classes):
    """Return boxes with a confidence >= `min_score`.

    `boxes`, `scores` and `classes` are parallel arrays from the detector;
    the same integer index selects one detection in all three.
    """
    n = len(classes)
    idxs = []
    for i in range(n):
        if scores[i] >= min_score:
            idxs.append(i)
    # Fancy-index all three arrays with the same surviving indices so they
    # stay aligned with each other.
    filtered_boxes = boxes[idxs, ...]
    filtered_scores = scores[idxs, ...]
    filtered_classes = classes[idxs, ...]
    return filtered_boxes, filtered_scores, filtered_classes

def to_image_coords(boxes, height, width):
    """
    The original box coordinate output is normalized, i.e [0, 1].

    This converts it back to the original coordinate based on the image size.
    Columns 0 and 2 are scaled by height, columns 1 and 3 by width
    (i.e. box layout is assumed to be [y_min, x_min, y_max, x_max] —
    the usual TF object-detection convention; confirm against the model).
    """
    box_coords = np.zeros_like(boxes)
    box_coords[:, 0] = boxes[:, 0] * height
    box_coords[:, 1] = boxes[:, 1] * width
    box_coords[:, 2] = boxes[:, 2] * height
    box_coords[:, 3] = boxes[:, 3] * width
    return box_coords

def draw_boxes(image, boxes, classes, thickness=4):
    """Draw bounding boxes on the image (mutates `image` via PIL ImageDraw).

    The line color is selected from COLOR_LIST by the integer class id.
    """
    draw = ImageDraw.Draw(image)
    for i in range(len(boxes)):
        bot, left, top, right = boxes[i, ...]
        class_id = int(classes[i])
        color = COLOR_LIST[class_id]
        # Trace the four edges and close the rectangle back at the start point.
        draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color)

def load_graph(graph_file):
    """Loads a frozen inference graph (.pb file) into a fresh tf.Graph."""
    graph = tf.Graph()
    with graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(graph_file, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    return graph

# +
#detection_graph = load_graph(SSD_GRAPH_FILE)
detection_graph = load_graph(RFCN_GRAPH_FILE)
#detection_graph = load_graph(FASTER_RCNN_GRAPH_FILE)

# The input placeholder for the image.
# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

# Each box represents a part of the image where a particular object was detected.
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

# Each score represents the level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')

# The classification of the object (integer id). 
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')

# +
import os
import csv
import pandas as pd

data_paths = ['./train/green','./train/yellow','./train/red','./train/no']
#data_paths = ['./train/no']
process_path = './train/processed'

# Build the list of input image paths, matching output paths, and a numeric
# label per image (1..4, derived from the order of data_paths above).
# (Removed stray `df = pd.DataFrame` — it bound the class object and was never used.)
images_name = []
proccessed_names = []
data_labels = []
for i, data_path in enumerate(data_paths):
    images = os.listdir(data_path)
    #print(images)
    for image in images:
        images_name.append(data_path + '/' + image)
        proccessed_names.append(process_path + '/' + image)
        data_labels.append(i + 1)
print(images_name)
print(data_labels)
# -

# Load a sample image.
#image = Image.open('./assets/sample1.jpg') #'./train/green/915_8540_3.jpg'
# image_name = './train/yellow/left0789.jpg' #'./train/red/040_8540_3.jpg'
total_boxes = []
for i, image_name in enumerate(images_name):
    #image_name = './train/yellow/left0789.jpg'
    image = Image.open(image_name)
    image_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)
    # NOTE(review): opening a new Session per image is slow; hoisting a single
    # Session above the loop would behave identically but run much faster.
    with tf.Session(graph=detection_graph) as sess:
        # Actual detection.
        (boxes, scores, classes) = sess.run([detection_boxes, detection_scores, detection_classes],
                                            feed_dict={image_tensor: image_np})
        # Remove unnecessary dimensions
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)

        confidence_cutoff = 0.8
        # Filter boxes with a confidence score less than `confidence_cutoff`
        boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)

        # The current box coordinates are normalized to a range between 0 and 1.
        # This converts the coordinates actual location on the image.
        width, height = image.size
        box_coords = to_image_coords(boxes, height, width)
        total_boxes.append(box_coords)

        # Each class will be represented by a differently colored box
        draw_boxes(image, box_coords, classes)
        print(classes)
        print(scores)
        plt.figure(figsize=(12, 8))
        plt.imshow(image)
        # BUG FIX: the original saved to proccessed_names[0] on every
        # iteration, so each annotated image overwrote the first output file.
        plt.savefig(proccessed_names[i])

# +
'''
print(box_coords)
class_dict = {
    1: 'green',  # List of class map Text with byte
    2: 'yellow'
}
print(class_dict[1])
print(total_yellow_boxes[2])
'''
# Accumulate one CSV row per detected box: [y_min, x_min, y_max, x_max, label],
# plus (in `images`) the source image path aligned row-for-row with `c`.
images = []
c = np.empty([1,5])
#print(c)
i = -1
for one_image_boxs, image_name in zip(total_boxes, images_name):
    i = i + 1
    for box in one_image_boxs:
        b2 = box
        b2 = np.append(b2, [data_labels[i]])
        #print(b2)
        c = np.vstack([c, b2])
        images.append(image_name)

file_name = './train/processed/labeld_data.csv'
#c.reshape((5,3))
c = np.delete(c, (0), axis=0)  # drop the np.empty placeholder row
print(c)
sz = np.size(c) / 5
print(sz)
# FIX: infer the row count instead of hard-coding 145, so this cell works
# for any number of detected boxes.
d = np.asarray(images).reshape((-1, 1))
print(len(c))
print(len(d))
df = pd.DataFrame(c)
df['b'] = d
df.to_csv(file_name)
#e = np.column_stack([c,d[:,1]])
print(images)
#a = numpy.asarray([ [1,2,3], [4,5,6], [7,8,9] ])
#np.savetxt(file_name, c, delimiter=",", fmt="%s")
# -
Object_detection/Developing Traffic Light Labels .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # > This is one of the 100 recipes of the [IPython Cookbook](http://ipython-books.github.io/), the definitive guide to high-performance scientific computing and data science in Python. # # + [markdown] word_id="4818_07_kde" # # 7.6. Estimating a probability distribution nonparametrically with a Kernel Density Estimation # - # You need to download the *Storms* dataset on the book's website, and extract it in the current directory. (http://ipython-books.github.io) # # You also need matplotlib's toolkit *basemap*. (http://matplotlib.org/basemap/) # 1. Let's import the usual packages. The kernel density estimation with a Gaussian kernel is implemented in *SciPy.stats*. import numpy as np import pandas as pd import scipy.stats as st import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap # %matplotlib inline # 2. Let's open the data with Pandas. # http://www.ncdc.noaa.gov/ibtracs/index.php?name=wmo-data df = pd.read_csv("data/Allstorms.ibtracs_wmo.v03r05.csv") # 3. The dataset contains information about most storms since 1848. A single storm may appear multiple times across several consecutive days. df[df.columns[[0,1,3,8,9]]].head() # 4. We use Pandas' `groupby` function to obtain the average location of every storm. dfs = df.groupby('Serial_Num') pos = dfs[['Latitude', 'Longitude']].mean() y, x = pos.values.T pos.head() # 5. We display the storms on a map with basemap. This toolkit allows us to easily project the geographical coordinates on the map. m = Basemap(projection='mill', llcrnrlat=-65 ,urcrnrlat=85, llcrnrlon=-180, urcrnrlon=180) x0, y0 = m(-180, -65) x1, y1 = m(180, 85) plt.figure(figsize=(10,6)) m.drawcoastlines() m.fillcontinents(color='#dbc8b2') xm, ym = m(x, y) m.plot(xm, ym, '.r', alpha=.1); # 6. 
To perform the Kernel Density Estimation, we need to stack the x and y coordinates of the storms into a 2xN array. h = np.vstack((xm, ym)) kde = st.gaussian_kde(h) # 7. The `gaussian_kde` routine returned a Python function. To see the results on a map, we need to evaluate this function on a 2D grid spanning the entire map. We create this grid with `meshgrid`, and we pass the x, y values to the `kde` function. We need to arrange the shape of the array since `kde` accepts a 2xN array as input. k = 50 tx, ty = np.meshgrid(np.linspace(x0, x1, 2*k), np.linspace(y0, y1, k)) v = kde(np.vstack((tx.ravel(), ty.ravel()))).reshape((k, 2*k)) # 8. Finally, we display the estimated density with `imshow`. plt.figure(figsize=(10,6)) m.drawcoastlines() m.fillcontinents(color='#dbc8b2') xm, ym = m(x, y) m.imshow(v, origin='lower', extent=[x0,x1,y0,y1], cmap=plt.get_cmap('Reds')); # > You'll find all the explanations, figures, references, and much more in the book (to be released later this summer). # # > [IPython Cookbook](http://ipython-books.github.io/), by [<NAME>](http://cyrille.rossant.net), Packt Publishing, 2014 (500 pages).
notebooks/chapter07_stats/06_kde.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.3 64-bit (''base'': conda)'
#     language: python
#     name: python373jvsc74a57bd0210f9608a45c0278a93c9e0b10db32a427986ab48cfc0d20c139811eb78c4bbc
# ---

import nltk
from nltk.stem.porter import *
from torch.nn import *
from torch.optim import *
import numpy as np
import pandas as pd
import torch,torchvision
import random
from tqdm import *
from torch.utils.data import Dataset,DataLoader

stemmer = PorterStemmer()

def tokenize(sentence):
    """Split a sentence into word tokens via NLTK's default tokenizer."""
    return nltk.word_tokenize(sentence)

tokenize('$1000')

def stem(word):
    """Lower-case a word and reduce it to its Porter stem."""
    return stemmer.stem(word.lower())

stem('organic')

def bag_of_words(tokenized_words,all_words):
    """Encode a token list as a 0/1 vector over the vocabulary `all_words`.

    Tokens are stemmed first; slot i is 1.0 when vocabulary word i appears
    among the stemmed tokens, otherwise 0.0.
    """
    stems = [stem(token) for token in tokenized_words]
    bag = np.zeros(len(all_words))
    for slot, vocab_word in enumerate(all_words):
        if vocab_word in stems:
            bag[slot] = 1.0
    return bag

bag_of_words(['hi'],['how','hi'])

data = pd.read_csv('./data.csv')[:2500]
X = data['comment']
y = data['subreddit']

# Build the stemmed vocabulary, the per-comment token lists, and the tag set.
all_words = []
all_data = []
tags = []
for X_batch,y_batch in tqdm(zip(X,y)):
    stemmed = [stem(token) for token in tokenize(X_batch)]
    all_words.extend(stemmed)
    all_data.append([stemmed,y_batch])
    tags.append(y_batch)

all_words = sorted(set(all_words))
tags = sorted(set(tags))
len(tags)
00.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/prashant90066/18CSE049/blob/main/prac.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="kqdsHdhVL5YZ"
# Sum of two integers; printed by the following cell.
a = 5 + 3

# + id="AxHsZg8FL-Rj" outputId="76c8d4e6-493d-4a14-f39a-1a7489c909fd" colab={"base_uri": "https://localhost:8080/"}
print(a)

# + id="nVjTj2OOMAw1"
prac.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Two particles in an infinite square well # <NAME>, 2021 # # Here I reproduce the figures in Ch. 13 of McIntyre for two particles in an infinite square well. # I'm assuming the following values for the constants: # $$ # \hbar = 1,\qquad # m =1, \qquad # L = \pi # $$ import numpy as np import matplotlib.pyplot as plt # %matplotlib inline # In the cell below, uncomment the case you want to plot. # + # Expressions for the joint pdfs of the 4 cases #distinguishable (known energies) p1 = lambda x1, x2: 2./np.pi**2 *(np.sin(x1)**2*np.sin(2.*x2)**2 ) # distinguishable (mixture) p2 = lambda x1, x2: 2./np.pi**2 *(np.sin(x1)**2*np.sin(2.*x2)**2 + np.sin(2.*x1)**2*np.sin(x2)**2) # indistinguishable symmetric p3 = lambda x1, x2: 1./np.pi**2 *(np.sin(x1)*np.sin(2.*x2) + np.sin(2.*x1)*np.sin(x2))**2 # indistinguishable antisymmetric p4 = lambda x1, x2: 1./np.pi**2 *(np.sin(x1)*np.sin(2.*x2) - np.sin(2.*x1)*np.sin(x2))**2 # make lists of the functions and names p= [p1, p2, p3, p4] n = ["Distinguishable (known energies)", "Distinguishable (mixture)", "Indistinguishable symmetric", "Indistinguishable anti-symmetric"] # - # prepare the grid of coordinates for plotting x1grid,x2grid=np.meshgrid(np.linspace(0.,np.pi,50),np.linspace(0.,np.pi,50)) # plot as an intensity plot fig, axs = plt.subplots(2, 2, figsize=(8,8)) axs = axs.flatten() for i in range(4): axs[i].imshow(p[i](x1grid,x2grid), origin='lower') axs[i].set_xlabel("$x_1$",size=16) axs[i].set_ylabel("$x_2$",size=16) axs[i].set_title(n[i]) axs[i].set_aspect(1) axs[i].set_xticks([0,50]) axs[i].set_xticklabels(['0','π']) axs[i].set_yticks([0,50]) axs[i].set_yticklabels(['0','π']) # plot as contour plots (trying to match style of McIntyre) fig, axs = plt.subplots(2, 2, figsize=(8,8)) axs = axs.flatten() for i 
in range(4): axs[i].contourf(x1grid,x2grid,p[i](x1grid,x2grid),cmap='gray',levels=25,vmin=0.1) axs[i].contour(x1grid,x2grid,p[i](x1grid,x2grid),colors='black',levels=25,linewidths=0.2,vmin=0.1) axs[i].set_xlabel('$x_1$') axs[i].set_ylabel('$x_2$') axs[i].set_title(n[i]) axs[i].set_aspect(1) axs[i].set_xticks([0,np.pi]) axs[i].set_xticklabels(['0','π']) axs[i].set_yticks([0,np.pi]) axs[i].set_yticklabels(['0','π']) plt.savefig('DISW-contour.png') # The RMS distance between the particles is given by # $$ # \mathrm{RMS} = \sqrt{\left<(x_1-x_2)^2 \right>} # = \left[ \int\limits_{0}^{\pi} \int\limits_{0}^{\pi} # (x_1-x_2)^2 p(x_1,x_2)\,dx_1\,dx_2 # \right]^{1/2} # $$ # Because I didn't normalize the pdf in the definition, I'll have to also divide the expectation value by $ \int_0^\pi \int_0^\pi p(x_1,x_2)\,dx_1\,dx_2$. # calculate RMS distances for each case. Approximate integrals by sums. rms = [np.sqrt(np.sum((x1grid-x2grid)**2*f(x1grid,x2grid))/np.sum(f(x1grid,x2grid))) for f in p] print("RMS distances") for i in range(4): print("{}: {:.3f}".format(n[i],rms[i]))
ISW2particle.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Quality Analysis # language: python # name: venv-qa # --- # <h1 style="color: white; # font-size: 46px; # text-align: center; # background: #00ccff; # padding: 30px; # ">Quality Report</h1> from model_image import ModelImage from quality_analysis import QualityAnalysis # # Image Binary Classification Model with TF dataset # # This first example is an image binary classification model trained on the 'cats vs dogs' Kaggle dataset. The data is loaded as a TF dataset and automatically resized to fit the dimensions of the model's input layer. model_path ='../saved_model/image-binary-classification-cats-vs-dogs' data_tf = 'cats_vs_dogs' # The cell below loads the model and the data. The labels (```y_true```) and the predictions (```y_pred```) are saved for the functionality analysis. y_true, y_pred, _ = ModelImage().load_model(model_path).load_data(data_tf=data_tf).predict() # To perform the functionality analysis, run the cell below. Since the model is a binary classification model, this analysis includes the value of the main classification metrics, the confusion matrix and the ROC curve. QualityAnalysis(y_true, y_pred, 'binary classification').evaluate() # # Image Multiclass Classification Model with downloaded data # # The data for the next example is downloaded passing the argument ```data_url``` to load_data. To access the downloaded data, use ```data_dir``` to load it from the directory. model_path = '../saved_model/image-multiclass-classification-flowers' data_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz' data_dir = '../datasets/flower_photos' # The method predict() also calculates the predictions for adversarial examples. Setting ```adv=True```, the output of this method is a a tuple with the expected, predicted and adversarial labels. 
y_true, y_pred, y_adv = ModelImage().load_model(model_path).load_data(data_dir=data_dir).predict(adv=True) QualityAnalysis(y_true, y_pred, 'multiclass classification').evaluate() # Now, evaluate the robustness of the model checking the performance with adversarial examples. QualityAnalysis(y_true, y_adv, 'multiclass classification').evaluate() # To see an example of the adversarial images, set ```plot=True```. The value of ```epsilon``` changes the perturbations that distort the original image. y_true, y_pred, y_adv = ModelImage()\ .load_model(model_path).load_data(data_dir=data_dir)\ .predict(num_examples=1, adv=True, epsilon=0.1, plot=True) # # Regression Model # # The model also can address a regression problem. In this case, the functionality analysis will calculate the values of the main regression metrics and plot the expected labels againts the predictions. # + y_true = [0.23, 0.42, 0.97, 0.31, 0.67, 0.26, 0.52, 0.91, 0.15, 0.49] y_pred = [0.19, 0.60, 0.82, 0.33, 0.72, 0.35, 0.66, 0.86, 0.23, 0.55] QualityAnalysis(y_true, y_pred, 'regression').evaluate() # - # # Next steps # # How to improve this quality analysis: # - Add methods to data.py to load and preprocess text # # # - Automate the quality analysis based on the 'data_type' and 'task' attributes # # # - Add comprehensibility analyses # # # - Add quality analysis for unsupervised tasks # # # Challenges: # - Handle different input and output shapes # # # - Preprocess the data to fit the input data of the model
ml-model-quality-analysis/quality_report.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import numpy as np import matplotlib.pyplot as plt import os # %matplotlib inline # + pycharm={"name": "#%%\n"} real_alpha = 51.6 img = cv2.imread('sti_imgs/fai=%s.png' % str(real_alpha)) # img = cv2.imread('sti/test.jpg') img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) length = min(img.shape[0], img.shape[1]) length = length if length % 2 == 0 else length -1 img = img[0:length, 0:length] plt.imshow(img, 'gray') plt.show() # + pycharm={"name": "#%%\n"} img_f = np.fft.fft2(img) img_ac = np.fft.fftshift(np.fft.ifft2(np.abs(img_f) ** 2)) width, height = img_ac.shape img_ac_show = abs(img_ac) img_ac_show = img_ac_show / img_ac_show[width // 2][height // 2] plt.imshow(img_ac_show, 'gray') # + pycharm={"name": "#%%\n"} img_f_real = np.real(img_f) img_f_real = cv2.normalize(img_f_real, None, 0, 255, cv2.NORM_MINMAX) plt.imshow(img_f_real, 'gray') # + pycharm={"name": "#%%\n"} img_ac1 = cv2.filter2D(img, -1, img, anchor=(-1, -1)) plt.imshow(img_ac1, 'gray') # + pycharm={"name": "#%%\n"} # center = (width / 2, height / 2) # maxRadius = width / 2 * np.sqrt(2) # img_ac_polar = cv2.linearPolar(img_ac_show, center, maxRadius, # cv2.WARP_FILL_OUTLIERS + cv2.INTER_LINEAR) # plt.imshow(img_ac_polar, 'gray') # + pycharm={"name": "#%%\n"} from LinearPolar import RecTangle2Polar theta_num = 180 img_ac_polar = RecTangle2Polar(img_ac_show, [width//2, theta_num]) plt.imshow(img_ac_polar, 'gray') # + pycharm={"name": "#%%\n"} mu = {} for i in range(img_ac_polar.shape[0]): mu_temp = sum(img_ac_polar[i, :]) - (img_ac_polar[i, 0] + img_ac_polar[i, img_ac_polar.shape[1]-1]) / 2 mu[i] = mu_temp mu_ordered = sorted(mu.items(), key = lambda x:x[1], reverse=True) fai = 90 - list(mu_ordered)[0][0] * 360 / theta_num % 90 print('real_fai=%.2f, cal_fai=%.2f' % (real_alpha, 
fai)) # + pycharm={"name": "#%%\n"} NTI = mu_ordered[0][1] / mu_ordered[len(mu_ordered)-1][1] print(NTI)
FFTMethod.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # NSCI 801 - Quantitative Neuroscience # ## Introduction # <NAME> # + [markdown] slideshow={"slide_type": "slide"} # ### Outline # # * Why quantitative Neuroscience? # * Course overview & materials # * The research process # * Study design # # + [markdown] slideshow={"slide_type": "slide"} # ### Why Quantitative Neuroscience? # # * We want to quantify observations # * But data is corrupted by noise # * Certain things are not directly observable (latent) # + we need models! # * Ultimately we want to identify **_causal_** relationships # + [markdown] slideshow={"slide_type": "subslide"} # ### Why Quantitative Neuroscience? # # * We want to quantify observations # - questionnaires # - measurements # # But: such observations are variable... # + [markdown] slideshow={"slide_type": "subslide"} # ### Why Quantitative Neuroscience? # # * We want to quantify observations # * But data is corrupted by noise # - noise in the process / system # - noise due to the measurement # - noise due to A/D conversion # - noise due to post-processing # # Thus: we need ways to infer reality from noisy data # + [markdown] slideshow={"slide_type": "subslide"} # ### Why Quantitative Neuroscience? # # * We want to quantify observations # * But data is corrupted by noise # * Certain things are not directly observable (latent) # - e.g. we cannot measure your thought process (yet), only the outcome! # - e.g. 
we cannot measure inflammation, only the body's reaction # - we often want good "measures" of latent variables # * Ultimately we want to identify **_causal_** relationships # # Solution: we need models that causally link latent variables to measurable quantities # + [markdown] slideshow={"slide_type": "slide"} # ### Course overview & materials # # * [course web site](http://www.compneurosci.com/NSCI801.html) # * we will use Google Colab - you need a Google account! # * all lecture materials will be in Python & Markdown # * slides / tutorials will be shared on [GitHub](https://github.com/BlohmLab/NSCI801-QuantNeuro)... # * download code from Github into Colab: File>>Open Notebook>>Github... # + [markdown] slideshow={"slide_type": "subslide"} # ### for those interested... # # * Jupyter Notebook has a "slide" option that produces HTML5 slides # - [read more about it](https://medium.com/@mjspeck/presenting-code-using-jupyter-notebook-slides-a8a3c3b59d67) # * install Reveal.js - Jupyter/IPython Slideshow Extension (RISE) # - [install and use](https://rise.readthedocs.io/en/maint-5.5/index.html) # # But you **don't** need any of this! 
# + [markdown] slideshow={"slide_type": "subslide"} # ### Course overview & materials # # #### Goals of the course: # * hands-on skills in signal processing, basic and advanced statistics, data neuroscience (machine learning) and model fitting methods # * gain intuitive understanding of these topics # * introduction to scientific programming in Python # * familiarization with open science framework approaches # + [markdown] slideshow={"slide_type": "subslide"} # ### Course overview & materials # # #### [Specific topics](http://www.compneurosci.com/NSCI801.html): # * intro to Python & Colab # * signal processing # * statistics and hypothesis testing # * models & data neuroscience # * causality, reproducibility, Open Science # + [markdown] slideshow={"slide_type": "slide"} # ### The research process # # <img style="float: center; width:500px;" src="stuff/research-process.png"> # # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Research design: # * what is power? # * what is effect size? # * how to determine sample size? # # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Research design: # * what is power? # # Power calculations tell us how many samples are required in order to avoid a type I (false positive) or a type II (false negative) error # # Typically in hypothesis testing, only type II errors are considered: For a type II error probability of $\beta$, the corresponding statistical power is $1 − \beta$ # # # # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # <img style="float: right; width:300px;" src="stuff/effect-size.png"> # # #### Research design: # * what is effect size? # # Quantification of the difference between two groups # # E.g. 
Cohen $d=\frac{\mu_1-\mu_2}{\sigma}$ # # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Let's play - effect size # + slideshow={"slide_type": "fragment"} import matplotlib.pyplot as plt import numpy as np import scipy.stats as stats import math plt.style.use('dark_background') x = np.linspace(-5, 5, 200) mu1 = -.3 sigma = 1 plt.plot(x, stats.norm.pdf(x, mu1, sigma)) mu2 = .2 sigma = 1 plt.plot(x, stats.norm.pdf(x, mu2, sigma)) plt.show() print("Effect size d =", abs((mu1-mu2)/sigma)) # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Let's play - random samples # + slideshow={"slide_type": "fragment"} mu1 = -1 mu2 = 1 sigma = 1 N = 10 # number samples s1 = np.random.normal(mu1, sigma, N) s2 = np.random.normal(mu2, sigma, N) plt.hist(s1, 30, density=True) plt.hist(s2, 30, density=True) plt.show() # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Research design: # # * how to determine sample size? (aka power calculations) # # - you essentially simulate your statistical analysis # - you need to make meaningful assumptions, e.g. group difference, variability, power # - you want to know how many samples you need so that you can reliably identify the hypothesized effect # # * many tools available, e.g. [G*Power](http://www.gpower.hhu.de/), [WebPower online](https://webpower.psychstat.org/wiki/), [powerandsamplesize.com](powerandsamplesize.com), ... # * for Python: [StatsModels package](http://www.statsmodels.org/stable/index.html) # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Research design - let's compute sample size # # This is for a repeated measures t-test... 
# + slideshow={"slide_type": "fragment"} from numpy import array from statsmodels.stats.power import TTestIndPower # from statsmodels.stats.power import TTestIndPower # parameters for power analysis effect_sizes = array([0.2, 0.5, 0.8]) sample_sizes = array(range(5, 100)) # calculate power curves from multiple power analyses analysis = TTestIndPower() # or TTestIndPower for independent samples analysis.plot_power(dep_var='nobs', nobs=sample_sizes, effect_size=effect_sizes) plt.show() # + [markdown] slideshow={"slide_type": "fragment"} # **What does this mean?** # # * Power is the probability of rejecting the null hypothesis when, in fact, it is false. # * Power is the probability of making a correct decision (to reject the null hypothesis) when the null hypothesis is false. # * Power is the probability that a test of significance will pick up on an effect that is present. # * Power is the probability that a test of significance will detect a deviation from the null hypothesis, should such a deviation exist. # * Power is the probability of avoiding a Type II error. # * Simply put, power is the probability of not making a Type II error # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # <img style="float: center; width:300px;" src="stuff/higgins-science-creationism.png"> # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Hypothesis testing: # * parametric # * non-parametric # * Bayesian # * model-based # * ... # # More later! 
# # + [markdown] slideshow={"slide_type": "subslide"} # ### The research process # # #### Pearl's research flow # <img style="float: center; width:700px;" src="stuff/Pearl-flow.png"> # Pearl & Mackenzie, “The book of why”, 2018 # + [markdown] slideshow={"slide_type": "slide"} # ### Further readings # # * [A review of statistical concepts](http://www.sagepub.com/upm-data/49259_ch_1.pdf) # * [<NAME>en's probability primer](http://www.rctn.org/bruno/npb163/probability.pdf) # * [Handbook of Biological Statistics - Power analysis](http://www.biostathandbook.com/power.html) # * [Introduction to Power analysis in Python](https://towardsdatascience.com/introduction-to-power-analysis-in-python-e7b748dfa26)
NSCI801_Intro.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="Dg3k9GCEzXka"
import pandas as pd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from pandas import DataFrame
import numpy as np
import math
import heapq
from tqdm import tqdm
import random

# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="0Q853DySBkka" outputId="6f64cc62-cc60-4ace-ba2b-c2f8dd7687cd"
# #!pip install --upgrade tensorflow
# #!pip install --upgrade pandas
# #!pip install --upgrade numpy
tf.__version__

# + id="dlVB0oja3n1n"
df = pd.read_csv('users-feeds.csv')

# + id="rlw23ZP03ntp"
def shrink_users_df(df, user_id, frac=0.20):
    """Keep only the rows of a random `frac` (default 20%) of the users.

    `user_id` is the name of the user-id column in `df`.
    """
    userIds = np.random.choice(df[user_id].unique(),
                               size=int(len(df[user_id].unique()) * frac),
                               replace=False)
    return df.loc[df[user_id].isin(userIds)]

def add_negative_samples(df, item_tag, user_tag, label_tag, num_negatives=2):
    """Return a new frame with every observed (user, item) pair labeled 1,
    plus `num_negatives` randomly sampled non-interacted items per pair
    labeled 0.
    """
    updated_df = pd.DataFrame(columns=[user_tag, item_tag, label_tag])
    all_feeds = df[item_tag].unique()
    users, items, labels = [], [], []
    user_item_set = set(zip(df[user_tag], df[item_tag]))
    for (u, i) in user_item_set:
        users.append(u)
        items.append(i)
        labels.append(1)  # items that the user has interacted with are positive
        for _ in range(num_negatives):
            # randomly select an item
            negative_item = np.random.choice(all_feeds)
            # check that the user has not interacted with this item
            while (u, negative_item) in user_item_set:
                negative_item = np.random.choice(all_feeds)
            users.append(u)
            items.append(negative_item)
            labels.append(0)  # items not interacted with are negative
    updated_df[user_tag] = users
    updated_df[item_tag] = items
    updated_df[label_tag] = labels
    del df
    return updated_df

def mask_first(x):
    """
    Return a list of 0 for the first item and 1 for all others
    """
    result = np.ones_like(x)
    result[0] = 0
    return result

# needs to add validate in the future
def train_test_split(full_df):
    """Leave-one-out split: each user's first interaction goes to the test
    set, all remaining interactions go to the training set.

    BUG FIX: the original body read the module-level global `df` instead of
    the `full_df` argument, so the function silently ignored its input.
    """
    df_test = full_df.copy(deep=True)
    df_test = df_test.groupby(['user']).first()
    df_test['user'] = df_test.index
    df_test = df_test[['user', 'feed_id','is_following_feed']]
    df_test = df_test.rename_axis(None, axis=1)
    # mask_first marks every interaction except each user's first
    mask = full_df.groupby(['user'])['user'].transform(mask_first).astype(bool)
    df_train = full_df.loc[mask]
    return df_train, df_test

def random_mini_batches(U, I, L, mini_batch_size=256):
    """Return a list of mini-batches of a given size.

    NOTE(review): despite the name and the original docstring, no shuffling
    is performed here — batches are taken in input order (the "shuffled_"
    aliases point at the inputs unchanged). Shuffle upstream if stochastic
    batch order is required.

    Args:
        U (list): All users for every interaction
        I (list): All items for every interaction
        L (list): All labels for every interaction.

    Returns:
        mini_batches (list): [(batch_users, batch_items, batch_labels), ...]
    """
    mini_batches = []
    shuffled_U, shuffled_I, shuffled_L = U, I, L
    num_complete_batches = int(math.floor(len(U) / mini_batch_size))
    for k in range(0, num_complete_batches):
        start = k * mini_batch_size
        end = start + mini_batch_size
        mini_batch = (shuffled_U[start:end], shuffled_I[start:end], shuffled_L[start:end])
        mini_batches.append(mini_batch)
    # Trailing partial batch, if the total is not a multiple of the batch size.
    if len(U) % mini_batch_size != 0:
        start = num_complete_batches * mini_batch_size
        mini_batch = (shuffled_U[start:len(U)], shuffled_I[start:len(U)], shuffled_L[start:len(U)])
        mini_batches.append(mini_batch)
    return mini_batches

# + colab={"base_uri": "https://localhost:8080/"} id="rPGfCcFb3nfq" outputId="b57c5c41-c667-46bd-e2f4-2ebfafd4dddf"
df = shrink_users_df(df, 'user')
df.loc[:, 'is_following_feed'] = 1
df = add_negative_samples(df,'feed_id','user','is_following_feed')
print(df.sample(10))

# + id="mroPkzxP9Jik"
df_train, df_test = train_test_split(df)

# + id="69FeYGwQz7kN"
# HYPERPARAMS 
#-------------
num_neg = 2
latent_features = 8
epochs = 20
batch_size = 256
learning_rate = 0.002

#-------------------------
# TENSORFLOW GRAPH
#-------------------------
graph = tf.Graph()

with graph.as_default():
    # Define input placeholders for user, item and label.
    user = tf.placeholder(tf.int32, shape=(None, 1))
    item = tf.placeholder(tf.int32, shape=(None, 1))
    label = tf.placeholder(tf.int32, shape=(None, 1))

    # User embedding for MLP
    mlp_u_var = tf.Variable(tf.random_normal([len(df['user'].unique()), 32],
                                             stddev=0.05), name='mlp_user_embedding')
    mlp_user_embedding = tf.nn.embedding_lookup(mlp_u_var, user)

    # Item embedding for MLP
    mlp_i_var = tf.Variable(tf.random_normal([len(df['feed_id'].unique()), 32],
                                             stddev=0.05), name='mlp_item_embedding')
    mlp_item_embedding = tf.nn.embedding_lookup(mlp_i_var, item)

    # User embedding for GMF
    gmf_u_var = tf.Variable(tf.random_normal([len(df['user'].unique()), latent_features],
                                             stddev=0.05), name='gmf_user_embedding')
    gmf_user_embedding = tf.nn.embedding_lookup(gmf_u_var, user)

    # Item embedding for GMF
    gmf_i_var = tf.Variable(tf.random_normal([len(df['feed_id'].unique()), latent_features],
                                             stddev=0.05), name='gmf_item_embedding')
    gmf_item_embedding = tf.nn.embedding_lookup(gmf_i_var, item)

    # Our GMF layers: element-wise product of the flattened embeddings.
    gmf_user_embed = tf.keras.layers.Flatten()(gmf_user_embedding)
    gmf_item_embed = tf.keras.layers.Flatten()(gmf_item_embedding)
    gmf_matrix = tf.multiply(gmf_user_embed, gmf_item_embed)

    # Our MLP layers: concat -> 64 -> 32 -> 16 -> 8 with batch norm + dropout.
    mlp_user_embed = tf.keras.layers.Flatten()(mlp_user_embedding)
    mlp_item_embed = tf.keras.layers.Flatten()(mlp_item_embedding)
    mlp_concat = tf.keras.layers.concatenate([mlp_user_embed, mlp_item_embed])
    mlp_dropout = tf.keras.layers.Dropout(0.2)(mlp_concat)
    mlp_layer_1 = tf.keras.layers.Dense(64, activation='relu', name='layer1')(mlp_dropout)
    mlp_batch_norm1 = tf.keras.layers.BatchNormalization(name='batch_norm1')(mlp_layer_1)
    mlp_dropout1 = tf.keras.layers.Dropout(0.2, name='dropout1')(mlp_batch_norm1)
    mlp_layer_2 = tf.keras.layers.Dense(32, activation='relu', name='layer2')(mlp_dropout1)
    # BUG FIX: the original reused the names 'batch_norm1' and 'dropout1'
    # here, creating duplicate layer names in the same graph.
    mlp_batch_norm2 = tf.keras.layers.BatchNormalization(name='batch_norm2')(mlp_layer_2)
    mlp_dropout2 = tf.keras.layers.Dropout(0.2, name='dropout2')(mlp_batch_norm2)
    mlp_layer_3 = tf.keras.layers.Dense(16, activation='relu', name='layer3')(mlp_dropout2)
    mlp_layer_4 = tf.keras.layers.Dense(8, activation='relu', name='layer4')(mlp_layer_3)

    # We merge the two networks together
    merged_vector = tf.keras.layers.concatenate([gmf_matrix, mlp_layer_4])

    # Our final single neuron output layer.
    output_layer = tf.keras.layers.Dense(1,
                                         kernel_initializer="lecun_uniform",
                                         name='output_layer')(merged_vector)

    # Our loss function as a binary cross entropy.
    loss = tf.losses.sigmoid_cross_entropy(label, output_layer)

    # Train using the Adam optimizer to minimize our loss.
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    step = opt.minimize(loss)

    # Initialize all tensorflow variables.
    init = tf.global_variables_initializer()

session = tf.Session(config=None, graph=graph)
session.run(init)

# + id="2mErQlQn_15K"
for epoch in range(epochs):
    # Get our training input.
    user_input, item_input, labels = (df_train['user'],
                                      df_train['feed_id'],
                                      df_train['is_following_feed'])

    # Generate a list of minibatches.
    # FIX: pass the batch_size hyperparameter explicitly — the original
    # defined it but relied on the function default (same value today,
    # but the knob above had no effect).
    minibatches = random_mini_batches(user_input, item_input, labels, batch_size)

    # This has nothing to do with tensorflow but gives
    # us a nice progress bar for the training
    progress = tqdm(total=len(minibatches))

    # Loop over each batch and feed our users, items and labels
    # into our graph.
    for minibatch in minibatches:
        feed_dict = {user: np.array(minibatch[0]).reshape(-1, 1),
                     item: np.array(minibatch[1]).reshape(-1, 1),
                     label: np.array(minibatch[2]).reshape(-1, 1)}

        # Execute the graph.
        _, l = session.run([step, loss], feed_dict)

        # Update the progress
        progress.update(1)
        progress.set_description('Epoch: %d - Loss: %.3f' % (epoch + 1, l))
    progress.close()
tensorflowNCF.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python3 (audiozpravy)
#     language: python
#     name: audiozpravy
# ---

# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# 1. Pipeline part
# 2. Backend part
#    2.1 sort based on recency
#    2.2 sort based on frecency
# 3. sort based on modified frecency
#
# (condensed from the auto-generated toc2 markup; regenerate with the
# notebook TOC extension if needed)
# -

# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

# # Pipeline part

import pandas as pd
import glob

# !pwd

article_snapshots = glob.glob("../articles/articles-*")

len(article_snapshots)

from pipeline.text_processing import load_article_snapshots

df = load_article_snapshots(article_snapshots)

from pipeline.text_processing import add_lemmatized_texts, fit_tf_idf

add_lemmatized_texts(df)

# X is the fitted TF-IDF matrix, words the fitted vocabulary.
X, words = fit_tf_idf(df["lemmatized_texts"])

words

# # Backend part

from backend.recommender.recommend import (
    recommend,
    get_daily_google_trends,
    get_relevant_words,
    estimate_popularity,
    calculate_frecency
)

top_trends = get_daily_google_trends()
top_trends

pop = estimate_popularity(top_trends, X, words)

# How many articles got zero popularity from today's trends?
(pd.Series(pop.squeeze()) == 0).value_counts()

# ## sort based on recency

df.sort_values("published", ascending=False).title.values[:20]

# ## sort based on frecency

recommend(df, X, words)

# # sort based on modified frecency
# (mitigate impact of popularity)

import time
import numpy as np
from math import log, e


def calculate_frecency(popularity, age, half_life_seconds=7 * 24 * 60 * 60):
    """Frecency score: log(popularity) decayed exponentially with age.

    see https://wiki.mozilla.org/User:Jesse/NewFrecency

    NOTE: this intentionally shadows the ``calculate_frecency`` imported
    from ``backend.recommender.recommend`` above, to experiment with a
    modified formula.

    Args:
        popularity: array-like of popularity scores; must be > 0 (the
            caller below adds 1 before passing it in).
        age: array-like of article ages in seconds.
        half_life_seconds: seconds after which the score halves.
            Generalized from the hard-coded 7 days; the default preserves
            the original behavior.
    """
    # how much will be older articles penalized,
    # interpretation: the denominator is the number of seconds after which
    # the score halves
    lambda_const = log(2) / half_life_seconds  # default: 7 days
    return np.multiply(np.log(popularity), np.exp(-lambda_const * age))


# +
daily_trends = get_daily_google_trends()
# +1 keeps log() finite for articles with zero popularity.
popularity = estimate_popularity(daily_trends, X, words) + 1
age = time.time() - df.published.map(time.mktime)
frecency = np.squeeze(np.asarray(calculate_frecency(popularity.T, age.values)))
# -

(pd.Series(frecency) == 0).value_counts()

top_ids = frecency.argsort()[::-1][:10]

df.iloc[top_ids[:10], 0].values
poc.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Veeam Backup for Azure job update
#
# This Notebook shows how you can perform updates to multiple policies in Veeam Backup for Azure.
#
# First as this is Python we need to import all the packages, note that requests needs to be installed.
#
# pip install requests
#
# This is all based on the Veeam helpcenter: https://helpcenter.veeam.com/docs/vbazure/rest/editing_backup_policy_by_policy_id.html?ver=10

import requests
import urllib3
import pprint
import getpass

# The appliance uses a self-signed cert, so warnings are silenced and
# requests are made with verify=False throughout.
urllib3.disable_warnings()

# Enter the VBA credentials
ip = input('Enter VBA IP: ')
url = f"https://{ip}/api/oauth2/token"
username = input('Enter Username: ')
# BUG FIX: the original line was mangled to `password = <PASSWORD>(...)`
# (anonymization residue); `getpass` is imported above for exactly this.
password = getpass.getpass("Enter password: ")

# Log into VBA and get the access token, this requires building a body object (data) and the header object.

# +
data = {"grant_type": "password",
        "username": username,
        "password": password}

headers = {'Content-Type': 'application/json',
           'Accept': 'application/json'}

# NOTE(review): the token request sends form-encoded `data=` and does not
# pass `headers`, so the json Content-Type above is unused here — confirm
# against the VBA OAuth endpoint before changing.
res = requests.post(url, data=data, verify=False)
print(f"Status code: {res.status_code}")
res_json = res.json()
access_token = res_json.get('access_token')
# print(res_json)
# -

# Add the access token to the header
headers['Authorization'] = "Bearer " + access_token

# Make request to get policies

# +
export_policies = f'https://{ip}/api/v1/policies/export'
res = requests.post(export_policies, headers=headers, verify=False).json()
# policy = res['results'][0]
pprint.pprint(res)
# -

# Need to grab all the relevant attributes that we will need including, name, id and tenantId and construct a new object to use to update the policy.
#
# Also going to build a list of the policy ids to iterate over, I could use object above but it's just easier to create a new list.
# +
# Collect the per-policy fields we need for the update payloads.
# NOTE(review): the key 'tendantId' is a typo kept deliberately — the next
# cell reads attributes[...]['tendantId'], so renaming it here alone would
# break that cell.
attributes = []
for item in res['results']:
    attributes.append({
        'name': item['name'],
        'id': item['id'],
        'tendantId': item['tenantId'],
    })

policy_ids = [item['id'] for item in res['results']]
print(attributes)
# -

# We also need to get a list of VMs in each policy, to do this we need to send a new GET request to the 'selectedItems' end point then add that to the above object.

# +
policy_vms = []
# Fetch each policy's selected VMs and attach them to its attribute record.
for policy_num, policy in enumerate(policy_ids):
    ps = requests.get(f'https://{ip}/api/v1/policies/{policy}/selectedItems',
                      headers=headers, verify=False).json()
    vm_list = [{'id': item['virtualMachine']['id']} for item in ps['results']]
    attributes[policy_num]['virtualMachines'] = vm_list

pprint.pprint(attributes)
# -

# Next we will update the template policy that was copied from VBA and update it with each of the new updated policies. Each of them will be held in a list of dictionaries.
#
# Note that this only shows a backup, no snapshots are configured in this policy.

# +
# Policy Template
policy_update = {
    "name": "",
    "description": "",
    "isEnabled": True,
    "priority": 1,
    "tenantId": "",
    "backupType": "SelectedItems",
    "regions": [
        {"regionId": "uksouth"}
    ],
    "selectedItems": {
        "subscriptions": [],
        "resourceGroups": [],
        "tags": [],
        "virtualMachines": [
            {"id": ""}
        ],
    },
    "excludedItems": {
        "virtualMachines": []
    },
    "backupSettings": {
        "targetRepositoryId": "1",
        "retentionSettings": {
            "retentionDurationType": "Daily",
            "timeRetentionDuration": 4,
        },
        "schedule": {
            "frequencyType": "Daily",
            "dailyTime": "20:00:00",
            "dailyType": "Everyday",
            "selectedDays": [
                "Sunday", "Monday", "Tuesday", "Wednesday",
                "Thursday", "Friday", "Saturday"
            ],
        },
    },
    "retrySettings": {
        "retryCount": 3
    },
}

policies = []

# looping through each of the attribute objects and creating a new Policy
# object that we can use.
import copy  # needed to snapshot the mutable template per policy

for item in attributes:
    policy_update['name'] = item['name']
    policy_update['tenantId'] = item['tendantId']
    policy_update['selectedItems']['virtualMachines'] = item['virtualMachines']
    # making a single change to the retention
    policy_update['backupSettings']['retentionSettings']['timeRetentionDuration'] = 10
    # BUG FIX: the original appended `policy_update` itself, so every list
    # entry aliased ONE dict and all policies ended up with the LAST
    # item's name/tenant/VMs. Deep-copy to freeze this iteration's state.
    policies.append(copy.deepcopy(policy_update))

pprint.pprint(policies)
# -

# Next loop through the policy ids sending a PUT request to the API to make the changes to the policies.

# +
# Lists to hold the response codes and responses from the calls
status_codes = []
responses = []

# Loop through each of the attributes first as they hold the policy id
for item in attributes:
    # BUG FIX: the original nested single quotes inside a single-quoted
    # f-string (`{item['id']}`), a SyntaxError on Python < 3.12.
    policy_url = f"https://{ip}/api/v1/policies/{item['id']}"
    # Next loop through the policies
    for policy in policies:
        # Where there is a matching name we send the request with the new
        # policy dictionary converted to JSON (json=policy)
        if policy['name'] == item['name']:
            res = requests.put(policy_url, headers=headers, json=policy, verify=False)
            # Update each of the response arrays
            status_codes.append(res.status_code)
            responses.append(res)

for item in status_codes:
    if item == 204:
        print("Update OK")  # typo fix: was "Upate OK"
    else:
        print('Update Error')

print(responses)
# -

# Finally you'll want to delete the headers with the access token

headers = []
print(headers)

# All done! See simple :)
azure_api.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: tf18p35
#     language: python
#     name: tf18p35
# ---

# ### Import the DATA.txt data into the database by month
# One database table per month, named data20180X.

import mysql.connector
from sqlalchemy import create_engine
import os
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import glob

engine = create_engine('mysql+mysqlconnector://trnet:trnet@127.0.0.1:3306/trnet')
# con = engine.connect()

# +
# Directory names for July, minus the ones handled separately.
data_07s = [os.path.split(os.path.dirname(i))[-1]
            for i in glob.glob('/trnet_fs/data*07*/')]
data_07s.remove('data_0723')
data_07s.remove('data0720')
data_07s.remove('data_0507')
data_07s
# -

# !ls /trnet_fs/

# !mkdir /home/lidingke/DATA/txts/07

data_04s = ['data0403', 'data0408', 'data0409', 'data0410', 'data0411',
            'data0412', 'data0413', 'data0416', 'data0417', 'data0418',
            'data0419', 'data0423', 'data0425',]

data_05s = ['data_0503', 'data_0504', 'data_0509', 'data_0511', 'data_0514',
            'data_0516', 'data_0518', 'data_0523', 'data_0525', 'data_0529',
            'data_0531']

data_06s = ['data_0601', 'data0605', 'data0608', 'data_0612', 'data_0613',
            'data_0615', 'data_0620', 'data_0621', 'data_0625', 'data_0626',
            'data_0628']

data_06s[8:]


# ## Generate the txt files for import
# Lines with encoding problems are dropped; usually only two or three per month.
def create_data_txts(date, file):
    """Convert /trnet_fs/<date>/DATA_LABEL.txt (GBK) into a UTF-8 tab file
    under /home/lidingke/DATA/txts/<file>/<date>.txt, skipping lines that
    fail to decode."""
    path = '/trnet_fs/{}/DATA_LABEL.txt'.format(date)
    dirname = os.path.dirname(path)
    with open(path, 'rb') as src:
        raw_lines = src.readlines()
    rows = []
    for raw in raw_lines:
        try:
            fields = raw.decode('GBK').strip().split('\t')
        except UnicodeDecodeError:
            # Log the offending day/line and move on.
            print(date)
            print(raw)
            continue
        fullpath = dirname + '/' + fields[-1].replace('\\', '/')
        row = (fields[0], fields[-1][:6], fields[2], fields[3], fullpath)
        rows.append('\t'.join(row).encode('utf-8') + b'\r\n')
    with open('/home/lidingke/DATA/txts/{}/{}.txt'.format(file, date,), 'wb') as dst:
        dst.writelines(rows)
    return date


[create_data_txts(d, '07') for d in data_07s]

# !ls /home/lidingke/DATA/txts/07

# !rm /home/lidingke/DATA/txts/07/*
# !tail -n 10 /home/lidingke/DATA/txts/06/data_0628.txt


def create_data_txts_u(d, file='05'):
    """Convert /trnet_fs/<d>/DATA_U.txt (UTF-8) into a tab file under
    /home/lidingke/DATA/txts/<file>/<d>.txt.

    Generalized: ``file`` selects the month sub-directory (the default
    '05' preserves the original hard-coded behavior).

    Robustness: lines that fail to decode are now logged and skipped,
    consistent with the sibling ``create_data_txts`` — the original let a
    single bad line abort the whole file.
    """
    path = '/trnet_fs/{}/DATA_U.txt'.format(d)
    with open(path, 'rb') as f:
        lines = f.readlines()
    dirname = os.path.dirname(path)
    results = []
    for line in lines:
        try:
            fields = line.decode('UTF-8').strip().split('\t')
        except UnicodeDecodeError:
            print(d)
            print(line)
            continue
        fullpath = dirname + '/' + fields[-1].replace('\\', '/')
        r = (fields[0], fields[-1][:6], fields[2], fields[3], fullpath)
        results.append('\t'.join(r).encode('utf-8') + b'\r\n')
    with open('/home/lidingke/DATA/txts/{}/{}.txt'.format(file, d), 'wb') as f:
        f.writelines(results)
    return d


# create_data_txts_u('data_0507' )


# ## Load the txt files into db tables
def load_txt_to_db(txts, mon):
    """LOAD DATA LOCAL INFILE each generated txt into table data2018<mon>.

    NOTE: the SQL is assembled with str.format from local directory names;
    never pass untrusted input to ``txts``/``mon``.
    """
    with engine.connect() as con:
        for d in txts:
            print(d)
            cmd = "LOAD DATA LOCAL INFILE '/home/lidingke//DATA/txts/{}/{}.txt' INTO TABLE data2018{};".format(mon, d, mon)
            r = con.execute(cmd)
            print(cmd)


# load_txt_to_db(['data_05s'])

data_0e = ['data_0723', 'data0720']

load_txt_to_db(data_0e, '07')

# #### Command to create a similar table:
# `CREATE TABLE data201806 LIKE data201805`

# #### todolist:
# - [x] 0507 still needed importing
# - [x] set up single-day import for June
# - [x] June imported through 0621
# - [x] June and July fully imported

# %%time
with engine.connect() as con:
    cmd = "SELECT dir, content FROM data201807 WHERE content IN ('李强','王磊')"
    results = con.execute(cmd)
    for r in results:
        print(r)
        dir_ = r[0].strip()
        im = cv2.imread(dir_)
        plt.imshow(im)
        # plt.show()

with engine.connect() as con:
    cmd = "LOAD DATA LOCAL INFILE '/home/lidingke/tools/sql_py/test.txt' INTO TABLE test;"
    r = con.execute(cmd)
    print(r)
ldklib/jupyter_demo/sql_load_txt_into_db.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.5.3 # language: julia # name: julia-1.5 # --- # # Recitation 1: a crash course in Julia and JuMP # # <a href="http://julialang.org"><img src="figures/julia.png" alt="Julia" style="width: 150px;"/></a> # <a href="http://jump.dev"><img src="figures/JuMP-logo.png" alt="JuMP" style="width: 150px;"/></a> # ## 1. Why Julia/JuMP? # # - Julia is a "high-level, high-performance dynamic programming language for technical computing." Think the linear algebra power of Matlab, with the speed of C and the readability of Python. # - JuMP is a library that allows us to easily formulate optimization problems and solve them using a variety of solvers. It provides an easy interface to implement advanced optimization techniques. # - Check out this [talk](https://github.com/johnfgibson/whyjulia/blob/master/1-whyjulia.ipynb]) for more details on why Julia is awesome. # ## 2. Julia basics # # ### 2.1 Jupyter # # #### What is a Jupyter notebook? # # - Jupyter notebooks are **documents** (like a Word document) that can contain and run code. # - They were originally created for Python as part of the IPython project, and adapted for Julia by the **IJulia** project. # - They are very useful to **prototype**, draw **plots**, or even for teaching material like this one. # - The document relies only on a modern browser for rendering, and can easily be **shared**. # # #### How do I even open this file? # # Once Julia is installed, start julia and just run the following commands to install the `IJulia` package. # ```jl # using Pkg # Pkg.install("IJulia") # ``` # This should work on its own. If there is any issue, check out the [IJulia website](https://github.com/JuliaLang/IJulia.jl). 
# # Once IJulia is installed, go to the directory containing the notebook file (`Recitation 1.ipynb`), start julia and run: # ```jl # using IJulia # notebook() # ``` # A webpage should open automatically, just click on the notebook to load it. # #### Navigating the notebook # # - Click `Help -> User Interface Tour` for a guided tour of the notebook interface. # - Each notebook is composed of **cells**, that either contain code or text (`Markdown`). # - You can edit the content of a cell by double-clicking on it (_Edit Mode_). # # When you are not editing a cell, you are in _Command mode_ and can edit the structure of the notebook (cells, name, options...) # # - Create a cell by: # - Clicking `Insert -> Insert Cell` # - Pressing `a` or `b` in Command Mode # - Pressing `Alt+Enter` in Edit Mode # - Delete a cell by: # - Clicking `Edit -> Delete Cell` # - Pressing `dd` # - Execute a cell by: # - Clicking `Cell -> Run` # - Pressing `Ctrl+Enter` # - Pressing `Shift+Enter` (this will also move your focus to the next cell) # # Other functions: # - Undo last text edit with `Ctrl+z` in Edit Mode # - Undo last cell manipulation with `z` in Command Mode # - Save notebook with `Ctrl+s` in Edit Mode # - Save notebook with `s` in Command Mode # # Though notebooks rely on your browser to work, they do not require an internet connection (except for math rendering). 
# ### 2.2 How to Julia
#
# Julia, as a dynamic language, can simply be used as a calculator:

1 + 1

sin(exp(2 * pi) + sqrt(3))

# The key building blocks of Julia code are variables:

a = 1
b = 2  # This is a comment
c = a^2 + b^3

# Julia supports the usual `if`, `while` and `for` structures:

if c >= 10
    print("Yes")
else
    print("No")
end

i = 1
while i <= 5
    println("Why, hello!")  # Print with a new line
    i += 1
end

for i = 1:3
    print("$i banana")  # '$' can be used to insert variables into text
    if i > 1
        print("s")
    end
    println()  # Just a new line
end

# **Do not worry about writing loops**: in Julia, they are as fast as writing vectorized code, and sometimes faster!

# **Arrays** (list of numbers) are at the core of research computing and Julia's arrays are extremely optimized.

myList = [6, 7, 8]

# Array indexing starts with 1 in Julia, and arrays are mutable.
@show myList[1]
myList[3] = 4
@show myList;

# A 2-dimensional array is a Matrix

# +
A = [1 2 3
     2 1 2
     3 2 1]

A = [1 2 3; 2 1 2; 3 2 1]  # same thing
# -

# ## 2.3 Reading data - CSV and DataFrames

# You can install these packages with:

using Pkg
Pkg.add("CSV")
Pkg.add("DataFrames")

using DataFrames, CSV

# We're going to load the data for our optimization example, the transportation problem, where factories and markets are both located in the 2D plane.
# - `data/supply.csv` has one row per factory, with columns for the (x, y) coordinates, and a column for the capacity
# - `data/demand.csv` has one row per market, with columns for the (x, y) coordinates, and a column for the demand

supply = CSV.read("data/supply.csv", DataFrame)
demand = CSV.read("data/demand.csv", DataFrame);

first(demand, 5)

# ## 3. Basics of JuMP
#
# Now we will use this data to formulate and solve the transportation problem. First, we need to install a solver. A good choice is the Gurobi solver. You can follow [these instructions](https://github.com/jump-dev/Gurobi.jl) to install both Gurobi and its Julia wrapper `Gurobi.jl`.
#
# Then we can load JuMP and Gurobi.
using JuMP, Gurobi

# We're going to use JuMP to "translate" our transportation problem (see slides) into something that Gurobi can solve.

"Function to build the transportation model, returns model and decision variable handles"
function build_transportation_model(supply::DataFrame, demand::DataFrame)
    # initialize the model, and specify the solver
    model = Model(Gurobi.Optimizer)

    # Decision variables
    # FIX: the original stub returned `x` without ever defining it, so the
    # cell below failed with UndefVarError before the exercise even began.
    # x[i, j] = amount shipped from factory i to market j (nonnegative),
    # matching the x[i, j] indexing used in the cells below.
    @variable(model, x[1:nrow(supply), 1:nrow(demand)] >= 0)

    # Capacity constraint
    # TODO (recitation exercise): each factory i ships at most its capacity.

    # Demand constraint
    # TODO (recitation exercise): each market j receives at least its demand.

    # Objective
    # TODO (recitation exercise): minimize total shipping cost.

    return model, x
end

# We can now build the optimization model. Notice that Jupyter can display the model (but beware output overload for large models).

model, x = build_transportation_model(supply, demand)
model

x

# Now we can solve the model using the `optimize!` command. The `!` is a Julia convention that indicates that the function modifies its argument (in this case, by solving the optimization problem).

optimize!(model)

# Now we can extract the optimal objective:

objective_value(model)

# We can also obtain the optimal variable values:

value(x[1, 4])

[value(x[i, j]) for i = 1:nrow(supply), j = 1:nrow(demand)]
Rec1_code/Recitation 1-Student.ipynb