code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt df = pd.read_csv('knn.csv') df = df.drop(['id'],axis=1) df # + test_frac = 0.3 test = df.sample(frac=test_frac) x_test = pd.DataFrame(test.iloc[:,1:]) y_test = pd.DataFrame(test.iloc[:,0]) train = df.loc[~(df.index.isin(test.index))] x_train = pd.DataFrame(train.iloc[:,1:]) y_train = pd.DataFrame(train.iloc[:,0]) print(df.shape, test.shape, train.shape) x_train =(x_train-x_train.mean())/(x_train.std()) x_test = (x_test-x_test.mean())/(x_test.std()) # - from sklearn.neighbors import KNeighborsClassifier neigh = KNeighborsClassifier(n_neighbors=3) neigh.fit(x_train, y_train) predictions = neigh.predict(x_test) score = neigh.score(x_test,y_test) score import matplotlib.pyplot as plt import seaborn as sns from sklearn import metrics cm = metrics.confusion_matrix(y_test, predictions) print(cm) plt.figure(figsize=(9,9)) sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r'); plt.ylabel('Actual label'); plt.xlabel('Predicted label'); all_sample_title = 'Accuracy Score: {0}'.format(score) plt.title(all_sample_title, size = 15); print(metrics.classification_report(y_test,predictions)) from pandas import read_csv from sklearn.ensemble import ExtraTreesClassifier model = ExtraTreesClassifier() model.fit(x_train, y_train) fimps = model.feature_importances_ # + dic = {} j = 0 for i in fimps: dic[i] = j j+=1 fimps = list(sorted(fimps)) maxii = 0 maxknnk = 0 maxf = 0 for z in range(1,31): k = z selected = [] for i in range(k): selected.append(dic[fimps[i]]) barr = [False]*30 for i in selected: barr[i] = True print(selected) x_trainlf = x_train.iloc[:,selected] x_testlf = x_test.iloc[:,selected] y_trainlf = y_train y_testlf = y_test accs = [] maxi = 0 maxk = 0 
for i in range(1,100): neigh = KNeighborsClassifier(n_neighbors=i) neigh.fit(x_trainlf, y_trainlf) predictions = neigh.predict(x_testlf) score = neigh.score(x_testlf,y_testlf) accs.append(score) if score>maxi: maxi = score maxk = i # plt.plot(accs) # plt.show() # print(maxi,maxk) if maxi>maxii: maxii = maxi maxknnk = maxk maxf = z print(maxii,maxknnk,maxf) # - print(maxii,maxknnk,maxf) # + from sklearn.neighbors import KNeighborsClassifier accs = [] maxi = 0 maxk = 0 for i in range(1,100): neigh = KNeighborsClassifier(n_neighbors=i) neigh.fit(x_trainlf, y_trainlf) predictions = neigh.predict(x_testlf) score = neigh.score(x_testlf,y_testlf) accs.append(score) if score>maxi: maxi = score maxk = i plt.plot(accs) plt.show() print(maxi,maxk) # - print(x_trainlf.shape, y_trainlf.shape, x_testlf.shape, y_testlf.shape) # # ROC x_trainlf =(x_trainlf-x_trainlf.mean())/(x_trainlf.std()) x_testlf = (x_testlf-x_testlf.mean())/(x_testlf.std()) x_trainlf # + k = 24 selected = [] for i in range(k): selected.append(dic[fimps[i]]) barr = [False]*30 for i in selected: barr[i] = True print(selected) x_trainlf = x_train.iloc[:,selected] x_testlf = x_test.iloc[:,selected] y_trainlf = y_train y_testlf = y_test neigh = KNeighborsClassifier(n_neighbors=4) neigh.fit(x_trainlf, y_trainlf) predictions = neigh.predict(x_testlf) score = neigh.score(x_testlf,y_testlf) score*100 # - fpr,tpr,thresholds = metrics.roc_curve(y_test,predictions) ras = metrics.roc_auc_score(y_test,predictions) plt.plot(fpr, tpr, marker='.') plt.show()
KNN/KNN Classification.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Manejo de archivos .CSV # # Uno de los formatos más utilizados en la actualidad para intercambio de datos es CSV (“Comma Separated Values”). Estas son básicamente archivos de texto en los que cada línea contiene una fila de datos con múltiples registros delimitados por un separador. Tradicionalmente el separador suele ser la coma, de ahí el nombre del formato. Aunque también se pueden utilizan otros caracteres que no suelen estar contenidos en los datos. Por ejemplo, espacios, tabuladores y puntos y coma. Lo que los hace muy fáciles de procesar y son soportados por cualquier aplicación. Incluso son fáciles de leer por personas con editores de texto. Por eso es clave saber guardar y leer archivos CVS con Python. # # # Guardado de archivos CSV # # Para guardar un archivo CSV es necesario disponer en primer lugar de un dataframe en memoria. Por lo que se pude crear uno de ejemplo. Una vez generado el este se puede volcar a un archivo CSV utilizando el método to_csv. # + import pandas as pd data = {'first_name': ['Sigrid', 'Joe', 'Theodoric','Kennedy', 'Beatrix', 'Olimpia', 'Grange', 'Sallee'], 'last_name': ['Mannock', 'Hinners', 'Rivers', 'Donnell', 'Parlett', 'Guenther', 'Douce', 'Johnstone'], 'age': [27, 31, 36, 53, 48, 36, 40, 34], 'amount_1': [7.17, 1.90, 1.11, 1.41, 6.69, 4.62, 1.01, 4.88], 'amount_2': [8.06, "?", 5.90, "?", "?", 7.48, 4.37, "?"]} df = pd.DataFrame(data, columns = ['first_name', 'last_name', 'age', 'amount_1', 'amount_2']) df.to_csv('example.csv') # - # En caso de que se desee cambiar el delimitador se puede indicar con la propiedad sep. 
Para que este sea punto y coma simplemente se ha de escribir: df.to_csv('example_semicolon.csv', sep=';') # # Lectura de archivos CSV # # La lectura de los archivos se realiza con el método read_csv de pandas. Solamente se la ha de indicar la ruta al archivo. df = pd.read_csv('example.csv') df # Al igual que antes se puede indicar el separador utilizado mediante la propiedad sep. df = pd.read_csv('example_semicolon.csv', sep=';') # Por defecto se utiliza la primera línea del fichero como cabecera para asignar los nombres a las columnas. En el caso de que el archivo no disponga de cabecera se puede evitar esto asignando None a la propiedad head. df = pd.read_csv('example.csv', header=None) df # El archivo que se ha utilizado en esta ocasión tiene cabecera, por lo que esta se ha cargado como la primera fila. En caso de que se desee ignorar una o más filas se le puede indicar medítate la propiedad skiprows. df = pd.read_csv('example.csv', header=None, skiprows=1) df # Los nombres de las columnas del dataframe se pueden indicar mediante la propiedad names. df = pd.read_csv('example.csv', skiprows = 1, names=['UID', 'First Name', 'Last Name', 'Age', 'Sales #1', 'Sales #2']) df # Es posible que los archivos contengan valores nulos. En el ejemplo se puede ver que este es ?. 
La propiedad que permite que se asigne un valor nulo cuando se encuentra un valor dado es na_values=['.'] df = pd.read_csv('example.csv', skiprows=1, names=['UID', 'First Name', 'Last Name', 'Age', 'Sales #1', 'Sales #2'], na_values=['?']) df # Finalmente se puede asignar un ídice a los datos df = pd.read_csv('example.csv', skiprows=1, names=['UID', 'First Name', 'Last Name', 'Age', 'Sales #1', 'Sales #2'], na_values=['?'], index_col='UID') df # O más de uno df = pd.read_csv('example.csv', skiprows=1, names=['UID', 'First Name', 'Last Name', 'Age', 'Sales #1', 'Sales #2'], na_values=['?'], index_col=['First Name', 'Last Name']) # # Manejo de archivos Excel # Antes de poder guardar un archivo Excel desde Python es necesario disponer de un dataframe. Por lo que se puede crear uno de ejemplo, como se hizo al hablar de los archivos CVS: # + import pandas as pd data = {'first_name': ['Sigrid', 'Joe', 'Theodoric','Kennedy', 'Beatrix', 'Olimpia', 'Grange', 'Sallee'], 'last_name': ['Mannock', 'Hinners', 'Rivers', 'Donnell', 'Parlett', 'Guenther', 'Douce', 'Johnstone'], 'age': [27, 31, 36, 53, 48, 36, 40, 34], 'amount_1': [7.17, 1.90, 1.11, 1.41, 6.69, 4.62, 1.01, 4.88], 'amount_2': [8.06, "?", 5.90, "?", "?", 7.48, 4.37, "?"]} df = pd.DataFrame(data, columns = ['first_name', 'last_name', 'age', 'amount_1', 'amount_2']) # - # Ahora para exportar los datos en formato Excel simplemente se ha utilizar el método to_excel del dataframe. En esta ocasión se ha de indicar el archivo en el que se desea guardar los datos mediante una cadena de texto. Opcionalmente se puede indicar también el nombre de la hoja del libro Excel mediante la propiedad sheet_name. El contenido del archivo generado el siguiente código se muestra en la figura. df.to_excel('example.xlsx', sheet_name='example') # # Lectura de archivos Excel en Python # # El proceso de lectura se realiza con el método read_excel de pandas. 
En el caso de que el libro contenga más de una hoja se puede indicar el nombre de la que se desea importar mediante el método sheet_name. Cuando no se indique una cargara el contenido de la primera hoja del libro. df = pd.read_excel('example.xlsx', sheet_name='example') # Por defecto el método utiliza la primera línea del fichero como cabecera para asignar los nombres a las columnas. En el caso de que el archivo no disponga de cabecera se puede evitar este comportamiento asignando None a la propiedad head. df = pd.read_excel('example.xlsx', header=None) # El archivo que se ha utilizado en esta ocasión tiene cabecera, por lo que esta se ha importado como la primera fila. En caso de que se desee ignorar una o más filas se le puede indicar mediante la propiedad skiprows. df = pd.read_excel('example.xlsx', header=None, skiprows=1) # En el caso de que se desee indicar un nombre concreto para cada una de las columnas, diferente al de la hoja, se puede indicar mediatne la propiedad names. df = pd.read_excel('example.xlsx', skiprows = 1, names=['UID', 'First Name', 'Last Name', 'Age', 'Sales #1', 'Sales #2']) # # Manejo de archivos de texto (.txt) # # # Abrir un archivo para leer o escribir en Python # # Antes de leer o escribir archivos con Python es necesario es necesario abrir una conexión. Lo que se puede hacer con el comando open(), al que se le ha de indicar el nombre del archivo. # # La documentación se encuentra en: https://docs.python.org/2/tutorial/inputoutput.html#reading-and-writing-fileshttp:// # # Por defecto la conexión se abre en modo lectura, con lo que no es posible escribir en el archivo. Para poder escribir es necesario utilizar la opción "w" con la que se eliminará cualquier archivo existente y creará uno nuevo. Otra opción que se puede utilizar es "a", con la que se añadirá nuevo contenido al archivo existente. Las opciones se pueden ver en el siguiente código. 
# Abre el archivo para escribir y elimina los archivos anteriores si existen fic = open("Archivos/text.txt", "w") # Abre el archivo para agregar contenido fic = open("Archivos/text.txt", "a") # Abre el archivo en modo lectura fic = open("Archivos/text.txt", "r") fic.close() # En todos los casos, una vez finalizado las operaciones de lectura y escritura con los archivos, una buena práctica es cerrar el acceso. Para lo que se debe utilizar el método close(). # ### Como un resumen # # La **r** indica el modo lectura. Si se intentara utilizar la función write para escribir algo, se lanzaría la excepción IOError. A continuación los distintos modos. # # - **r** – Lectura únicamente. # - **w** – Escritura únicamente, reemplazando el contenido actual del archivo o bien creándolo si es inexistente. # - **a** – Escritura únicamente, manteniendo el contenido actual y añadiendo los datos al final del archivo. # - **w+**, **r+** o **a+** – Lectura y escritura. # # El signo **'+'** permite realizar ambas operaciones. La diferencia entre **w+** y **r+** consiste en que la primera opción borra el contenido anterior antes de escribir nuevos datos, y crea el archivo en caso de ser inexistente. **a+** se comporta de manera similar, aunque añade los datos al final del archivo. # # Todas las opciones anteriores pueden combinarse con una 'b' (de binary), que consiste en leer o escribir datos binarios. Esta opción es válida únicamente en sistemas Microsoft Windows, que hacen una distinción entre archivos de texto y binarios. En el resto de las plataformas, es simplemente ignorada. Ejemplos: rb, wb, ab+, rb+, wb+. # # Escribir archivos de texto en Python # # Antes de guardar un archivo es necesario disponer de un vector con las cadenas de texto que se desean guardar. Para ello se puede crear un vector al que se le puede llamar data. 
data = ["Línea 1", "Línea 2", "Línea 3", "Línea 4", "Línea 5"] # Para escribir el contenido de este vector en un archivo se puede hacer de dos maneras: línea a línea o de una sola vez. # # Escribir el archivo línea a línea # # El método más fácil directo para volcar el vector en un archivo es escribir el contenido línea a línea. Para ello se puede iterar sobre el archivo y utilizar el método write de archivo. Este proceso es lo que se muestra en el siguiente ejemplo. # + fic = open("text_1.txt", "w") for line in data: fic.write(line) fic.write("\n") fic.close() # - # Nótese que los elementos de vector no finalizan con el carácter salto de línea. Por lo tanto, es necesario añadir este después de escribir cada línea. Ya que, de lo contrario, todos los elementos se escribirían en una única línea en el archivo de salida. # # Una forma de escribir el archivo línea a línea sin que sea necesario incluir el salto de línea es con la función print. Para esto es necesario incluir la opción "file" con la conexión al archivo. Esta opción se puede ver en el siguiente ejemplo. # + fic = open("text_2.txt", "w") for line in data: print(line, file=fic) fic.close() # - # # Escribir el archivo de una vez # # Finalmente, en el caso de que los datos se encuentren en un objeto iterable se puede utilizar el método writelines para volcar este de golpe. Aunque es necesario tener en cuenta que este método no agrega el salto de línea, por lo que puede ser necesario agregarlo con antelación. fic = open("text_3.txt", "w") fic.writelines("%s\n" % s for s in data) fic.close() # # Leer archivos de texto en Python # # La lectura de los archivos, al igual que la escritura, se puede hacer de dos maneras: línea a línea o de una sola vez. # # # Leer el archivo de una vez # # El procedimiento para leer los archivos de texto más sencillo es hacerlo de una vez con el método readlines. Una vez abierto el archivo solamente se ha de llamar a este método para obtener el contenido. 
Por ejemplo, se puede usar el siguiente código. fic = open('text_1.txt', "r") lines = fic.readlines() fic.close() lines # En esta ocasión lines es un vector en el que cada elemento es una línea del archivo. Alternativamente, en lugar del método readlines se puede usar la función list para leer los datos. fic = open('text_1.txt', "r") lines = list(fic) fic.close() lines # # Leer el archivo línea a línea # # En otras ocasiones puede ser necesario leer el archivo línea a línea. Esto se puede hacer simplemente iterando sobre el fichero una vez abierto. En cada iteración se podrá hacer con cada línea cualquier operación que sea necesaria. En el siguiente ejemplo cada una de las líneas se agrega a un vector. # + fic = open('text_1.txt', "r") lines = [] for line in fic: lines.append(line) fic.close() # - lines # # Eliminar los saltos de línea en el archivo importado # # Los tres métodos que se han visto para leer los archivos importan el salto de línea. Por lo que puede ser necesario eliminarlo antes de trabajar con los datos. Esto se puede conseguir de forma sencilla con el método rstrip de las cadenas de texto de Python. Lo que se puede hacer iterando sobre el vector. lines1 = [s.rstrip('\n') for s in lines] lines1 # # Manejo de archivos .JSON # # El formato de archivo JSON es uno de los más populares en los últimos años para serializar los datos. Los archivos de este formato se pueden obtener como resultados de la mayoría de las aplicaciones API REST y otras aplicaciones web. Debido a su gran popularidad es cada vez más probable que se necesite leer o escribir archivos JSON con Python. # # En Python el formato JSON se puede procesar gracias al paquete json. Este paquete contiene el código que permite transformar los archivos JSON en diccionarios o viceversa. # # # El formato JSON # # JSON es un formato para el intercambio de datos basado en texto. Por lo que es fácil de leer para tanto para una persona como para una maquina. 
El nombre es un acrónimo de las siglas en inglés de JavaScript Object Notation. Lo que indica que su origen se encuentra vinculado al lenguaje JavaScript. Aunque hoy en día puede ser utilizado desde casi todos los lenguajes de programación. JSON se ha hecho fuerte como alternativa a XML, otro formato de intercambio de datos que requiere más metainformación y, por lo tanto, consume más ancho de banda y recursos. # # Los datos en los archivos JSON son pares de propiedad valor separados por dos puntos. Estos pares se separan mediante comas y se encierran entre llaves. El valor de una propiedad puede ser otro objeto JSON, lo que ofrece una gran flexibilidad a la hora de estructurar información. Esta estructura de datos recuerda mucho a los diccionarios de Python. # # # Escribir archivos JSON con Python # # La forma más sencilla de generar un archivo JSON desde Python es exportar los datos contenidos en un objeto diccionario. Al igual que los objetos JSON, los objeto diccionarios pueden contener cualquier tipo de datos: valores numéricos, cadena de textos, vectores o cualquier otro tipo de objeto. El código necesario para traducir el diccionario a formato JSON se encuentra disponible en el paquete json. # # A continuación, se muestra un ejemplo. Inicialmente se importa el paquete json. Posteriormente se crear un objeto diccionarios al que se agregan los datos de tres clientes entre los que se encuentra el nombre los apellidos, la edad y la cantidad gastada por cada uno. En el ejemplo se pude apreciar la utilización de diferentes tipos de datos como cadena de texto, valores reales y vectores. Otra cosa que se puede apreciar es que una misma propiedad puede tener diferentes datos en cada registro. Lo que no se puede conseguir en otros formatos como CSV. Finalmente se abre un archivo y se vuelca en el mismo los datos del diccionario utilizando json.dump. 
# + import json data = {} data['clients'] = [] data['clients'].append({ 'first_name': 'Sigrid', 'last_name': 'Mannock', 'age': 27, 'amount': 7.17}) data['clients'].append({ 'first_name': 'Joe', 'last_name': 'Hinners', 'age': 31, 'amount': [1.90, 5.50]}) data['clients'].append({ 'first_name': 'Theodoric', 'last_name': 'Rivers', 'age': 36, 'amount': 1.11}) with open('data.json', 'w') as file: json.dump(data, file, indent=4) # - # Al ejecutar este código se genera un archivo data.json en el que se pueden observar los datos. En este archivo todos los datos se encuentran en una única línea, ocupando el mínimo espacio posible. Una ver formateado para facilitar la lectura a las personas se puede observar en la siguiente captura de pantalla. # Este comportamiento por defecto es el más adecuado ya que reduce el tamaño de los archivos generados y por lo tanto reduce el ancho de banda necesario. En caso de que sea necesario obtener archivos JSON formateados para que puedan ser fácilmente leídos por personas se puede utilizar la opción indent la indentación de los valores. # # # Obtener el código JSON como una cadena de texto # # En el caso de que se desee obtener el contenido del archivo JSON en una variable se puede utilizar el método json.dumps. Este devuelve una cadena de texto con el contenido en lugar de guardarlo en un archivo. Esto puede dar un poco más de control si es necesario realizar algunas operaciones sobre la cadena, como firmarla o encriptarla. # # # Leer archivos JSON con Python # # La lectura de archivos JSON es similar al proceso de escritura. Se ha de abrir un archivo y procesar este utilizando el método json.load. El cual devolverá objeto de tipo diccionario sobre el que se puede iterar. A modo de ejemplo se puede importar el archivo generado anteriormente y sacar los resultados por pantalla utilizando el siguiente código. 
with open('data.json') as file: data = json.load(file) for client in data['clients']: print('First name:', client['first_name']) print('Last name:', client['last_name']) print('Age:', client['age']) print('Amount:', client['amount']) print('') # # Convertir una cadena de texto con JSON en un diccionario # # Al igual que anteriormente también se puede convertir una cadena de texto que contiene un objeto JSON en un diccionario de Python. Para esto se ha de utilizar el método json.loads. Este método es de gran utilidad cuando como resultado de un servicio web se obtiene una cadena de texto con un objeto JSON, el cual se puede transforma fácilmente en un diccionario. # # En el siguiente ejemplo se muestra como se procesan los resultados de consultar un API en # + import requests resp = requests.get('http://ip-api.com/json/208.80.152.201') json.loads(resp.content) # - # # Opciones # # El comportamiento de la librería json se puede personalizar utilizando diferentes opciones. Entre algunas de las opciones que se pueden configurar se encuentra la codificación y la ordenación. # # # Codificación Unicode # # Una de las opciones más importantes puede ser la codificación de texto empleada en el archivo. defecto el paquete json genera los archivos en código ASCII. Cuando existen caracteres no ASCII estos serán escapados, aunque puede ser más interesante utilizar en estos casos codificación Unicode. Para conseguir este cambio solamente se ha de configurar la opción ensure_ascii a False. La diferencia se puede apreciar en el siguiente ejemplo. data = {'first_name': 'Daniel', 'last_name': 'Rodríguez'} json.dumps(data) json.dumps(data, ensure_ascii=False) # # Ordenación # # Los objetos JSON son una colección desordenada de conjuntos de para clave valor. El orden de los datos se encuentra definido por la posición en la que estos aparecen en el archivo. En el caos de que sea necesario ordenarlos por la calve se puede configurar la opción sort_keys a True. 
El resultado de utilizar esta opción se pude ver en el siguiente ejemplo. # + data = { 'first_name': 'Sigrid', 'last_name': 'Mannock', 'age': 27, 'amount': 7.17} json.dumps(data, sort_keys=True)
Clases/Clase 22 - Lectura de archivos.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 3: formation control # header to start # %matplotlib notebook import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation import matplotlib as mp import math import pickle import IPython import scipy.linalg # + def getLaplacian(E, n_vertices, directed=False): L = np.zeros((n_vertices, n_vertices)) for e in E: if directed: L[e[1]][e[1]] += 1 L[e[1]][e[0]] = -1 else: L[e[1]][e[1]] += 1 L[e[0]][e[0]] += 1 L[e[1]][e[0]] = -1 L[e[0]][e[1]] = -1 return L def getIncidence(E, n_vertices): D = np.zeros((n_vertices, len(E))) for i, e in enumerate(E): D[e[0]][i] = -1 D[e[1]][i] = 1 return D def getMSD(x, y, E): MSD = np.zeros((len(E),)) for i in range(len(E)): e = E[i] x1 = x[e[0]] x2 = x[e[1]] y1 = y[e[0]] y2 = y[e[1]] d = np.power(x1-x2, 2) + np.power(y1-y2, 2) d = np.sqrt(d) MSD[i] = d return MSD # - # ## Helper function to display results # # This function can be used to display the behavior of the robots in 2D def make_animation(plotx,E,xl=(-2,2),yl=(-2,2),inter=25, display=False): ''' takes a graph and motion of vertexes in 2D and returns an animation E: list of edges (each edge is a pair of vertexes) plotx: a matrix of states ordered as (x1, y1, x2, y2, ..., xn, yn) in the rows and time in columns xl and yl define the display boundaries of the graph inter is the interval between each point in ms ''' fig = mp.figure.Figure() mp.backends.backend_agg.FigureCanvasAgg(fig) ax = fig.add_subplot(111, autoscale_on=False, xlim=xl, ylim=yl) ax.grid() list_of_lines = [] for i in E: #add as many lines as there are edges line, = ax.plot([], [], 'o-', lw=2) list_of_lines.append(line) def animate(i): for e in range(len(E)): vx1 = plotx[2*E[e][0],i] vy1 = plotx[2*E[e][0]+1,i] vx2 = plotx[2*E[e][1],i] vy2 = 
plotx[2*E[e][1]+1,i] list_of_lines[e].set_data([vx1,vx2],[vy1,vy2]) return list_of_lines def init(): return animate(0) ani = animation.FuncAnimation(fig, animate, np.arange(0, len(plotx[0,:])), interval=inter, blit=True, init_func=init) plt.close(fig) plt.close(ani._fig) if(display==True): IPython.display.display_html(IPython.core.display.HTML(ani.to_html5_video())) return ani # We wish the control the formation of 4 robots randomly distributed in the environment to keep the formation shown in the figure of Exercise 2. # ## Question 1 # Assume each agent has state space dynamics $\dot{\mathbf{p}}_i =\mathbf{u}_i$, with $\mathbf{u}_i$ in $\mathbb{R}^2$ and $\mathbf{u} = [\mathbf{u}_1, \mathbf{u}_2, \mathbf{u}_3, \mathbf{u}_4]$. # # Implement the second order linear control law seen in the class # $$\mathbf{u} = -k \mathbf{L} \mathbf{x} + k \mathbf{D z}_{ref}$$ # # where $k>0$ is a positive gain and $\mathbf{D}$ is the incidence matrix of the graph. # # # Simulate the control law for several random initial conditions of the agents (in 2D). What do you observe? How does it compare to the same control law but for a framework with a complete graph? 
# + with open('example_animation.pickle', 'rb') as f: data = pickle.load(f) E = data['E'] n_vertices = 4 L = getLaplacian(E, n_vertices, directed=False) D = getIncidence(E, n_vertices) Px_des = np.array([0, 1, 1, (2-math.sqrt(14))/4]).T Py_des = np.array([0, 0, -1, -(math.sqrt(14)+2)/4]).T x = np.array([0, 0.5, -0.3, -0.1]).T y = np.array([0.1, 0.2, 0.3, 0.1]).T K = 2 * np.eye(4) xs = x ys = y ts = [0] t = 0 dt = 0.001 while t < 10: zx_ref = np.matmul(D.T, Px_des) dx = -np.matmul(K, np.matmul(L, x)) + np.matmul(K, np.matmul(L, Px_des)) zy_ref = np.matmul(D.T, Py_des) dy = -np.matmul(K, np.matmul(L, y)) + np.matmul(K, np.matmul(L, Py_des)) x += dt*dx y += dt*dy xs = np.vstack((xs, x)) ys = np.vstack((ys, y)) t += dt ts.append(t) data = np.zeros((0, xs.shape[0])) for i in range(xs.shape[1]): data = np.vstack((data, xs[:, i].T)) data = np.vstack((data, ys[:, i].T)) plotx = data[:,::50] make_animation(plotx, E, inter=50, display=True) # - # ### Complete Graph # + E = [[0, 1], [1, 2], [0, 2], [0, 3], [2, 3], [1, 3]] n_vertices = 4 L = getLaplacian(E, n_vertices, directed=False) D = getIncidence(E, n_vertices) Px_des = np.array([0, 1, 1, (2-math.sqrt(14))/4]).T Py_des = np.array([0, 0, -1, -(math.sqrt(14)+2)/4]).T x = np.array([0, 0.5, -0.3, -0.1]).T y = np.array([0.1, 0.2, 0.3, 0.1]).T K = 2 * np.eye(4) xs = x ys = y ts = [0] t = 0 dt = 0.001 while t < 10: zx_ref = np.matmul(D.T, Px_des) dx = -np.matmul(K, np.matmul(L, x)) + np.matmul(K, np.matmul(L, Px_des)) zy_ref = np.matmul(D.T, Py_des) dy = -np.matmul(K, np.matmul(L, y)) + np.matmul(K, np.matmul(L, Py_des)) x += dt*dx y += dt*dy xs = np.vstack((xs, x)) ys = np.vstack((ys, y)) t += dt ts.append(t) data = np.zeros((0, xs.shape[0])) for i in range(xs.shape[1]): data = np.vstack((data, xs[:, i].T)) data = np.vstack((data, ys[:, i].T)) plotx = data[:,::50] make_animation(plotx, E, inter=50, display=True) # - # The control law for both the incomplete and complete graph renders the same result. 
# ## Question 2 # Assume each agent has state space dynamics $\dot{\mathbf{p}}_i =\mathbf{u}_i$, with $\mathbf{u}_i$ in $\mathbb{R}^2$ and $\mathbf{u} = [\mathbf{u}_1, \mathbf{u}_2, \mathbf{u}_3, \mathbf{u}_4]$. # # We now consider the following control law # $$\begin{equation} # \mathbf{u} = \mathbf{R}_\mathcal{G}^T(\mathbf{p}) (\mathbf{g}_d - \mathbf{g}_\mathcal{G}(\mathbf{p})) # \end{equation}$$ # where $\mathbf{R}_\mathcal{G}$ is the rigidity matrix associated to the graph of the framework, $\mathbf{g}_d$ is the vector of desired square distance between agents and $\mathbf{g}_\mathcal{G}$ is the measured square distance between each agent. # # Simulate the control law for several random initial conditions of the agents (in 2D). What do you observe? How does it compare to the same control law but for a framework with a complete graph? # + gd = np.array([1, 1, math.sqrt(2), 1.5, 1.5]) R = np.array([[-2, 0, 2, 0, 0, 0, 0, 0], [0, 0, 0, 2, 0, -2, 0, 0], [-2, 2, 0, 0, 2, -2, 0, 0], [(math.sqrt(14)-2)/2, (math.sqrt(14)+2)/2, 0, 0, 0, 0, (2-math.sqrt(14))/2, -(math.sqrt(14)+2)/2], [0, 0, 0, 0, (2+math.sqrt(14))/2, (math.sqrt(14)-2)/2, -(2+math.sqrt(14))/2, (2-math.sqrt(14))/2]]) p = np.array([0, 0.1, 0.5, 0.2, -0.3, 0.3, -0.1, 0.1]).T ps = p ts = [0] t = 0 dt = 0.001 E = [[0, 1], [1, 2], [0, 2], [0, 3], [2, 3]] while t < 10: x = [p[0], p[2], p[4], p[6]] y = [p[1], p[3], p[5], p[7]] u = np.matmul(R.T, gd-getMSD(x, y, E)) p += dt*u ps = np.vstack((ps, p)) t += dt ts.append(t) data = ps.T plotx = data[:,::50] make_animation(plotx, E, inter=50, display=True) # + ### Complete Graph # + gd = np.array([1, 1, math.sqrt(2), 1.5, 1.5, math.sqrt(2)*(math.sqrt(14)+2)/4]) R = np.array([[-2, 0, 2, 0, 0, 0, 0, 0], [0, 0, 0, 2, 0, -2, 0, 0], [-2, 2, 0, 0, 2, -2, 0, 0], [(math.sqrt(14)-2)/2, (math.sqrt(14)+2)/2, 0, 0, 0, 0, (2-math.sqrt(14))/2, -(math.sqrt(14)+2)/2], [0, 0, 0, 0, (2+math.sqrt(14))/2, (math.sqrt(14)-2)/2, -(2+math.sqrt(14))/2, (2-math.sqrt(14))/2], [0, 0, 
(math.sqrt(14)+2)/4, (math.sqrt(14)+2)/4, 0, 0, -(math.sqrt(14)+2)/4, -(math.sqrt(14)+2)/4]]) p = np.array([0, 0.1, 0.5, 0.2, -0.3, 0.3, -0.1, 0.1]).T ps = p ts = [0] t = 0 dt = 0.001 E = [[0, 1], [1, 2], [0, 2], [0, 3], [2, 3], [1, 3]] while t < 10: x = [p[0], p[2], p[4], p[6]] y = [p[1], p[3], p[5], p[7]] u = np.matmul(R.T, gd-getMSD(x, y, E)) p += dt*u ps = np.vstack((ps, p)) t += dt ts.append(t) data = ps.T plotx = data[:,::50] make_animation(plotx, E, inter=50, display=True) # - # ## Question 3 # # How would you compare both control laws? What are the pros and cons of each of them? # # The first controller is only rotation invariant, while the second one is neither. This can be seen from the controller definition, we see that for the first controller it can also be written as, # $$\dot{P}_x = -KLP_x - KLP_x^{\mathrm{des}}$$ # $$\dot{P}_y = -KLP_y - KLP_y^{\mathrm{des}}$$ # we can see that as long as the relative $x$ and $y$ distance are the same as desired the controller will output 0, which disregards the abosolute translation of the system. # # For the second controller for the stable system all required is the distance between vertices are equal to the desired distance thus it is rotation and translation invariant. 
# + # for example assume that you have simulated a formation control in 2D and stored the data in a file # we load the data needed for the display with open('example_animation.pickle', 'rb') as f: data = pickle.load(f) # this is the list of edges (as we usually define them for an undirected graph) E = data['E'] print('the list of edges is:') print(E) # this is the time of simulation t = data['t'] # this is an array containing the evolution of the states of the robot # x[0,:] contains the time evolution of the x variable of robot 1 # x[1,:] contains the time evolution of the y variable of robot 1 # x[2,:] contains the time evolution of the x variable of robot 2 # etc x = data['x'] # since we simulated with a small delta t = 0.001, we want to subsample for display # we just take data every 50ms plotx = x[:,::50] make_animation(plotx, E, inter=50, display=True) # a video showing the behavior of the robots and the connection between the robots should be displayed below
series3/Exercise3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''base'': conda)'
#     name: python385jvsc74a57bd0380030d1298d5a27518acca789ff38fe82bbf2e68b73263de6a6bf23efb7704c
# ---

# +
import numpy as np
import subprocess
import sys
import matplotlib.pyplot as plt
# %matplotlib inline

# Benchmark driver for an external sorting executable that prints one
# "tag||status||milliseconds" line per algorithm on stdout.
EXECUTABLE = "sort.exe"
SEPARATOR = "||"        # field separator in the executable's output lines
T_FLASH_SORT = '1'      # line tag: flash sort timing
T_FLASHY_SORT = '2'     # line tag: flashy sort timing
T_IS_SORTED = '1'       # status token: result array verified sorted
INT = "--int"
FLOAT = "--float"
DOUBLE = "--double"


def eval(problem_sizes, dtypeopt=FLOAT, verbose=False):
    """Run the sort benchmark once per problem size and collect timings.

    NOTE: the name shadows the built-in ``eval``; kept unchanged because it
    is the public entry point of this notebook.

    Parameters
    ----------
    problem_sizes : iterable of int
        Element counts to benchmark.
    dtypeopt : str
        One of INT / FLOAT / DOUBLE, forwarded to the executable.
    verbose : bool
        Print each command before running it.

    Returns
    -------
    (nums, flash_sort_times, flashy_sort_times) lists for the sizes that
    produced both timing lines, or None if the executable wrote to stderr.
    """
    flash_sort_times = []
    flashy_sort_times = []
    nums = []
    for N in problem_sizes:
        res1 = -1
        res2 = -1
        # FIX: pass the command as an argv list so the call also works on
        # POSIX systems — a single command string without shell=True is
        # only interpreted correctly by Windows' CreateProcess.
        argv = [EXECUTABLE, "-s", "-n", str(N), dtypeopt]
        cmd = " ".join(argv)  # human-readable form, used only for printing
        if verbose:
            print("Running:", cmd)
        outputraw = subprocess.run(argv, capture_output=True)
        output = {
            'code': outputraw.returncode,
            'stdout': outputraw.stdout.decode('utf-8'),
            'stderr': outputraw.stderr.decode('utf-8')
        }
        if output['stderr'] != "":
            print("ERROR:\n", output['stderr'])
            return None
        lines = output['stdout'].split('\n')
        placed = 2  # expect exactly one flash and one flashy timing line
        for l in lines:
            tokens = l.split(SEPARATOR)
            # FIX: guard against blank/malformed lines which previously
            # could raise IndexError on tokens[1]/tokens[2].
            if len(tokens) < 3:
                continue
            if tokens[0] == T_FLASH_SORT:
                if tokens[1] == T_IS_SORTED:
                    res1 = float(tokens[2])
                    placed -= 1
            elif tokens[0] == T_FLASHY_SORT:
                if tokens[1] == T_IS_SORTED:
                    res2 = float(tokens[2])
                    placed -= 1
        if placed != 0:
            # One (or both) timing lines missing: report and skip this size.
            print("error occured!\n----\nCMD:", cmd)
            print("\tRETCODE:", output['code'])
            print("\tSTDOUT:", output['stdout'])
            print("\tSTDERR:", output['stderr'])
            print("----")
        else:
            nums.append(N)
            flash_sort_times.append(res1)
            flashy_sort_times.append(res2)
    return nums, flash_sort_times, flashy_sort_times
# -

n = list(range(int(1e5), int(1e7), int(1e5)))
n, y1, y2 = eval(n, dtypeopt=DOUBLE)
print("done")

# Scatter plot of N vs Time
#print(len(n), len(y1), len(y2))
_, ax = plt.subplots()
ax.scatter(n, y1, marker='.', c='red')
ax.scatter(n, y2, marker='.', c='blue')
ax.legend(['Flash', 'Flashy'])
ax.set(title="Runtimes", xlabel='Problem Size', ylabel='Time (ms)')
plt.show()

# Scatter plot of N vs Speedup
_, ax = plt.subplots()
y = np.array(y1)/np.array(y2)
ax.scatter(n, y, c='red')
ax.axhline(1.0)
ax.set(title="Flashy Sort Speedup", xlabel='Problem Size', ylabel='Speedup')
plt.show()
eval.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: clean a raw movie-metadata parquet dump, attach genre labels,
# and produce (1) a multi-label dataframe and (2) a balanced, one-hot
# encoded training sample. Throughout this notebook "gender" is used where
# "genre" is meant (see map_gender: it maps TMDB genre ids to genre names).

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ast import literal_eval
# #!pip install pandas pyarrow
import pyarrow
import glob
import pyarrow.parquet as pq
import re
# Added by Thomas
import src.helper.const as const
import src.helper.helper as hlp

pd.set_option('display.max_colwidth', None)

#files = glob.glob('../data/raw/*.parquet.gzip')
#movies = pd.concat([pq.read_table(fp).to_pandas() for fp in files])
movies = pq.read_table(source="../data/raw/df.parquet.gzip").to_pandas()
movies

# + ### Check null values
movies.isnull().sum()
# there are movies without genre ("gender"): genre_id is the string '[]'
movies = movies.drop(movies[movies['genre_id']=='[]'].index)
# drop movies with not available posters
movies = movies.drop(movies[movies['poster_exists']==False].index)
movies[movies['genre_id']=='[]']

# genre_id holds a stringified list of integer ids, hence this parser.
# TMDB genre id -> genre name lookup table.
map_gender={"28":"Action", "12":"Adventure", "16":"Animation", "35":"Comedy",
            "80":"Crime", "99":"Documentary", "18":"Drama", "10751":"Family",
            "14":"Fantasy", "36": "History", "27":"Horror", "10402" :"Music",
            "9648":"Mystery", "10749":"Romance", "878" :"Science Fiction",
            "10770":"TV Movie", "53":"Thriller", "10752":"War", "37":"Western"}

def map_multiclass(x):
    # Parse the stringified id list (e.g. "[28, 12]") and return a
    # ONE-element list holding the comma-joined genre names, e.g.
    # ['Action,Adventure']. The single joined string is deliberate: the
    # MultiLabelBinarizer below then treats each genre COMBINATION as one
    # class ("multiclass" view), not each genre separately.
    tmp=[]
    for y in x.split(","):
        y=y.replace('[', '')
        y=y.replace(']', '')
        y=y.replace(' ', '')
        if y in map_gender:
            tmp.append(map_gender[y])
    return [','.join(tmp)]

movies['genre_ids2']=movies['genre_id'].apply(lambda x: map_multiclass(x))
movies
# -

movies = movies.drop(movies[movies['poster_url'].isnull()].index)
movies

# Added by Thomas
# Calc distibution of one or multi genres per movie
# NOTE: np.fromstring on text is deprecated in newer numpy — candidate for
# np.fromstring -> np.array(...split(',')) in a later cleanup.
movies["genre_id_count"] = [len(np.fromstring(re.sub("\[|\]| ", "", e), dtype=int, sep=",")) for e in list(movies.genre_id)]
print("Genre distribution:")
movies.genre_id_count.value_counts().sort_index()

# Added by Thomas
# Store cleaned dataframe
movies.to_parquet("../data/interim/df_cleaned.gzip", compression='gzip')

# + tags=[]
## Dist of genres ("genders")
## Top 30 multiclasses (genre combinations)
top30=pd.DataFrame(movies.genre_ids2.value_counts().sort_values(ascending=False)[0:30]).reset_index().rename(columns={'index': 'Multiclass'})
top30.plot.bar( x='Multiclass', y='genre_ids2')
# -

from sklearn.preprocessing import MultiLabelBinarizer
mlb= MultiLabelBinarizer()
# Each row's genre_ids2 is a 1-element list, so each distinct combination
# string becomes its own binary column.
labeled=pd.DataFrame(mlb.fit_transform(movies['genre_ids2']),columns=mlb.classes_)
movies.reset_index(drop=True, inplace=True)
movies_multilabel=pd.concat([movies,labeled],axis=1)
movies_multilabel

movies_multilabel.isnull().sum()

# NOTE: missing () — this displays the bound method, it does not call it.
movies_multilabel.genre_ids2.nunique

movies_multilabel.to_parquet("../data/interim/df_multilabel.parquet.gzip", compression='gzip')

# +
#################################################################################
#################################################################################
############################ DATA FRAME SAMPLE ##################################
#################################################################################
#################################################################################

# +
## Re-read the raw dump and explode the genre list into one row per
## (movie, genre) pair — a single-class view instead of multiclass.
#movies= pd.read_csv('movies - movies.csv')
movies_classes = pq.read_table(source="../data/raw/df.parquet.gzip").to_pandas()
movies_classes

movies_classes['genre_id'] = movies_classes['genre_id'].apply(literal_eval) #convert to list type; type was strange in the excel
movie_gender=movies_classes.explode('genre_id')
movie_gender[movie_gender.original_title=='Submarine D-1'] # the movie appears x times depending on its genres
# -

movie_gender

# +
## merge both datasets: attach the human-readable genre name
gender_list= pd.read_csv('../data/raw/movie_genres - movie_genres.csv')
gender_list=gender_list.rename(columns={"id":"genre_id"})
movie_gender_name=pd.merge(movie_gender,gender_list,left_on='genre_id', right_on='genre_id' ,how="left" )
movie_gender_name[movie_gender_name.original_title=='Submarine D-1']
# -

movie_gender_name

# +
## Check nulls: a poster may be null but the genre must not be
movie_gender_name = movie_gender_name.drop(movie_gender_name[movie_gender_name['poster_url'].isnull()].index)
movie_gender_name
movie_gender_name=movie_gender_name.dropna()
movie_gender_name.isnull().sum()
# -

movie_gender_name.head(5)

top30_separateClass=pd.DataFrame(movie_gender_name.name.value_counts()).reset_index().rename(columns={'index': 'class_'})
top30_separateClass

plt.figure(figsize=(30,10))
plt.bar(top30_separateClass.class_,top30_separateClass.name)
plt.xticks(rotation=45)

# +
from sklearn.preprocessing import OneHotEncoder

# One-hot encode the single genre name per exploded row.
ohe= OneHotEncoder(sparse=False, handle_unknown="ignore").fit(movie_gender_name[['name']])
ohe_class=pd.DataFrame(ohe.transform(movie_gender_name[['name']]),columns=ohe.get_feature_names())
movie_gender_name.reset_index(drop=True, inplace=True)
movies_class = pd.concat([movie_gender_name, ohe_class], axis=1)
movies_class
# -

movies_class.isnull().sum()

# +
movies_class.to_parquet("../data/interim/df_label_correct_exploded.gzip", compression='gzip')

# +
tmp=pd.read_parquet("../data/interim/df_label_correct_exploded.gzip")
# -

# NOTE: missing () — displays the method object, does not call unique().
tmp['name'].unique

genre_col=['x0_Action', 'x0_Adventure', 'x0_Animation', 'x0_Comedy', 'x0_Crime',
           'x0_Documentary', 'x0_Drama', 'x0_Family', 'x0_Fantasy', 'x0_History',
           'x0_Horror', 'x0_Music', 'x0_Mystery', 'x0_Romance', 'x0_Science Fiction',
           'x0_TV Movie', 'x0_Thriller', 'x0_War', 'x0_Western' ]

# Draw a fixed-size sample (1000 rows, seeded) per genre column to balance
# the training set; a movie with several genres can land in several buckets.
buckets=[]
for x in genre_col:
    buckets.append(tmp[tmp[x]==1].sample(1000, random_state=42 ))

# +
samples_df=pd.concat(buckets)
# -

samples_df.head(4)

# Collapse duplicated movies back to one row: OR the one-hot columns via sum...
b1=samples_df[['id']+genre_col].groupby("id").sum()

# +
b1
# -

# ...and keep the first metadata row per movie id.
b2= samples_df[[ "adult", "id","original_title", "popularity", "video" ,"url" ,"poster_url"]].groupby("id").head(1)
b2

b3=pd.merge(b2,b1, on='id')
b3

# Added by Thomas
# Calc distibution of one or multi genres per movie
_ = hlp.get_dist_of_simple_genre_combis(b3, const.GENRE_OHE_COLS, True)

# Changed by Thomas
#movies_class.to_parquet("../data/interim/df_train_v1.gzip", compression='gzip')
b3.to_parquet("../data/interim/df_train_v1.gzip", compression='gzip')
notebooks/01_analyse_movie_paquet_2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [Root]
#     language: python
#     name: Python [Root]
# ---

# DCGAN on MNIST using TensorFlow 1.x graph mode (tf.placeholder, tf.Session,
# tf.layers, tf.contrib) — this API was removed in TF 2.x.

# +
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('D:/MNIST/',one_hot=True)
train_img = mnist.train.images[0:100]
# Rescale pixels from [0,1] to [-1,1] to match the generator's tanh output.
train_img = train_img*2 - 1;
# -

def model_inputs(real_dim, z_dim):
    # Placeholders for real images (flattened, then reshaped to NHWC 28x28x1)
    # and the latent noise vector z.
    inputs_real = tf.placeholder(tf.float32, [None, real_dim], name='inputs_real')
    inputs_real = tf.reshape(inputs_real,[-1,28,28,1])
    inputs_z = tf.placeholder(tf.float32, [None, z_dim], name='inputs_z')
    return inputs_real, inputs_z

def discriminator(x,reuse=False):
    # LeNet-style conv classifier; returns (sigmoid probability, raw logit).
    # reuse=True shares weights between the real- and fake-image branches.
    with tf.variable_scope('discriminator',reuse=reuse):
        conv1 = tf.layers.conv2d(x,6,kernel_size=(1,1),strides=(1,1),activation=tf.nn.relu)
        pool1 = tf.layers.max_pooling2d(conv1,pool_size=(2,2),strides=(2,2))
        conv2 = tf.layers.conv2d(pool1,16,kernel_size=(5,5),strides=(1,1),activation=tf.nn.relu)
        pool2 = tf.layers.max_pooling2d(conv2,pool_size=(2,2),strides=(2,2))
        dense1 = tf.layers.flatten(pool2)
        dense2 = tf.layers.dense(dense1,120,activation=tf.nn.relu)
        dense3 = tf.layers.dense(dense2,84,activation=tf.nn.relu)
        logit = tf.layers.dense(dense3,1)
        out = tf.nn.sigmoid(logit)
        return out,logit

def generator(z,z_dim,reuse=False):
    # Maps z to a 28x28x1 image in [-1,1] (tanh). Spatial path:
    # dense->56x56x1, strided conv->28x28, strided conv->14x14,
    # resize back to 56x56, final strided conv->28x28.
    with tf.variable_scope('generator',reuse=reuse):
        g1 = tf.layers.dense(z,3136,activation=tf.nn.relu)
        g1 = tf.reshape(g1,[-1,56,56,1])
        g1 = tf.contrib.layers.batch_norm(g1,epsilon=1e-5)
        g2 = tf.layers.conv2d(g1,z_dim/2,kernel_size=(3,3),strides=(2,2),padding='same',activation=tf.nn.relu)
        g2 = tf.contrib.layers.batch_norm(g2,epsilon=1e-5)
        g3 = tf.layers.conv2d(g2,z_dim/4,kernel_size=(3,3),strides=(2,2),padding='same',activation=tf.nn.relu)
        g3 = tf.contrib.layers.batch_norm(g3,epsilon=1e-5)
        g3 = tf.image.resize_images(g3,[56,56])
        # final output with one channel
        logit = tf.layers.conv2d(g3,1,kernel_size=(3,3),strides=(2,2),padding='same')
        out = tf.nn.tanh(logit)
        return out,logit

z_size = 100
lr = 0.002

# +
# Build the graph once: one generator, discriminator applied to real and
# fake batches with shared weights (reuse=True on the second call).
input_real,input_z = model_inputs(784,z_size)
g_model,g_logit = generator(input_z,z_size)
d_model_real,d_logit_real = discriminator(input_real)
d_model_fake,d_logit_fake = discriminator(g_model,reuse=True)
# -

print(input_real.shape)

# +
# Standard non-saturating GAN losses on the logits.
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logit_real,labels=tf.ones_like(d_logit_real)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logit_fake,labels=tf.zeros_like(d_logit_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logit_fake,labels=tf.ones_like(d_logit_fake)))

# +
# Split trainable variables by scope so each optimizer updates only its net.
t_vars = tf.trainable_variables()
g_var = [var for var in t_vars if var.name.startswith('generator')]
d_var = [var for var in t_vars if var.name.startswith('discriminator')]
# -

d_train = tf.train.AdamOptimizer(lr).minimize(d_loss,var_list=d_var)
g_train = tf.train.AdamOptimizer(lr).minimize(g_loss,var_list=g_var)

# +
epochs = 500
batch_size = 50

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    g_cost,d_cost = [],[]
    for e in range(epochs):
        for i in range(10):
            batch_img = mnist.train.next_batch(batch_size)
            batch_img = batch_img[0].reshape(batch_size,784)
            batch_img = batch_img*2 - 1;   # rescale to [-1,1] for tanh range
            batch_img = batch_img.reshape(batch_img.shape[0],28,28,1)
            batch_z = np.random.uniform(-1,1,(batch_size,z_size))
            sess.run(d_train,feed_dict={input_real:batch_img,input_z:batch_z})
            sess.run(g_train,feed_dict={input_z:batch_z})
        # Losses of the last mini-batch of the epoch, for logging only.
        d_ = sess.run(d_loss,feed_dict={input_real:batch_img,input_z:batch_z})
        g_ = g_loss.eval(feed_dict={input_z:batch_z})
        if e % 10 == 0:
            d_cost.append(d_)
            g_cost.append(g_)
        if e % 25 == 0:
            print('epoch : ',str(e),' ,d_loss : ',str(d_),' ,g_loss : ',str(g_))
            for j in range(3):
                batch_z = np.random.uniform(-1,1,(batch_size,z_size))
                # NOTE(review): calling generator(...) here adds NEW (untrained)
                # ops to the graph on every sample — the existing g_model tensor
                # should be run instead. Left as-is to preserve behavior.
                sample,_ = sess.run(generator(input_z,z_size,reuse=True),feed_dict={input_z:batch_z})
                sample = np.array(sample)
                plt.imshow(sample[1].reshape(28,28),cmap='Greys_r')
                plt.show()
    sess.close()   # redundant inside `with`, but harmless
# -

plt.plot(d_cost,'r-')
plt.plot(g_cost,'g-')
plt.show()
TensorFlow/DCGAN.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: p36 (auto)
#     language: python
#     name: auto
# ---

# Create a n-1 ordering of ROIs such that each ROI at scale n is assigned to
# that ROI at scale n-1 that it overlaps maximally with. This order will
# determine the organization of ROI information in tables as well as the yet
# to be developed numbering system for ROIs.

import os
import numpy as np
import pandas as pd
import nibabel as nib

# Variables
# Parcellation scales, coarsest to finest ('atoms' = finest atom level).
scales = [7, 12, 20, 36, 64, 122, 197, 325, 444, 'atoms']

# Paths
proj_p = '/home/surchs/GDrive/PhD/Paper/Multiscale_Atlas/Partitions/'
temp_t = os.path.join(proj_p, 'scale_{}.nii.gz')
out_p = '/home/surchs/GDrive/PhD/Paper/Multiscale_Atlas/Labels/roi_order_full.csv'

def find_parent(mask, vol):
    """
    Returns the winner take all overlap parent assignment together with
    the percentage of overlap of the child.

    mask : boolean array selecting the child ROI's voxels.
    vol  : label volume of the parent scale.
    """
    # Count how many child voxels fall into each parent label; the most
    # frequent label wins.
    val, count = np.unique(vol[mask], return_counts=True)
    p_id = np.argmax(count)
    parent = val[p_id]
    overlap = np.round((count[p_id]/np.sum(mask))*100,2)
    return parent, overlap

# Load the templates (one label volume per scale).
temp = {scale:nib.load(temp_t.format(scale)).get_data() for scale in scales}
# Number of ROIs per scale (unique labels minus the 0 background).
n_rois = {scale:len(np.unique(temp[scale]))-1 for scale in scales}

# Find the parents: walk scales finest -> coarsest, assigning each ROI at a
# scale to its maximally overlapping ROI one scale up.
parents = dict()
for sid, scale in enumerate(scales[::-1]):
    if scale == 7:
        continue  # coarsest scale has no parent
    n_roi = n_rois[scale]
    child = scale
    parent = scales[::-1][sid+1]
    # Mask the child by the parents to take care of the removed midline in
    # the rois (child voxels outside any parent ROI are set to background).
    pmask = temp[parent]==0
    ctemp = temp[child]
    ctemp[pmask] = 0
    parents[scale] = [find_parent(ctemp==roi, temp[parent])[0].astype(int) for roi in np.arange(1,n_roi+1)]

# Create the order table from that information: for every finest-scale ROI,
# climb the parent chain up to scale 7 (note `roi` is rebound to the parent
# at each step on purpose).
table = list()
for roi in np.arange(1, n_rois[scales[-1]]+1):
    decomp = [roi]
    for scale in scales[::-1][:-1]:
        parent = parents[scale][roi-1]
        decomp.append(parent)
        roi = parent
    table.append(decomp[::-1])  # reverse so columns run coarse -> fine

# Turn the table into a dataframe for easier sorting and saving
df = pd.DataFrame(table, columns=['s7', 's12', 's20', 's36', 's64', 's122', 's197','s325','s444', 'sATOM'])
df.sort_values(by=['s7', 's12', 's20', 's36', 's64', 's122', 's197','s325','s444', 'sATOM'], inplace=True)
df.to_csv(out_p, index=False)
df.head()
analyses/determine_parcel_hierarchy.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Python dictionary and Pandas dataframe

# ## `dict` in Python

# You may have some idea about the elementary datatypes in Python such as
# `integer`, `float`, `complex`, and `string`. You probably also know that one
# can make `lists` and `arrays` of elementary datatypes to form derived
# datatypes. In Python software design, one of the most useful derived
# datatypes is `dict` denoting a dictionary.
#
# A `dict` is a set of key/value pairs. Here is an example inspired by Star Wars.

# +
swdict={ \
'Luke':23, \
'Leia':23, \
'<NAME>':36,\
'Chewbacca':203\
}

#swdict={'Luke':23, 'Leia':23, '<NAME>':36,'Chewbacca':203} # dict in single-line
# -

print(swdict)

print(type(swdict))

# You can query (i.e. get/obtain) the value of an element (i.e. an entry) in
# the set by providing the corresponding key (i.e. label) as a string argument.

swdict['Chewbacca']

print('\'Chewbacca\' is the key and ', swdict['Chewbacca'], ' is its value')

# Let's update the dictionary, i.e. add a new entry

swdict['<NAME>']=45 # add an entry
print(swdict)

# You can also remove an entry from the dictionary

del swdict['Leia'] # remove an entry
print(swdict)

# You can delete the entire dictionary if needed

del swdict
# uncomment the following line and `Run`
# print(swdict)

# Let's declare the dictionary again and learn some more options

swdict_full={'Luke':23,'Leia':23,'<NAME>':36,'<NAME>':45,'obi':57,'Chewbacca':203}
print(swdict_full)

print(len(swdict_full)) # Number of entries

# fromkeys builds a NEW dict with the given keys; values default to None...
subset=('Leia','Luke')
swdict_subset= swdict_full.fromkeys(subset)
print(swdict_subset)

# ...or to the (single, shared) second argument.
subset=('Leia','Luke')
swdict_subset= swdict_full.fromkeys(subset,'23')
print(swdict_subset)

print(swdict_full.keys())

print(swdict_full.values())

new_character={'Yoda':900}
swdict_full.update(new_character)
print(swdict_full)

# ## Dataframe in Pandas

# ```
# Data Structure   Dimension   Description
# Series           1           1D array (Homogeneous data, Size immutable, Value mutable)
# DataFrame        2           2D array - container of series (Heterogeneous, Size and Value mutable)
# Panel            3           3D array - container of DataFrame
# ```

import pandas as pd

indices=['char-1','char-2','char-3','char-4']
Names=['Luke','Leia','<NAME>','Chebacca']
SW_series_names=pd.Series(Names,indices)
print(SW_series_names)

Ages=[23,23,36,203]
SW_series_ages=pd.Series(Ages,indices)
print(SW_series_ages)

# A DataFrame can be assembled from named Series sharing the same index.
SW_df={'Name':SW_series_names, 'Age':SW_series_ages}
SW_df=pd.DataFrame(SW_df)
print(SW_df)

# Here's another way to declare directly the dataframe.

# +
Names=['Luke','Leia','<NAME>','Chebacca']
Ages=[23,23,36,203]
indices=['char-1','char-2','char-3','char-4']
SW_df={'Name':pd.Series(Names,indices), 'Age':pd.Series(Ages,indices)}
SW_df=pd.DataFrame(SW_df)
print(SW_df)
# -

# You can also use default indices that start with 0 for most purposes.

data=[['Luke',23],['Leia',23],['<NAME>',36],['Chebacca',203]]
SW_df=pd.DataFrame(data,columns=['Name','Age'])
print(SW_df)

# You can access the first entry using the row index `0`

print(SW_df.loc[0])

# Again, you can use named indices if you wish.

SW_df=pd.DataFrame(data,columns=['Name','Age'],index=['char-1','char-2','char-3','char-4'])
print(SW_df)

# Now, if you want to query, you have to use the row's `named index`.

print(SW_df.loc['char-1'])

# For non-string columns, you can do quick statistics with the `describe` function.

SW_df.describe()

# For fun, let's add one more column.

SW_df['Popularity']=[6,9,9,8]
print(SW_df)

SW_df.describe()

# The `corr` function lets you calculate the Pearson correlation between
# numerical columns.
# NOTE(review): recent pandas raises on non-numeric columns here unless
# numeric_only=True is passed — confirm against the pandas version in use.

SW_df.corr()

# The output is a matrix, called the `correlation matrix`. A value of `1.0`
# for the off-diagonal elements means there is perfect correlation between
# the columns. For a small dataset such as this, one would consider a value
# of `> 0.9` to be a good correlation. In any case, what we learn from a
# small value of `0.034` is that `Age` and `Popularity` do not correlate at all!

# If you want to learn more about the formula/algorithm used for determining
# the correlation matrix, then you will have to go through the manual page
# [https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.corr.html](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.corr.html).
#
# The default method to determine the correlation matrix is the Pearson
# method, which is easy to code ourselves. So let's see what's under the hood!
# The elements of the correlation matrix are defined as
# $\rho_{X,Y}=\frac{{\rm cov}(X,Y)}{\sigma_{X}\sigma_{Y}}$, where $X$ and $Y$
# are two columns of the dataframe. The following code clarifies the meaning
# of each term in the formula.

# +
import numpy as np

x=np.array(SW_df['Age'])
y=np.array(SW_df['Popularity'])

mu_x=np.mean(x) # mean
sigma_x=np.std(x) # std. deviation
print(mu_x,sigma_x)

mu_y=np.mean(y) # mean
sigma_y=np.std(y) # std. deviation
print(mu_y,sigma_y)

covariance=np.mean( (x-mu_x) * (y-mu_y) )
correlation=covariance/(sigma_x*sigma_y)
print(correlation)
# -

# A very common function to use with dataframes is to quickly have a glance
# at the histogram of values.

SW_df.plot.hist(bins=200, alpha=0.5)

# There's a cool way to make pairwise scatter plots of all the columns (we'll
# later call each column a feature).

from pandas.plotting import scatter_matrix

scatter_matrix(SW_df, alpha=1.0, figsize=(5, 5));

scatter_matrix(SW_df, alpha=1.0, figsize=(5, 5), diagonal="kde");

# Since, in the scatterplot, we do not see points coinciding with the $y=x$
# line, we have to conclude that the two features `Age` and `Popularity` are
# not correlated.
notebooks/DS_01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:tf2]
#     language: python
#     name: conda-env-tf2-py
# ---

# Pipeline: video frames -> detected anime faces -> FFHQ-style alignment ->
# blur filtering -> color correction/denoise -> StyleGAN training zip.

# +
# based on https://github.com/happy-jihye/FFHQ-Alignment/blob/master/Anime-Face-Alignment
# https://github.com/hysts/anime-face-detector/blob/main/demo.ipynb
# !sudo apt install ffmpeg
# !pip install face-alignment
# !pip install opencv-python
# !git clone https://github.com/NVlabs/stylegan3.git
# anime-face-detector
# !pip install openmim
# !mim install mmcv-full mmdet mmpose -y
# !pip install anime-face-detector --no-dependencies

# +
import os
import sys
from glob import glob
from pathlib import Path
from tqdm import tqdm
import numpy as np
import scipy.ndimage
import cv2
import PIL
from PIL import Image
from shutil import copyfile
from IPython.display import display
import face_alignment
import anime_face_detector

# Working directories for each pipeline stage.
videoDir = "Arcane"
frameDir = "frames"
alignedDir = "alignedFace"
filteredDir = "filteredFace"
preprocessedDir = "preprocessedFace"
dataZip= "arcaneFilteredData.zip"

# FIX: was `preprocesseddDir` (typo) which is undefined -> NameError.
for i in [videoDir,frameDir,alignedDir,filteredDir,preprocessedDir]:
    os.makedirs(i, exist_ok=True)

# +
# # get frames from video
# videoList=glob(videoDir+"/*.mp4")
# # get 2 frame per sec, best jpg quality
# for file in videoList:
#     name=Path(file).stem
#     # !ffmpeg -i "$file" -r 2 -q:v 1 -qmin 1 -qmax 1 "$frameDir"/"$name"_%04d.jpg

# PIL.Image.open(glob(frameDir+"/*.jpg")[4])

# +
def image_align(src_file, dst_file, face_landmarks, output_size=256, transform_size=1024, enable_padding=True, use_landmark_28=False):
    """Crop, rotate and scale one face out of src_file into dst_file.

    Align function from FFHQ dataset pre-processing step
    https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py

    face_landmarks : 68-point dlib-style landmarks, or (with
        use_landmark_28=True) the 28-point anime-face-detector layout.
    """
    if(use_landmark_28==False):
        # 68-point layout (dlib / face-alignment).
        lm = np.array(face_landmarks)
        lm_chin          = lm[0  : 17, :2]  # left-right
        lm_eyebrow_left  = lm[17 : 22, :2]  # left-right
        lm_eyebrow_right = lm[22 : 27, :2]  # left-right
        lm_nose          = lm[27 : 31, :2]  # top-down
        lm_nostrils      = lm[31 : 36, :2]  # top-down
        lm_eye_left      = lm[36 : 42, :2]  # left-clockwise
        lm_eye_right     = lm[42 : 48, :2]  # left-clockwise
        lm_mouth_outer   = lm[48 : 60, :2]  # left-clockwise
        lm_mouth_inner   = lm[60 : 68, :2]  # left-clockwise
        mouth_left   = lm_mouth_outer[0]
        mouth_right  = lm_mouth_outer[6]
    else:
        # 28-point anime-face-detector layout.
        lm = np.array(face_landmarks)
        lm_eye_left      = lm[11 : 17, :2]  # left-clockwise
        lm_eye_right     = lm[17 : 23, :2]  # left-clockwise
        mouth_left   = lm[24, :2]
        mouth_right  = lm[26, :2]

    # Calculate auxiliary vectors.
    eye_left     = np.mean(lm_eye_left, axis=0)
    eye_right    = np.mean(lm_eye_right, axis=0)
    eye_avg      = (eye_left + eye_right) * 0.5
    eye_to_eye   = eye_right - eye_left
    mouth_avg    = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # Load image.
    img = PIL.Image.open(src_file)

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad (reflect + blurred feathering when the quad leaves the image).
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
    #display(img)

    # Save aligned image.
    img.save(dst_file, quality=100, subsampling=0)


landmarks_detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._3D, flip_input=False)
# FIX: `detector` was used below but never created -> NameError. Build the
# anime face detector once, as in the anime-face-detector demo notebook.
detector = anime_face_detector.create_detector('yolov3')

# +
#get face image from frame
for frameFile in tqdm(glob(frameDir+"/*.jpg")):
    name=Path(frameFile).stem

    ######################## use anime face detector landmark to align
    image = cv2.imread(frameFile)
    preds = detector(image)
    for i, face_landmark in enumerate(preds):
        # bbox[4] is the detection score; keypoints[:,2] are per-point scores.
        if face_landmark["bbox"][4]<0.5 or np.mean(face_landmark["keypoints"][:,2])<0.3:
            continue #skip low confidence
        aligned_face_path = os.path.join(alignedDir, name+"_"+str(i).zfill(4)+".jpg")
        image_align(frameFile, aligned_face_path, face_landmark["keypoints"],use_landmark_28=True)

    ######################## use face-alignment landmark to align
    # face_landmarks=landmarks_detector.get_landmarks(frameFile)
    # if face_landmarks is None:
    #     continue #skip none output
    # for i, face_landmark in enumerate(face_landmarks):
    #     aligned_face_path = os.path.join(alignedDir, name+"_"+str(i).zfill(4)+".jpg")
    #     image_align(frameFile, aligned_face_path, face_landmark)

# +
#filter blurry image: keep only images whose max Laplacian response exceeds
#the threshold (sharper edges -> larger response).
threshold=70
for i,file in tqdm(enumerate(glob(alignedDir+"/*.jpg"))):
    name=Path(file).name
    image = cv2.imread(file)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # fm= cv2.Laplacian(gray, cv2.CV_64F).var()
    fm=np.max(cv2.convertScaleAbs(cv2.Laplacian(gray,3)))
    if threshold < fm:
        # display(Image.open(file))
        copyfile(file, filteredDir+"/"+name)

# +
# color correction and denoise
def better_cb(img, percent=1):
    # Simple per-channel color balance by clipping `percent` of the histogram
    # tails; from https://github.com/luftj/MaRE/blob/4284fe2b3307ca407e87e3b0dbdaa3c1ef646731/simple_cb.py
    if not percent or percent == 0 or percent == 100:
        return img
    out_channels = []
    cumstops = (
        img.shape[0] * img.shape[1] * percent / 200.0,
        img.shape[0] * img.shape[1] * (1 - percent / 200.0),
    )
    for channel in cv2.split(img):
        cumhist = np.cumsum(cv2.calcHist([channel], [0], None, [256], (0, 256)))
        low_cut, high_cut = np.searchsorted(cumhist, cumstops)
        lut = np.concatenate(
            (
                np.zeros(low_cut),
                np.around(np.linspace(0, 255, high_cut - low_cut + 1)),
                255 * np.ones(255 - high_cut),
            )
        )
        out_channels.append(cv2.LUT(channel, lut.astype("uint8")))
    return cv2.merge(out_channels)


for i, file in tqdm(enumerate(glob(filteredDir + "/*.jpg"))):
    name = Path(file).name
    image = cv2.imread(file)
    image = better_cb(image)  # color correction
    image = cv2.fastNlMeansDenoisingColored(image, None, 3, 3, 7, 21)  # denoise
    img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    img.save(preprocessedDir + "/" + name, quality=100, subsampling=0)
# -

#display
# FIX: was `preprocesseddDir` (typo) -> NameError.
for i,file in tqdm(enumerate(glob(preprocessedDir+"/*.jpg")[0:10])):
    display(Image.open(file))

print(len(glob(filteredDir+"/*.jpg")))

# make zip file
# !cd stylegan3 && python dataset_tool.py --source="../$preprocessedDir" --dest="../$dataZip"
1.stylegan_preprocess_data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Outliers detection: 2D application
#
# This notebook will be tested on Solar dataset.
#
# #### References:
# - ["Comparing anomaly detection algorithms for outlier detection on toy datasets"](http://scikit-learn.org/dev/auto_examples/plot_anomaly_comparison.html#sphx-glr-auto-examples-plot-anomaly-comparison-py).

# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')

import sys
sys.path.append('../../')
from datasets import solar
from tools.reader import get_dcol
import numpy as np, pandas as pd
import seaborn as sns
sns.set(style="white", color_codes=True)
from scipy import stats

# ### functions

## plot outliers detection results
def plot_outliers(X:'array', clf:'scikit estimator',title:str = '', outliers_fraction:float = 0.25):
    """Fit `clf` on the 2-column array X and plot its decision surface,
    the threshold contour at `outliers_fraction`, and the labeled points
    (yellow = predicted outlier, black = inlier)."""
    import matplotlib.pyplot as plt
    xx, yy = np.meshgrid(np.linspace(np.min(X[:,0]), np.max(X[:,0]), 100),
                         np.linspace(np.min(X[:,1]), np.max(X[:,1]), 100))
    import time
    t0 = time.time()
    #try:
    #    y_pred = clf.fit(X).predict(X)
    #except:
    #    y_pred = clf.fit_predict(X)
    try:
        # LocalOutlierFactor path: fit_predict + stored factor scores.
        y_pred = clf.fit_predict(X)
        scores_pred = clf.negative_outlier_factor_
    except AttributeError:
        # FIX: was a bare `except:` that swallowed every error (including
        # real fit failures). Only the missing-attribute case is expected
        # for estimators without negative_outlier_factor_.
        clf.fit(X)
        scores_pred = clf.decision_function(X)
        y_pred = clf.predict(X)
    threshold = stats.scoreatpercentile(scores_pred, 100 * outliers_fraction)
    t1 = time.time()
    try:
        # Some estimators expose the raw score via the private method.
        Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
    except AttributeError:
        # FIX: narrowed from bare `except:` for the same reason as above.
        Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    subplot = plt.subplot()
    subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7), cmap=plt.cm.Blues_r)
    a = subplot.contour(xx, yy, Z, levels=[threshold], linewidths=2, colors='red')
    subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()], colors='orange')
    colors = np.array(['yellow', 'black'])
    # y_pred is in {-1, +1}; (y_pred + 1) // 2 maps it to {0, 1} color index.
    plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2])
    plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
             transform=plt.gca().transAxes, size=15, horizontalalignment='right')
    plt.xlabel('v1')
    plt.ylabel('v2')
    plt.title(title, fontsize = 14)
    plt.show()

# ### load data

# load data
data, dcol = solar.load()
# select columns
data = data[['hour','DSWRF267', 'y']]
data.rename(columns = {'DSWRF267' : 'v1', 'y' : 'v2'}, inplace = True)
# clean
del(dcol)
# display info
data.info()

# #### per hour

# select rows
hour = 11
idata = data[data.hour==hour]
idata.drop('hour', axis = 1, inplace = True)
# data preparation
X = idata[['v1','v2']].values

# ### display data

g = sns.jointplot(x="v1", y="v2", data=idata, kind = 'hex', color = 'red')

# ### Robust covariance
#
# [covariance.EllipticEnvelope](http://scikit-learn.org/dev/modules/generated/sklearn.covariance.EllipticEnvelope.html#sklearn.covariance.EllipticEnvelope): This one assumes the data is Gaussian and learns an ellipse. It thus degrades when the data is not unimodal. Notice however that this estimator is robust to outliers.

from sklearn.covariance import EllipticEnvelope
outliers_fraction = 0.25
clf = EllipticEnvelope(contamination=outliers_fraction, support_fraction = None, assume_centered = True)
plot_outliers(X, clf,title = 'Robust covariance', outliers_fraction = outliers_fraction)

# ### One-Class SVM:
#
# [svm.OneClassSVM](http://scikit-learn.org/dev/modules/generated/sklearn.svm.OneClassSVM.html#sklearn.svm.OneClassSVM): it is known to be sensitive to outliers and thus does not perform very well for outlier detection. This estimator is best suited for novelty detection when the training set is not contaminated by outliers. That said, outlier detection in high-dimension, or without any assumptions on the distribution of the inlying data is very challenging, and a One-class SVM might give useful results in these situations depending on the value of its hyperparameters.

from sklearn import svm
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
gamma = 0.25
estimator = svm.OneClassSVM(nu=outliers_fraction, kernel="rbf",gamma=gamma)
clf = Pipeline([
    ('scaler', StandardScaler()),
    ('estimator', estimator),
])
plot_outliers(X, clf,title = 'One-Class SVM')

# ### Isolation Forest:
#
# [ensemble.IsolationForest](http://scikit-learn.org/dev/modules/generated/sklearn.ensemble.IsolationForest.html#sklearn.ensemble.IsolationForest): it seem to perform reasonably well for multi-modal data sets.

from sklearn.ensemble import IsolationForest
outliers_fraction = 0.3
clf = IsolationForest(
    n_estimators = 100,
    max_samples = 'auto',
    max_features = 1.,
    bootstrap = False,
    #behaviour='new',
    contamination=outliers_fraction,
    random_state=42,
    n_jobs = -1
)
plot_outliers(X, clf,title = 'Isolation Forest')

# ### Local Outlier Factor:
#
# [neighbors.LocalOutlierFactor](http://scikit-learn.org/dev/modules/generated/sklearn.neighbors.LocalOutlierFactor.html#sklearn.neighbors.LocalOutlierFactor): it seem to perform reasonably well for multi-modal data sets. The advantage of neighbors.LocalOutlierFactor is shown for the third data set, where the two modes have different densities.

from sklearn.neighbors import LocalOutlierFactor
outliers_fraction = 0.25
clf = LocalOutlierFactor(
    n_neighbors=25,
    contamination=outliers_fraction)
plot_outliers(X, clf,title = 'Local Outlier Factor')

# ## Median Absolute Deviation of RESIDUES:

# +
from preprocessing.outliers import median2D

# loop of hours (currently restricted to the first iteration and pinned to 11)
for ihour in range(24)[:1]:
    ihour = 11
    idata = data[data.hour==ihour]
    idata.drop('hour', axis = 1, inplace = True)
    # data preparation
    X = idata[['v1','v2']].values
    isoutlier = median2D.launch(X[:,0], X[:,1], percent=20., isplot=True)
# -

# ### 2D-Gaussian:

# +
from preprocessing.outliers import multigaussian as ms

# loop of hours (currently restricted to the first iteration and pinned to 11)
for ihour in range(24)[:1]:
    ihour = 11
    idata = data[data.hour==ihour]
    idata.drop('hour', axis = 1, inplace = True)
    # data preparation
    X = idata[['v1','v2']].values
    threshold = .4
    X_filtered, dinfo = ms.launch_array(X, threshold, isdeep=True)
notebooks/analysis/analysis_anomalies/notebook-testing_outliers_detection-2D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [py27] # language: python # name: Python [py27] # --- round(2.484,1) a=2.543 c=int(a) print c a=0 b=1 x=b while x<30: print x x=a+b a=b b=x a=0 while a<100: a=a+1 if (a%3==0 and a%5==0): print 'FizzBuzz' #continue elif a%5==0: print 'Buzz' #continue elif a%3==0: print 'Fizz' #continue else: print a print 'program completed' x='100' x=int(x) while x < 1000: x=x+1 x=str(x) a=x[0] b=x[1] c=x[2] x=int(x) y=int(a)**3+int(b)**3+int(c)**3 if x==y: print 'success' print x a = raw_input ("Enter a positive number greater than 10") x=int(a) while x>1: if int(a)%2==0: x=int(a)/2 else: x=int(a)*3+1 print x a=x a = raw_input ('enter a 4 digit number with at least two unequal numbers') print a x=int(a) while x!=6174: asc = "".join(sorted(str(a))) dsc = "".join(sorted(str(a),reverse=True)) print asc print dsc x=int(dsc)-int(asc) a=x print x print 'program completed'
asignments/Assignment1_06Aug2017.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Operators
#
# > API details.

# +
#default_exp operators
# -

#export
from smpr3d.kernels import *
import torch as th
import numpy as np
import math as m
import numba.cuda as cuda


#export
def calc_psi(r, t, z, out):
    """Launch `psi_kernel` to fill `out` in place; one thread per output element.

    :param r: K x 2 scan positions
    :param t: object/transmission tensor consumed by the kernel
    :param z: exit-wave tensor consumed by the kernel
    :param out: MY x MX buffer, zeroed here and filled by the kernel
    :return: out
    """
    out[:] = 0
    K = r.shape[0]
    MY, MX = out.shape
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(K * MY * MX / threadsperblock)
    psi_kernel[blockspergrid, threadsperblock](r, t, z, out)
    return out


# +
#export
# -

#export
def AtF2(z, psi, r, out):
    """Launch `AtF2_kernel` (adjoint accumulation); fills `out` in place.

    :param z: K x MY x MX
    :param psi: B x K x MY x MX
    :param r: K x 2
    :param out: B x NY x NX
    :return: out
    """
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(z.shape) / threadsperblock)
    AtF2_kernel[blockspergrid, threadsperblock](z, psi, r, out)
    return out


#export
def dS(z, z_old, psi, psi_int, psi_int_max, r, out, alpha=0.1):
    """Launch `dS_kernel` to accumulate the S-matrix update step into `out`.

    :param z: K x MY x MX
    :param z_old: K x MY x MX
    :param psi: B x K x MY x MX
    :param psi_int: B x K x MY x MX
    :param psi_int_max: B x K
    :param r: K x 2
    :param out: B x NY x NX (complex; passed to the kernel as a real view)
    :param alpha: float step-size/regularization parameter
    :return: out
    """
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(z.shape) / threadsperblock)
    dS_kernel[blockspergrid, threadsperblock](z, z_old, psi, psi_int, psi_int_max, alpha, r, th.view_as_real(out))
    return out


#export
def A_realspace(r, t, psi, out):
    """Real-space forward operator: launch `A_realspace_kernel` into `out`.

    :param r: K x 2
    :param t: BB x NY x NX (complex)
    :param psi: B x K x MY x MX (complex)
    :param out: K x MY x MX (complex), filled in place
    :return: out
    """
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(out.shape) / threadsperblock)
    # print(r.shape,t.shape,psi.shape,out.shape)
    A_realspace_kernel[blockspergrid, threadsperblock](r, th.view_as_real(t),
                                                       th.view_as_real(psi), th.view_as_real(out))
    return out


# +
#export
def calc_psi_denom(r, t, out):
    """Accumulate the probe-update denominator via `psi_denom_kernel`.

    :param r: K x 2
    :param t: BB x NY x NX
    :param out: BB x MY x MX, zeroed here and filled by the kernel
    :return: out
    """
    out[:] = 0
    K = r.shape[0]
    BB, MY, MX = out.shape
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(BB * K * MY * MX / threadsperblock)
    psi_denom_kernel[blockspergrid, threadsperblock](r, t, out)
    return out
# -

#export
def Qoverlap_real2(r, z, out):
    """Real-space overlap update via `overlap_kernel_real2`; `out` is
    initialized to 1 (multiplicative accumulation) before the launch.

    :param r: K x 2
    :param z: BB x K x MY x MX
    :param out: BB x NY x NX
    :return: out
    """
    BB = out.shape[0]
    K = r.shape[0]
    out[:] = 1
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(BB * K * np.prod(z.shape) / threadsperblock)
    overlap_kernel_real2[blockspergrid, threadsperblock](r, z, out)
    return out


#export
def split(S, r, MY, MX):
    """Crop MY x MX patches out of the S-matrix at each scan position.

    :param S: B x NY x NX (complex)
    :param r: K x 2 integer crop origins
    :param MY: int patch height
    :param MX: int patch width
    :return: B x K x MY x MX complex64 tensor of crops
    """
    B = S.shape[0]
    K, _ = r.shape
    out = th.zeros((B, K, MY, MX), dtype=th.complex64, device=S.device)
    gpu = cuda.get_current_device()
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK // 2
    blockspergrid = m.ceil(np.prod(out.shape) / threadsperblock)
    stream = th.cuda.current_stream().cuda_stream
    split_kernel4[blockspergrid, threadsperblock, stream](S, r, out)
    return out


# +
#export
def A(S, Psi, r, r_min, out=None, Mx=0, My=0):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included

    NOTE(review): the docstring parameter names below predate the current
    signature (`Psi` carries `.phase_factors`); confirm against the kernels
    module.  Also, calling with `out=None` and `My`/`Mx` unset falls into the
    `out[:] = 0` branch and raises — presumably intentional "must pass one of
    the two", but worth confirming.

    :param S: B x NY x NX
    :param phase_factors: B x D x K x 2
    :param r: D x K x 2
    :param out: D x K x MY x MX
    :return: exit waves in out
    """
    B = S.shape[0]
    if out is None and My > 0 and Mx > 0:
        D, K, _ = r.shape
        out = th.zeros((D, K, My, Mx, 2), dtype=th.float32, device=S.device)
    else:
        out[:] = 0
    D, K, MY, MX, _ = out.shape
    gpu = cuda.get_current_device()
    threadsperblock = 128#gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(np.array((B, MX, MY))) / threadsperblock)
    smatrix_forward_kernel[blockspergrid, threadsperblock,
                           th.cuda.current_stream().cuda_stream]\
        (th.view_as_real(S), Psi.phase_factors, r, r_min, out)
    return th.view_as_complex(out)


@th.jit.script
def complex_matmul(a, b):
    """
    Complex matrix multiplication of tensors a and b, stored as (..., 2)
    real/imaginary pairs.  (There is no `conjugate` parameter despite the
    original comment; b is used as-is.)
    """
    are, aim = th.unbind(a, -1)
    bre, bim = th.unbind(b, -1)
    real = are @ bre - aim @ bim
    imag = are @ bim + aim @ bre
    return th.stack([real, imag], -1)


def A_fast_full2(S, phase_factors, r, r_min, MY, MX):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: K x B x 2
    :param r: K x 2
    :param out: K x MY x MX x 2
    :return: exit waves in out
    """
    B = S.shape[0]
    K, _ = r.shape
    out = th.zeros((K, MY, MX, B, 2), dtype=th.float32, device=S.device)
    K, MYMX, _, _, _ = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = 256  # gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(np.array((K, MX, MY, B))) / threadsperblock)
    phase_factors2 = phase_factors.unsqueeze(2)
    # 1 - get crops from S-matrix
    split_kernel[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    out = out.view((K, MY * MX, B, 2))
    # 2 - complex batched matmul: K x MY*MX x B x 2 @ K x B x 1 x 2
    # print(out.shape)
    # print(phase_factors2.shape)
    exitwaves = complex_matmul(out, phase_factors2)
    # 3 - reshape
    exitwaves = exitwaves.view((K, MY, MX, 2))
    return exitwaves


def A_fast_full3(S, phase_factors, r, r_min, MY, MX):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: K x B
    :param r: K x 2
    :param out: K x MY x MX
    :return: exit waves in out
    """
    B = S.shape[0]
    K, _ = r.shape
    out = th.zeros((K, MY, MX, B, 2), dtype=th.float32, device=S.device)
    K, MYMX, _, _, _ = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = 128  # gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(np.array((K, MX, MY, B))) / threadsperblock)
    # 1 - get crops from S-matrix
    split_kernel[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    # threadsperblock = 128 # gpu.MAX_THREADS_PER_BLOCK
    # blockspergrid = m.ceil(np.prod(np.array((K, B))) / threadsperblock)
    # # 1 - get crops from S-matrix
    # split_kernel2[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    out = out.view((K, MY * MX, B, 2))
    out = th.view_as_complex(out)
    # 1.5 - convert to cupy
    # 2 - complex batched matmul: K x MY*MX x B @ K x B x 1
    # print(out.shape)
    # print(phase_factors2.shape)
    # print(out.dtype)
    # print(phase_factors2.dtype)
    phase_factors2 = phase_factors.unsqueeze(2)
    exitwaves = out @ phase_factors2
    # 3 - reshape
    exitwaves = exitwaves.view((K, MY, MX))
    #4 convert to pytorch
    return exitwaves


def A_fast_full5(S, phase_factors, r, r_min, MY, MX):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: K x B
    :param r: K x 2
    :param out: K x MY x MX
    :return: exit waves in out
    """
    B = S.shape[0]
    K, _ = r.shape
    out = th.zeros((K, B, MY, MX), dtype=th.complex64, device=S.device)
    K, B, MY, MX = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = gpu.MAX_THREADS_PER_BLOCK // 2
    blockspergrid = m.ceil(np.prod(np.array((K, B, MY, MX))) / threadsperblock)
    # 1 - get crops from S-matrix
    split_kernel4[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, th.view_as_real(out))
    # threadsperblock = 128 # gpu.MAX_THREADS_PER_BLOCK
    # blockspergrid = m.ceil(np.prod(np.array((K, B))) / threadsperblock)
    # # 1 - get crops from S-matrix
    # split_kernel2[blockspergrid, threadsperblock, stream](th.view_as_real(S), r, out)
    out = out.view((K, B, MY * MX))
    # 1.5 - convert to cupy
    # 2 - complex batched matmul: K x 1 x B @ K x B x MY*MX --> K x 1 x MY * MX
    # print(out.shape)
    # print(phase_factors2.shape)
    # print(out.dtype)
    # print(phase_factors2.dtype)
    phase_factors2 = phase_factors.unsqueeze(1)
    exitwaves = phase_factors2 @ out
    # 3 - reshape
    exitwaves = exitwaves.view((K, MY, MX))
    #4 convert to pytorch
    return exitwaves


def A_fast_full4(S, phase_factors, r, r_min, out=None, Mx=0, My=0):
    """
    Fastest version, takes precomputed phase factors, assumes S-matrix with beam tilt included
    :param S: B x NY x NX
    :param phase_factors: B x D x K x 2
    :param r: D x K x 2
    :param out: D x K x MY x MX
    :return: exit waves in out
    """
    B = S.shape[0]
    if out is None and My > 0 and Mx > 0:
        D, K, _ = r.shape
        out = th.zeros((D, K, My, Mx, 2), dtype=th.float32, device=S.device)
    D, K, MY, MX, _ = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    tbp = int(gpu.MAX_THREADS_PER_BLOCK**(1/3))
    # max dim of thread block (1024,1024,64), with a total of 1024 max
    # max dim of grid (2^32-1 , 2^16-1, 2^16-1)
    # (2^32-1 , 65535, 65535)
    threadsperblock = (tbp, tbp, tbp)
    # NOTE(review): `np.int` is removed in NumPy >= 1.24 — this line will raise
    # AttributeError on modern NumPy; should become `int` or `np.int64`.
    blockspergrid = tuple(np.ceil((K/tbp, MY/tbp, MX/tbp)).astype(np.int))# m.ceil(np.prod(np.array((B, MX, MY))) / threadsperblock)
    smatrix_forward_kernel_fast_full4[blockspergrid, threadsperblock, stream](th.view_as_real(S), phase_factors, r, r_min, out)
    return out
# -

#export
from smpr3d.kernels import smatrix_backward_kernel_S
def AH_S(z, Psi, r, r_min, out=None, tau = th.tensor([1.0]), Ny=-1, Nx=-1):
    """
    Adjoint S-matrix operator for the full S-matrix. Expects pre-computed phase-factors as inputs.

    NOTE(review): `tau` is a mutable default tensor and `tau /= (K * D)` below
    mutates it IN PLACE, so the default shrinks on every call that relies on
    it — classic mutable-default bug; confirm callers always pass `tau`.

    :param z: D x K x My x Mx
    :param Psi: D x My x Mx
    :param r: D x K x 2
    :param r_min: 2
    :param out: B x NY x NX x 2
    :param Ny: optional, int
    :param Nx: optional, int
    :return: result of adjoint S-matrix operator, shape (B x NY x NX x 2)
    """
    D, K, MY, MX = z.shape
    B = Psi.phase_factors.shape[0]
    if out is None and Ny > 0 and Nx > 0:
        out_is_gradient = True
        out = th.zeros((B, Ny, Nx, 2), dtype=th.float32, device=z.device)
    else:
        out_is_gradient = False
    tau /= (K * D)
    # shape D
    mean_probe_intensities = th.norm(Psi, p=2, dim=(1, 2))
    mean_probe_intensities /= MX * MY
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = 128#gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(np.array((B, MY, MX))) / threadsperblock)
    smatrix_backward_kernel_S[blockspergrid, threadsperblock, stream] \
        (th.view_as_real(z), Psi.phase_factors, mean_probe_intensities, r, r_min, out, tau)
    if out_is_gradient:
        out /= (K * D)
    return th.view_as_complex(out)


#export
from smpr3d.kernels import phase_factor_kernelDBK
def smatrix_phase_factorsBDK(Psi, r, take_beams, q, B, out=None):
    """
    Abbreviations:
        B: number of (input) beams in S-matrix
        D: number of scans/ aperture functions
        K: number of scan positions
        MY/MX: detector shape
        NY/NX: S-matrix shape

    :param Psi: q D x B
    :param r: D x K x 2
    :param take_beams: MY x MX boolean mask selecting the B active beams
    :param q: 2 x MY x MX
    :param out: B x D x K x 2, allocated when None, zeroed otherwise
    :return: out, filled by `phase_factor_kernelDBK`
    """
    if out is None:
        D, K, _ = r.shape
        out = th.zeros((B, D, K, 2), dtype=th.float32, device=Psi.device)
    else:
        out[:] = 0
    _, D, K, c = out.shape
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = 128#gpu.MAX_THREADS_PER_BLOCK
    blockspergrid = m.ceil(np.prod(np.array(out.shape[:-1])) / threadsperblock)
    # broadcast the detector mask across the leading dims, then gather the
    # B active aperture values and scattering vectors
    tb = take_beams[None, ...].expand(*Psi.shape)
    Psi_DB = Psi[tb].reshape(D, B)
    tb = take_beams[None, ...].expand(*q.shape)
    qB = q[tb].reshape(2, B)
    phase_factor_kernelDBK[blockspergrid, threadsperblock, stream](th.view_as_real(Psi_DB), r, qB, out)
    return out
#export
from smpr3d.kernels import phase_factor_kernelKB
def smatrix_phase_factorsKB(Psi, r, take_beams, q, q_indices, B, out=None):
    """
    Precompute the per-scan-position phase factors of the S-matrix beams.

    Abbreviations:
        B: number of (input) beams in S-matrix
        K: number of scan positions
        MY/MX: detector shape
        NY/NX: S-matrix shape

    :param Psi: q MY x MX
    :param r: K x 2
    :param take_beams: MY x MX boolean mask selecting the B active beams
    :param q: 2 x MY x MX
    :param q_indices: 2 x MY x MX
    :param out: K x B x 2 buffer; allocated here when None, zeroed otherwise
    :return: out, filled in place by `phase_factor_kernelKB`
    """
    # Prepare the output buffer: fresh allocation on first use, otherwise
    # reuse the caller's tensor after clearing it.
    if out is None:
        n_positions, _ = r.shape
        out = th.zeros((n_positions, B, 2), dtype=th.float32, device=Psi.device)
    else:
        out[:] = 0
    K, B, c = out.shape

    # Launch configuration: one thread per (position, beam) output element.
    gpu = cuda.get_current_device()
    stream = th.cuda.current_stream().cuda_stream
    threadsperblock = 128  # gpu.MAX_THREADS_PER_BLOCK
    n_elements = np.prod(np.array(out.shape[:-1]))
    blockspergrid = m.ceil(n_elements / threadsperblock)

    # Gather the B active aperture values and their scattering vectors.
    beam_mask = take_beams.expand(*Psi.shape)
    Psi_B = Psi[beam_mask].reshape(B)
    beam_mask = take_beams[None, ...].expand(*q_indices.shape)
    qB = q[beam_mask].reshape(2, B)

    phase_factor_kernelKB[blockspergrid, threadsperblock, stream](
        th.view_as_real(Psi_B), r, qB, out)
    return out
nbs/30_operators.ipynb
// --- // jupyter: // jupytext: // text_representation: // extension: .scala // format_name: light // format_version: '1.5' // jupytext_version: 1.14.4 // kernelspec: // display_name: Apache Toree - Scala // language: scala // name: apache_toree_scala // --- // # 03: WordCount - Alternative Implementations of WordCount // // This exercise also implements *Word Count*, but it uses a slightly simpler approach. It also shows one way to make the code more configurable. We'll define variables for the input and output locations. The corresponding Spark program, [WordCount3.scala](https://github.com/deanwampler/spark-scala-tutorial/blob/master/src/main/scala/sparktutorial/WordCount3.scala) uses a utility library to support command-line arguments. (The library demonstrates some idiomatic, but fairly advanced Scala code, but you can ignore the details and just use it.) // // Next, instead of using the old approach of creating a `SparkContext`, like we did in <a href="02_WordCount.ipynb" target="02_WC">02_WordCount</a>, we'll use the now recommended approach of creating a `SparkSession` and extracting the `SparkContext` from it when needed. Finally, we'll also use [Kryo Serialization](http://spark.apache.org/docs/latest/tuning.html), which provides better compression and therefore better utilization of memory and network bandwidth (not that we really need it for this small dataset...). // // This version also does some data cleansing to improve the results. The sacred text files included in the `data` directory, such as `kjvdat.txt` are actually formatted records of the form: // // ```text // book|chapter#|verse#|text // ``` // // That is, pipe-separated fields with the book of the Bible (e.g., Genesis, but abbreviated "Gen"), the chapter and verse numbers, and then the verse text. We just want to count words in the verses, although including the book names wouldn't change the results significantly. 
(Now you can figure out the answer to one of the exercises in the previous example...) // The corresponding Spark program is [WordCount2.scala](https://github.com/deanwampler/spark-scala-tutorial/blob/master/src/main/scala/sparktutorial/WordCount2.scala). It shows you how to structure a Spark program, including imports and one way to construct the required [SparkContext](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.SparkContext)
//
// We'll use the KJV Bible text again. Subsequent exercises will add the ability to specify different input sources using command-line arguments.
//
// This time, we'll define variables for the input and output data locations. So, if you want to use the same code to process different data, just edit the next cell:

val in = "../data/kjvdat.txt"    // input file
val out = "output/kjv-wc3"       // output location (directory)

// Like before, we read the text file, convert the lines to lower case, and tokenize into words. Let's start this time by defining a helper method that handles the record format; it will strip off the leading `book|chapter#|verse#|`, leaving just the verse text. We do this by splitting the line on `|`, which returns an `Array[String]`. Then we keep the last element.

// Strip the `book|chapter#|verse#|` prefix and lower-case the verse text.
def toText(str: String): String = {
  val ary = str.toLowerCase.split("\\s*\\|\\s*")
  if (ary.length > 0) ary.last else ""
}

val input = sc.textFile(in).map(line => toText(line))  // could also write ...map(toText)

// Recall that if you will read `input` several times, then cache the data so Spark doesn't reread from disk each time!

input.cache

// The following is one long statement that is similar to what we saw in _02_WordCount_, but with a few differences.
//
// Take the `input` and:
// 1. Split each line on non-alphanumeric characters (a crude form of tokenization). `flatMap` "flattens" each array returned into a single `RDD` of words
// 2. Use `countByValue` to treat each word as a key, then count all the keys.
This returns a Scala `Map[String,Long]` to the driver, so be careful about `OutOfMemory` (`OOM`) errors for very large datasets.

// Tokenize on non-letter runs and count every word on the driver.
val wc1 = input
  .flatMap(line => line.split("""[^\p{IsAlphabetic}]+"""))
  .countByValue()  // Returns a Scala Map[T, Long] to the driver; no more RDD!

// Now let's convert back to an `RDD` for output. We'll use one partition (the `1` argument you'll see below). To do this, we first convert to a comma-separated string. Note that calling `map` on a Scala `Map` passes two-element tuples for the key-value pairs to the function. We extract the first and second elements with the `_1` and `_2` methods, respectively, with which we format strings for output.

val wc2 = wc1.map(key_value => s"${key_value._1},${key_value._2}").toSeq  // returns a Seq[String]
val wc = sc.makeRDD(wc2, 1)  // single partition so the output is one file

// Save the results.
//
// > **Note:** If you run the next cell more than once, _delete the output directory first!_ Spark, following Hadoop conventions, won't overwrite an existing directory.

println(s"Writing output to: $out")
wc.saveAsTextFile(out)

// ## Recap
//
// Question: how is the output in `notebooks/output/kjv-wc3` different from the output we generated for _02_WordCount_, `notebooks/output/kjv-wc2`?
//
// The `countByValue` function is very convenient for situations like this, but it's not widely used because of its narrow purpose and the risk of exceeding available memory in the job driver.

// ## Exercises

// ### Exercise 1: Try different inputs
//
// Change the input `in` and output `out` definitions above to try different files. Does the helper function `toText` need to be changed?

// ### Exercise 2: Sort by word length
//
// How would you tell the Scala collections library or the `RDD` API to sort by the length of the words, rather than alphabetically? Look at the sort methods in both libraries. Most of the time, you pass a function that will take as the argument the full "record", then you return something to use for sorting.
// ### Exercise 3: Repeat any of the _02_WordCount_ exercises // // Some you might try doing in the Scala collection transformations, rather than using `RDD` transformations.
notebooks/03_WordCount.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Aquí van las librerías y funciones que va a usar
import matplotlib.pyplot as plt
import sympy as sym
# %matplotlib inline
sym.init_printing()

# #!pip install control
import control

## Puede que requiera incorporar más librerías o definir nuevas funciones. Hágalo a continuación
# -

# # Tarea
#
# Nombres: **Pongan aquí sus nombres completos (2 integrantes)**

# Suponga que tiene un sistema de tiempo continuo que se excita con una entrada $x(t)$ y responde con una señal $y(t)$, como lo muestra la figura:
#
# ![Diagrama de un bloque](figuras/bloque_tiempo.png)
#
# Analice el modelo del sistema para los modelos en cada uno de los casos siguientes:

# ## Análisis modelo A: $ \frac{dy}{dt} + 5y(t) = 5x(t) $
#
# - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace.
# $$ EscribaAquíLaEcuaciónTransformada $$
#
# - Encuentre la función de transferencia en Laplace del sistema.
#
# $$ EscribaAquíLaFunciónDeTransferencia $$
#
# - Grafique el mapa de polos y ceros del sistema.
#
# - Encuentre la función de transferencia en Fourier del sistema.
#
# $$ EscribaAquíLaFunciónDeTransferencia $$
#
# - Grafique la respuesta frecuencial del sistema.

# Aquí va el código para generar las gráficas pedidas.
# Ejecute el código para generar la gráfica.
# Agregue más celdas si lo requiere

# - Analice las gráficas obtenidas, escriba su análisis enfocándose en la estabilidad del sistema, el tipo de amortiguamiento, las características de sistemas como filtro, resonancias, etc.

# - Escriba aquí su discusión.
# - Puede usar viñetas o párrafos.
# - Conserve las sangrías para facilitar la lectura.

# +
## Aquí va el código extra que puede requerir para responder a las preguntas.
# - # ## Análisis modelo B: $ \frac{dy}{dt} + 2y(t) = 5x(t) $ # # - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace. # $$ EscribaAquíLaEcuaciónTransformada $$ # # - Encuentre la función de transferencia en Laplace del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique el mapa de polos y ceros del sistema. # # - Encuentre la función de transferencia en Fourier del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique la respuesta frecuencial del sistema. # Aquí va el código para generar las gráficas pedidas. # Ejecute el código para generar la gráfica. # Agregue más celdas si lo requiere # - Analice las gráfica obtenidas, escriba su análisis y determine la estabilidad del sistema y el tipo de amortiguamiento. # - Escriba aquí su discusión. # - Puede usar viñetas o párrafos. # - Conserve las sangrías para facilitar la lectura. # + ## Aquí va el código extra que puede requerir para responder a las preguntas. # - # ## Análisis modelo C: $ \frac{dy}{dt} + 0.1y(t) = 5x(t) $ # # - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace. # $$ EscribaAquíLaEcuaciónTransformada $$ # # - Encuentre la función de transferencia en Laplace del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique el mapa de polos y ceros del sistema. # # - Encuentre la función de transferencia en Fourier del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique la respuesta frecuencial del sistema. # Aquí va el código para generar las gráficas pedidas. # Ejecute el código para generar la gráfica. # Agregue más celdas si lo requiere # - Analice las gráfica obtenidas, escriba su análisis y determine la estabilidad del sistema y el tipo de amortiguamiento. # - Escriba aquí su discusión. # - Puede usar viñetas o párrafos. # - Conserve las sangrías para facilitar la lectura. 
# + ## Aquí va el código extra que puede requerir para responder a las preguntas. # - # ## Análisis modelo D: $ \frac{d^{2}y}{dt^{2}} + 5\frac{dy}{dt} + y(t) = x(t) $ # # - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace. # $$ EscribaAquíLaEcuaciónTransformada $$ # # - Encuentre la función de transferencia en Laplace del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique el mapa de polos y ceros del sistema. # # - Encuentre la función de transferencia en Fourier del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique la respuesta frecuencial del sistema. # Aquí va el código para generar las gráficas pedidas. # Ejecute el código para generar la gráfica. # Agregue más celdas si lo requiere # - Analice las gráfica obtenidas, escriba su análisis y determine la estabilidad del sistema y el tipo de amortiguamiento. # - Escriba aquí su discusión. # - Puede usar viñetas o párrafos. # - Conserve las sangrías para facilitar la lectura. # + ## Aquí va el código extra que puede requerir para responder a las preguntas. # - # ## Análisis modelo E: $ \frac{d^{2}y}{dt^{2}} + y(t) = x(t) $ # # - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace. # $$ EscribaAquíLaEcuaciónTransformada $$ # # - Encuentre la función de transferencia en Laplace del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique el mapa de polos y ceros del sistema. # # - Encuentre la función de transferencia en Fourier del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique la respuesta frecuencial del sistema. # Aquí va el código para generar las gráficas pedidas. # Ejecute el código para generar la gráfica. # Agregue más celdas si lo requiere # - Analice las gráfica obtenidas, escriba su análisis y determine la estabilidad del sistema y el tipo de amortiguamiento. # - Escriba aquí su discusión. # - Puede usar viñetas o párrafos. 
# - Conserve las sangrías para facilitar la lectura. # + ## Aquí va el código extra que puede requerir para responder a las preguntas. # - # ## Análisis modelo F: $ \frac{d^{2}y}{dt^{2}} + 0.25\frac{dy}{dt} + y(t) = x(t) $ # # - Lleve la ecuación diferencial al dominio de las frecuencias usando la transformada de Laplace. # $$ EscribaAquíLaEcuaciónTransformada $$ # # - Encuentre la función de transferencia en Laplace del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique el mapa de polos y ceros del sistema. # # - Encuentre la función de transferencia en Fourier del sistema. # # $$ EscribaAquíLaFunciónDeTransferencia $$ # # - Grafique la respuesta frecuencial del sistema. # Aquí va el código para generar las gráficas pedidas. # Ejecute el código para generar la gráfica. # Agregue más celdas si lo requiere # - Analice las gráfica obtenidas, escriba su análisis y determine la estabilidad del sistema y el tipo de amortiguamiento. # - Escriba aquí su discusión. # - Puede usar viñetas o párrafos. # - Conserve las sangrías para facilitar la lectura. # + ## Aquí va el código extra que puede requerir para responder a las preguntas. # - # ## Análisis comparativo # # Con base en los resultados anteriores, realice un análisis comparativo y escriba sus observaciones # # - Escriba aquí sus respuestas # - Puede usar viñetas # ## Conclusiones # # Escriba sus conclusiones # # - Use viñetas
tareas/TareaFdT.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Reproduce Figure 1 # 2D adversarial spheres propagated in deep batch-normalized linear network at init # + import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt # %matplotlib inline import torch import torch.nn as nn import torch.utils.data from sklearn.decomposition import PCA # + N = 32 # N is the mini-batch size seed = 3 N_CLASSES = 2 np.random.seed(seed) theta = np.linspace(0, 2 * np.pi, N + 1) xCenter = 0 yCenter = 0 radius = 1; x = radius * np.cos(theta) + xCenter; y = radius * np.sin(theta) + yCenter; # Training set train_x = np.zeros((2 * N, 2)) # DATA, FEATURES train_y = np.zeros(2 * N) # DATA, Int label # class 1 train_x[:N, 0] = x[:N].copy() #+ np.random.randn(N) / 30 train_x[:N, 1] = y[:N].copy() #+ np.random.randn(N) / 30 train_y[:N] = 0 # class 2 train_x[N:, 0] = x[:N].copy() * 1.3 #+ np.random.randn(N) / 30 train_x[N:, 1] = y[:N].copy() * 1.3 #+ np.random.randn(N) / 30 train_y[N:] = 1 is_shuffled = False # - rng_state = np.random.get_state() np.random.shuffle(train_x) np.random.set_state(rng_state) np.random.shuffle(train_y) is_shuffled = True colors_txt = ['red', 'blue'] R_rgba = mpl.colors.to_rgba('red', alpha=1.) B_rgba = mpl.colors.to_rgba('blue', alpha=1.) 
train_y = train_y.astype('int')

# Per-sample RGBA color: blue for class 1, red for class 0.  (The initial
# random fill is fully overwritten by the loop.)
colors = np.random.random((2 * N, 4))
for i in range(2 * N):
    if train_y[i]:
        colors[i] = B_rgba
    else:
        colors[i] = R_rgba

# Visualize dataset

# +
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 3))
colors_txt = ['red', 'blue']
ax1.scatter(train_x[:N, 0], train_x[:N, 1], c=train_y[:N], alpha=0.5, cmap=mpl.colors.ListedColormap(colors_txt))
ax1.set_title("Batch 1")
ax2.scatter(train_x[N:, 0], train_x[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors_txt))
ax2.set_title("Batch 2")
ax3.scatter(train_x[:N, 0], train_x[:N, 1], edgecolors=colors, c='w', s=40)
ax3.scatter(train_x[N:, 0], train_x[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors_txt))
ax3.set_title("Layer 0")
#fig.savefig('AdversarialSpheres.png')
# -

# +
# Sanity check: per-mini-batch mean/std of the raw features.
print("Batch 1")
print(np.mean(train_x[:N]))
print(np.std(train_x[:N]))
print("Batch 2")
print(np.mean(train_x[N:]))
print(np.std(train_x[N:]))
# -

torch.manual_seed(seed)

# +
X_train = torch.tensor(train_x, dtype=torch.float)
Y_train = torch.tensor(train_y, dtype=torch.long)

# create datasets
train_dataset = torch.utils.data.TensorDataset(X_train, Y_train)
train_loader_noshuffle = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=N, shuffle=False)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=50, shuffle=True)
# -

# Forward hook: appends each hooked layer's output to the global list.
activations = []
def hook(module, input, output):
    activations.append(output)

class LinearNetwork(nn.Module):
    """Deep linear (no nonlinearity) MLP, optionally with BatchNorm before
    every hidden Linear layer; used untrained, at initialization."""

    def __init__(self, input_size, num_layers, num_units, num_classes, do_batch_norm=False):
        super(LinearNetwork, self).__init__()
        self.do_batch_norm = do_batch_norm
        self.input_layer = nn.Linear(input_size, num_units, bias=True)
        self.features = self._make_layers(num_layers, num_units)
        self.classifier = nn.Linear(num_units, num_classes, bias=True)

    def forward(self, inputs):
        """Forward pass, returns outputs of each layer.
        Use last out (final) for backprop!"""
        out = self.input_layer(inputs)
        out = self.features(out)
        out = self.classifier(out)
        return out

    def _make_layers(self, num_layers, num_units):
        # One (BatchNorm, Linear) pair per hidden layer when do_batch_norm,
        # otherwise just Linear layers.  momentum=None => cumulative stats.
        layers = []
        for i in range(num_layers):
            if self.do_batch_norm:
                layers += [nn.BatchNorm1d(num_units, momentum=None),
                           nn.Linear(num_units, num_units, bias=True)]
            else:
                layers += [nn.Linear(num_units, num_units, bias=True)]
        return nn.Sequential(*layers)

num_units = 10
num_layers = 30
do_batch_norm = True
device = torch.device('cpu')
#device = torch.device('cuda:0')
model = LinearNetwork(train_x.shape[1], num_layers, num_units, N_CLASSES, do_batch_norm=do_batch_norm).to(device)

# Dry-run one mini-batch (no hooks registered yet, so nothing is captured).
activations=[]
with torch.no_grad():
    for inputs, labels in train_loader_noshuffle:
        inputs = inputs.to(device)
        labels = labels.to(device)
        out = model(inputs)
        break
print(len(activations))

# +
# Register the forward hook on every Linear layer inside `features`
# (odd indices 1, 3, 5, ... — even indices are the BatchNorm modules).
# NOTE(review): indentation of this loop body was reconstructed from a
# collapsed source line — confirm against the original notebook.
j = 0
L = num_layers * 2  # ReLUNetwork
register_idx = 1
for i in range(L):
    if i == register_idx:
        j += 1
        register_idx += 2
        print('%d %s' % (j, model.features[i]))
        model.features[i].register_forward_hook(hook)

# +
# Capture hooked activations for the two non-shuffled mini-batches.
# Note: model stays in train() mode, so BatchNorm uses per-batch statistics.
activations = []
#model.train()
# model.eval()
for i, (inputs, labels) in enumerate(train_loader_noshuffle):
    inputs = inputs.to(device)
    labels = labels.to(device)
    if i == 0:
        x_b0 = inputs.detach().cpu().numpy()
        y_b0 = labels.detach().cpu().numpy()
        pred = model(inputs)
    elif i == 1:
        x_b1 = inputs.detach().cpu().numpy()
        y_b1 = labels.detach().cpu().numpy()
        pred = model(inputs)
    else:
        break
print(len(activations))

# +
# concatenate x and y for two mini-batches
x_batch = np.concatenate((x_b0, x_b1))
y_batch = np.concatenate((y_b0, y_b1))

fig, axes = plt.subplots(4, 4, figsize=(12, 12))
ax = axes.ravel().tolist()

# Panel 0: the raw input points.
if is_shuffled:
    ax[0].scatter(x_batch[:N, 0], x_batch[:N, 1], c=train_y[:N], alpha=0.5, cmap=mpl.colors.ListedColormap(colors))
    ax[0].scatter(x_batch[N:, 0], x_batch[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors))
else:
    ax[0].scatter(x_batch[:N, 0], x_batch[:N, 1], c='r', alpha=0.5)
    ax[0].scatter(x_batch[N:, 0], x_batch[N:, 1], c='b', alpha=0.5)
ax[0].set_title("Layer 0", fontsize=14)
ax[0].axis('off')

# Every other layer's activations, projected onto their top-2 PCA components;
# activations[i] is batch 0, activations[i + layers] is batch 1.
layers = num_layers - 1
layer_nb = 2
#correlation_arr = np.zeros(layers)
j = 1
for i in range(layers):  # loop over rows
    a = activations[i].detach().cpu().numpy()  # batch 0
    b = activations[i + layers].detach().cpu().numpy()  # batch 1
    ab = np.concatenate((a, b))
    pca = PCA(n_components=2)
    pca.fit(ab)
    ab_pca = np.dot(pca.components_, ab.T).T  # project activations onto top-2 components
    '''
    correlation_arr[i] = (np.abs(np.corrcoef(ab_pca[:N, 0], ab_pca[N:, 0])[0][1]) + \
                          np.abs(np.corrcoef(ab_pca[:N, 1], ab_pca[N:, 1])[0][1])) / 2
    '''
    if i % 2 == 0 and j < 16:
        if is_shuffled:
            ax[j].scatter(ab_pca[:N, 0], ab_pca[:N, 1], c=train_y[:N], alpha=0.5, cmap=mpl.colors.ListedColormap(colors))
            ax[j].scatter(ab_pca[N:, 0], ab_pca[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors))
        else:
            ax[j].scatter(ab_pca[:N, 0], ab_pca[:N, 1], c='r', alpha=0.5)
            ax[j].scatter(ab_pca[N:, 0], ab_pca[N:, 1], c='b', alpha=0.5)
        ax[j].set_title("Layer %d" % layer_nb, fontsize=14)
        ax[j].set_aspect('equal', 'box')
        ax[j].axis('off')
        layer_nb += 2
        j += 1
# -

init = 'He'
mode = 'Train' if model.training else 'Eval'
CKPT_NAME = 'AdvSpheresNoNoiseNoShuffle_ReLUNet_Init%s_L%d_W%d_BN%s_Mode%s_Seed%d_MB%d.png' % (init, num_layers, num_units, do_batch_norm, mode, seed, N)
print(CKPT_NAME)
#fig.savefig(os.path.join('./img', CKPT_NAME))

plt.rc('text', usetex=True)
plt.rc('font', family='serif')

# # Figure 1
#
# You may get slightly different results than our Figure 1 due to differences in random initialization.
# + radii=40 fontsz=16 fig, axes = plt.subplots(1, 3, figsize=(6, 2.2))#, sharex=True, sharey=True) ax = axes.ravel().tolist() if is_shuffled: ax[0].scatter(x_batch[:N, 0], x_batch[:N, 1], edgecolors=colors, c='w', s=radii) ax[0].scatter(x_batch[N:, 0], x_batch[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors)) ax[0].set_title("Input", fontsize=fontsz) ax[0].set_aspect('equal', 'box') ax[0].axis('off') layers = num_layers - 1 layer_nb = 2 j = 1 for i in range(layers): # loop over rows a = activations[i].detach().cpu().numpy() # batch 0 b = activations[i + layers].detach().cpu().numpy() # batch 1 ab = np.concatenate((a, b)) pca = PCA(n_components=2) pca.fit(ab) ab_pca = np.dot(pca.components_, ab.T).T # project activations onto top-2 components if i % 2 == 0: if j == 1: ax[1].scatter(ab_pca[:N, 0], ab_pca[:N, 1], edgecolors=colors, c='w', s=radii) ax[1].scatter(ab_pca[N:, 0], ab_pca[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors)) ax[1].set_title("Layer %d" % layer_nb, fontsize=fontsz) #ax[1].set_aspect('equal', 'box') ax[1].axis('off') if j == 7: ax[2].scatter(ab_pca[:N, 0], ab_pca[:N, 1], edgecolors=colors, c='w', s=radii) ax[2].scatter(ab_pca[N:, 0], ab_pca[N:, 1], c=train_y[N:], alpha=0.5, cmap=mpl.colors.ListedColormap(colors)) ax[2].set_title("Layer %d" % layer_nb, fontsize=fontsz) #ax[2].set_aspect('equal', 'box') ax[2].axis('off') layer_nb += 2 j += 1 plt.tight_layout() plt.show() # - init = 'He' mode = 'Train' if model.training else 'Eval' CKPT_NAME = 'AdvSpheresNoNoise3x_LinearNet_Init%s_L%d_W%d_BN%s_Mode%s_Seed%d_MB%d_EdgeColorsBatch.png' % (init, num_layers, num_units, do_batch_norm, mode, seed, N) print(CKPT_NAME) #fig.savefig(os.path.join('./img', CKPT_NAME))
Adversarial_Spheres_Reproduce_Figure_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import dates as mdates
from matplotlib import ticker
import scipy.stats as sps
import datetime as dt
import pickle
import arviz as az
import pyjags
from IPython.display import HTML, Latex, Markdown, clear_output

start = dt.datetime.now()
print(start)


# +
# find and fix outliers using Hampel filter
# Impl from: https://towardsdatascience.com/outlier-detection-with-hampel-filter-85ddf523c73d
def hampel_filter_pandas(input_series, window_size, n_sigmas=3.0):
    """Detect outliers in a series with a Hampel filter and replace them.

    A point is flagged when it deviates from the rolling median by more
    than ``n_sigmas`` robust standard deviations (median absolute
    deviation scaled by 1.4826, the consistency constant for Gaussian
    data).  Flagged points are replaced by the rolling median.

    Parameters
    ----------
    input_series : pd.Series
        Series to filter.  Any index type is accepted: replacement is
        done by integer position, not by label.
    window_size : int
        Half-width of the rolling window (full window is 2*window_size+1).
    n_sigmas : float, optional
        Rejection threshold in robust standard deviations (default 3.0).

    Returns
    -------
    (pd.Series, np.ndarray)
        The filtered copy and the integer positions that were replaced.
    """
    k = 1.4826  # scale factor for Gaussian distribution
    new_series = input_series.copy()

    # helper lambda function
    MAD = lambda x: np.median(np.abs(x - np.median(x)))

    # the use of min_periods is to have rolling window extend towards
    # the end of the data series; in effect, we can apply hampel filter
    # to most recent observations
    # taken from: https://stackoverflow.com/questions/48953313/pandas-rolling-window-boundary-on-start-end-of-series/48953314#48953314
    rolling_window_size = 2 * window_size + 1
    rolling_median = input_series.rolling(
        window=rolling_window_size,
        min_periods=(rolling_window_size // 2),
        center=True).median()
    rolling_mad = k * input_series.rolling(
        window=rolling_window_size,
        min_periods=(rolling_window_size // 2),
        center=True).apply(MAD)
    # print(f'rolling_mad = {rolling_mad}, rolling_median = {rolling_median}')
    diff = np.abs(input_series - rolling_median)

    where = diff > (n_sigmas * rolling_mad)
    indices = np.argwhere(where.to_numpy()).flatten()
    # `indices` are integer POSITIONS (from argwhere on a numpy array),
    # so assign through .iloc: plain [] indexing is label-based on the
    # datetime-indexed series this notebook filters, and the integer
    # fallback it relied on is removed in pandas >= 2.0.
    new_series.iloc[indices] = rolling_median.iloc[indices]

    return new_series, indices
# -

# # Italy COVID-19 Rt estimation with MCMC
#
# A simple method is presented to estimate effective reproduction number $R_t$ of COVID-19 in italian regions with a Markov chain Monte Carlo
# and Poisson likelihood parametrized on daily new cases.
# The jupyter notebook backend is available at [GitHub](https://github.com/maxdevblock/COVID-Rt-MCMC).
#
# Method and MCMC diagnostics are available at [PDF](https://maxpierini.it/ncov/Rt-MCMC.pdf).
# ***

# Serial-interval prior: mean and sd (days) of the COVID-19 serial interval.
SI_mu = 7.5
SI_sd = 3.4

# +
# Visualize the Gamma distribution implied by (SI_mu, SI_sd).
shape = SI_mu**2 / SI_sd**2
scale = SI_sd**2 / SI_mu

SI_dist = sps.gamma(a=shape, scale=scale)
SI_x = np.linspace(SI_dist.ppf(0), SI_dist.ppf(.999), 100)
SI_y = SI_dist.pdf(SI_x)
omega = SI_x[SI_y.argmax()]
mean = SI_dist.stats(moments="m")

fig, ax = plt.subplots(figsize=(12, 3))
ax.plot(SI_x, SI_y)
ax.axvline(omega, c="r", ls=":", label=f"mode {omega:.1f} days")
ax.axvline(mean, c="g", ls="--", label=f"mean {mean:.1f} days")
ax.legend()
ax.axhline(0, c="k", alpha=.2)
ax.set_xlim(0, SI_x.max())
ax.set_yticks([])
ax.set_xlabel("days")
ax.set_title(fr"COVID-19 $\mathbf{{SI}}$ serial interval Gamma distribution ($\mu={SI_mu}$ $\sigma={SI_sd}$)")
plt.show();
# -

# MCMC run lengths (per chain).
adapt = 500
warmup = 1000
sample = 1000

# +
# Regional daily new cases, indexed by (region, date).
df = pd.read_csv(
    "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv",
    usecols=['data', 'denominazione_regione', 'nuovi_positivi'],
    parse_dates=['data'],
    index_col=['denominazione_regione', 'data'],
    squeeze=True).sort_index()

countries = df.index.get_level_values("denominazione_regione").unique().sort_values()
days = df.index.get_level_values("data").unique()
# -

# National daily new cases.
it = pd.read_csv(
    "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv",
    usecols=['data', 'nuovi_positivi'],
    parse_dates=['data'],
    index_col=['data'],
    squeeze=True).sort_index()

# +
# National series: Hampel-filter outliers, then Gaussian smoothing.
pos_it_raw = it
pos_it_flt, _ = hampel_filter_pandas(pos_it_raw, 7, n_sigmas=2.0)
pos_it_smt = pos_it_flt.rolling(14, win_type='gaussian', min_periods=1, center=True).mean(std=5).round()
pos_it_smt[pos_it_smt < 0] = 0

####################

# Regional series: same filtering/smoothing, one row per region.
pos_Y_raw = np.ndarray(shape=(countries.size, days.size))
pos_Y_flt = np.ndarray(shape=(countries.size, days.size))
pos_Y_smt = np.ndarray(shape=(countries.size, days.size))

for r, region in enumerate(countries):
    pos_y_raw = df.loc[region]
    pos_y_flt, _ = hampel_filter_pandas(pos_y_raw, 7, n_sigmas=2.0)
    pos_y_smt = pos_y_flt.rolling(14, win_type='gaussian', min_periods=1, center=True).mean(std=5).round()
    pos_y_smt[pos_y_smt < 0] = 0
    pos_Y_raw[r] = pos_y_raw.values
    pos_Y_flt[r] = pos_y_flt.values
    pos_Y_smt[r] = pos_y_smt.values

# Data passed to JAGS: national (y, k) and regional (yR, kR) series.
model_data = {
    "y": pos_it_smt,
    "k": pos_it_smt,
    "yR": pos_Y_smt,
    "kR": pos_Y_smt,
    "C": countries.size,
    "T": days.size
}
# -

# Combined national + regional model; braces are doubled because this is
# an f-string (only {SI_mu} and {SI_sd} are interpolated from Python).
modelString = f"""
model {{
    # Overarching Rt standard deviation
    tau_R ~ dgamma( 1000 , 100 )
    sigma_R <- 1 / sqrt( tau_R )

    # Serial interval distribution
    SI_mu <- {SI_mu}
    SI_sd <- {SI_sd}
    SI_sh <- SI_mu^2 / SI_sd^2
    SI_ra <- SI_mu / SI_sd^2
    SI ~ dgamma( SI_sh , SI_ra )
    gamma <- 1 / SI

    for ( r in 1:C ) {{
        # First Rt prior
        RR[r,1] <- 0
        for ( t in 2:T ) {{
            # Rt prior for k>0
            RRpr[r,t] ~ dnorm( RR[r,t-1] , tau_R ) T(0,)
            # Rt prior for k=0
            RRnu[r,t] ~ dnorm( 0 , tau_R ) T(0,)
            # Define Rt prior
            RR[r,t] <- ifelse( kR[r,t-1]==0 , RRnu[r,t] , RRpr[r,t] )
            # Avoid k=0 (undefined Rt)
            KR[r,t] <- ifelse( kR[r,t-1]==0, 1 , kR[r,t-1] )
            # Poisson likelihood
            lambdaR[r,t] <- KR[r,t] * exp( gamma * ( RR[r,t] - 1 ) )
            yR[r,t] ~ dpois( lambdaR[r,t] )
        }}
    }}

    # First Rt prior
    R[1] <- 0
    for ( t in 2:T ) {{
        # Rt prior for k>0
        Rpr[t] ~ dnorm( R[t-1] , tau_R ) T(0,)
        # Rt prior for k=0
        Rnu[t] ~ dnorm( 0 , tau_R ) T(0,)
        # Define Rt prior
        R[t] <- ifelse( k[t-1]==0 , Rnu[t] , Rpr[t] )
        # Avoid k=0 (undefined Rt)
        K[t] <- ifelse( k[t-1]==0, 1 , k[t-1] )
        # Poisson likelihood
        lambda[t] <- K[t] * exp( gamma * ( R[t] - 1 ) )
        y[t] ~ dpois( lambda[t] )
    }}
}}
"""

# +
jags_model = pyjags.Model(
    code=modelString,
    data=model_data,
    chains=4,
    adapt=adapt,
    progress_bar=False
)
jags_posteriors = jags_model.sample(warmup + sample, vars=["R", "RR", "sigma_R", "tau_R"])

# +
# Per-region credible bands (50%..99%) and posterior means.
percs = np.linspace(50, 99, 20)
ymax = np.array([])
uppers = np.ndarray(shape=(countries.size, len(percs), days.size))
lowers = np.ndarray(shape=(countries.size, len(percs), days.size))
means = np.ndarray(shape=(countries.size, days.size))

for r, region in enumerate(countries):
    pyjags_data_test = az.from_pyjags({"R": jags_posteriors["RR"][r]}, warmup_iterations=warmup, save_warmup=False)
    summary = az.summary(pyjags_data_test, var_names="^R", filter_vars="regex", kind="stats")
    for i, p in enumerate(percs[::-1]):
        upper = np.percentile(pyjags_data_test.posterior.R, p, axis=1)
        upper = np.mean(upper, axis=0)
        lower = np.percentile(pyjags_data_test.posterior.R, 100 - p, axis=1)
        lower = np.mean(lower, axis=0)
        ymax = np.append(ymax, upper[2:].max())
        uppers[r, i] = upper
        lowers[r, i] = lower
    means[r] = summary["mean"]

ymax = ymax.max()

for r, region in enumerate(countries):
    display(Markdown(f"# {region}"))
    fig, ax = plt.subplots(2, 1, figsize=(15, 7))

    ax[1].plot(days, df.loc[region].values, 'ok:', lw=1, ms=2, label="observed")
    ax[1].plot(days, pos_Y_smt[r], label="filtered & smoothed")
    ax[1].axhline(0, c="k", alpha=.2)
    ax[1].set_xlim(days[0], days[-1])
    ax[1].yaxis.set_ticks_position('both')
    ax[1].tick_params(labelright=True)
    ax[1].legend()
    ax[1].set_title(f"{region} new cases per day")

    steps = .4 / (len(percs) - 1)
    for i, (upper, lower) in enumerate(zip(uppers[r], lowers[r])):
        alpha = 1 - (i * steps) - (.6 * np.ceil(i / len(percs)))
        ax[0].fill_between(days, np.zeros(lower.size), lower, color="w", alpha=alpha)
        ax[0].fill_between(days, upper, np.ones(lower.size) * 12, color="w", alpha=alpha)
    ax[0].plot(days, means[r], c="k", alpha=.25)
    ax[0].hlines(np.arange(0, 12, .5), days[0], days[-1], color="k", lw=1, alpha=.1)
    ax[0].axhline(1, c="k", ls=":", lw=1)
    ax[0].fill_between(days, 1, 12, color="red", zorder=0)
    ax[0].fill_between(days, 0, 1, color="green", zorder=0)
    ax[0].set_xlim(days[0], days[-1])
    ax[0].set_title(fr"{region} $R_t$ (CIs: {percs[0]:.0f}%-{percs[-1]:.0f}%)")
    ax[0].set_ylim(0, ymax)
    ax[0].yaxis.set_ticks_position('both')
    ax[0].tick_params(labelright=True)

    plt.show()
    plt.close(fig="all")
    display(Markdown("***"))
# -

# # Latest Rt

# +
fig, ax = plt.subplots(figsize=(15, 5))

for r, region in enumerate(countries):
    steps = .4 / (len(percs) - 1)
    for i, (upper, lower) in enumerate(zip(uppers[r, :, -1], lowers[r, :, -1])):
        alpha = 1 - (i * steps) - (.6 * np.ceil(i / len(percs)))
        ax.fill_between([r * 2, r * 2 + 1], 1, 12, color="red", zorder=0)
        ax.fill_between([r * 2, r * 2 + 1], 0, 1, color="green", zorder=0)
        ax.fill_between(np.linspace(r * 2, r * 2 + 1, 10), np.zeros(10), lower, color="w", alpha=alpha)
        ax.fill_between(np.linspace(r * 2, r * 2 + 1, 10), upper, np.ones(10) * 12, color="w", alpha=alpha)
    ax.plot(np.linspace(r * 2, r * 2 + 1, 10), np.ones(10) * means[r][-1], color="w", alpha=.9, zorder=100)

ax.axhline(1, c="k", ls=":", lw=1)
ax.hlines(np.arange(2, 12, 1), -1, countries.size * 2 + 1, lw=1, alpha=.2)
ax.set_ylim(0, ymax)
ax.set_xlim(-1, countries.size * 2)
ax.set_xticks(np.arange(.5, countries.size * 2 + .5, 2))
ax.set_xticklabels(countries.values, rotation=90)
ax.yaxis.set_ticks_position('both')
ax.tick_params(labelright=True)
ax.set_title(f"Regions latest estimated $R_t$ {days[-1].date()} (CIs: {percs[0]:.0f}%-{percs[-1]:.0f}%)")
plt.show();
# -

# # Italy

# +
pyjags_data_tau = az.from_pyjags(
    {"tau_R": jags_posteriors["tau_R"]},
    warmup_iterations=warmup,
    save_warmup=False)
summary_tau = az.summary(pyjags_data_tau)

# National-only model, re-using the posterior mean of tau_R from the
# combined run as a fixed precision.
modelStringItaly = f"""
model {{
    # Estimated Rt precision tau
    tau_R <- {summary_tau['mean'][0]}

    # Serial interval distribution
    SI_mu <- {SI_mu}
    SI_sd <- {SI_sd}
    SI_sh <- SI_mu^2 / SI_sd^2
    SI_ra <- SI_mu / SI_sd^2
    SI ~ dgamma( SI_sh , SI_ra )
    gamma <- 1 / SI

    # First Rt prior
    R[1] <- 0
    for ( t in 2:T ) {{
        # Rt prior for k>0
        Rpr[t] ~ dnorm( R[t-1] , tau_R ) T(0,)
        # Rt prior for k=0
        Rnu[t] ~ dnorm( 0 , tau_R ) T(0,)
        # Define Rt prior
        R[t] <- ifelse( k[t-1]==0 , Rnu[t] , Rpr[t] )
        # Avoid k=0 (undefined Rt)
        K[t] <- ifelse( k[t-1]==0, 1 , k[t-1] )
        # Poisson likelihood
        lambda[t] <- K[t] * exp( gamma * ( R[t] - 1 ) )
        y[t] ~ dpois( lambda[t] )
    }}
}}
"""
# -

# FIX: the original referenced an undefined name `yit` (NameError); the
# national smoothed series is `pos_it_smt`, matching `model_data` above.
model_data_italy = {
    "y": pos_it_smt,
    "k": pos_it_smt,
    "T": days.size
}

# +
jags_model_italy = pyjags.Model(
    code=modelStringItaly,
    data=model_data_italy,
    chains=4,
    adapt=adapt,
    progress_bar=False
)
jags_posteriors_italy = jags_model_italy.sample(warmup + sample, vars=["R"])

# +
percs = np.linspace(50, 99, 20)
ymax = np.array([])
uppers = np.ndarray(shape=(len(percs), days.size))
lowers = np.ndarray(shape=(len(percs), days.size))

# FIX: summarize the Italy-specific run (jags_posteriors_italy), not the
# earlier combined run -- otherwise the dedicated Italy model above is
# sampled but never used.
pyjags_data_italy = az.from_pyjags({"R": jags_posteriors_italy["R"]}, warmup_iterations=warmup, save_warmup=False)
summary = az.summary(pyjags_data_italy, var_names="^R", filter_vars="regex", kind="stats")

for i, p in enumerate(percs[::-1]):
    upper = np.percentile(pyjags_data_italy.posterior.R, p, axis=1)
    upper = np.mean(upper, axis=0)
    lower = np.percentile(pyjags_data_italy.posterior.R, 100 - p, axis=1)
    lower = np.mean(lower, axis=0)
    ymax = np.append(ymax, upper.max())
    uppers[i] = upper
    lowers[i] = lower

ymax = ymax.max()

fig, ax = plt.subplots(2, 1, figsize=(15, 7))

ax[1].plot(days, pos_it_smt, label="filtered & smoothed")
ax[1].plot(days, it, 'ok:', lw=1, ms=2, label="observed")
ax[1].axhline(0, c="k", alpha=.2)
ax[1].set_xlim(days[0], days[-1])
ax[1].yaxis.set_ticks_position('both')
ax[1].tick_params(labelright=True)
ax[1].legend()
ax[1].set_title(f"ITALY new cases per day")

steps = .4 / (len(percs) - 1)
for i, (upper, lower) in enumerate(zip(uppers, lowers)):
    alpha = 1 - (i * steps) - (.6 * np.ceil(i / len(percs)))
    ax[0].fill_between(days, np.zeros(lower.size), lower, color="w", alpha=alpha)
    ax[0].fill_between(days, upper, np.ones(lower.size) * 12, color="w", alpha=alpha)
ax[0].plot(days, summary["mean"], c="k", alpha=.25)
ax[0].hlines(np.arange(0, 12, .5), days[0], days[-1], color="k", lw=1, alpha=.1)
ax[0].axhline(1, c="k", ls=":", lw=1)
ax[0].fill_between(days, 1, 12, color="red", zorder=0)  #, alpha=.2)
ax[0].fill_between(days, 0, 1, color="green", zorder=0)  #, alpha=.2)
ax[0].set_xlim(days[0], days[-1])
ax[0].set_title(fr"ITALY $R_t$ (CIs: {percs[0]:.0f}%-{percs[-1]:.0f}%)")
ax[0].set_ylim(0, ymax)
ax[0].yaxis.set_ticks_position('both')
ax[0].tick_params(labelright=True)

plt.show()
plt.close(fig="all")
# -

# ***

end = dt.datetime.now()
print(f"{end}: Completed in {end - start}")
Rt-MCMC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Data Structures # # * tuple # * list # * dict # * set # ## Tuple # A tuple is a one dimensional, fixed-length, immutable sequence. # # Create a tuple: tup = (1, 2, 3) tup # Convert to a tuple: list_1 = [1, 2, 3] type(tuple(list_1)) # Create a nested tuple: nested_tup = ([1, 2, 3], (4, 5)) nested_tup # Access a tuple's elements by index O(1): nested_tup[0] # Although tuples are immutable, their contents can contain mutable objects. # # Modify a tuple's contents: nested_tup[0].append(4) nested_tup[0] # Concatenate tuples by creating a new tuple and copying objects: (1, 3, 2) + (4, 5, 6) # Multiply tuples to copy references to objects (objects themselves are not copied): ('foo', 'bar') * 2 # Unpack tuples: a, b = nested_tup a, b tup = ("Akshay", "Verma", "100") tup first_name, last_name, age = tup first_name + " "+ last_name +" " + "Age : "+ age # Unpack nested tuples: (a, b, c, d), (e, f) = nested_tup a, b, c, d, e, f # A common use of variable unpacking is when iterating over sequences of tuples or lists: seq = [( 1, 2, 3), (4, 5, 6), (7, 8, 9)] for index, row in enumerate(seq): print index, row seq = [( 1, 2, 3), (4, 5, 6), (7, 8, 9)] for a, b, c in seq: print a+b # ## List # A list is a one dimensional, variable-length, mutable sequence. # # Create a list: list_1 = [1, 2, 3] list_1 # Convert to a list: type(list(tup)) # Create a nested list: nested_list = [(1, 2, 3), [4, 5]] nested_list # Access a list's elements by index O(1): nested_list[2] # Append an element to a list O(1): nested_list.append(6) nested_list # Insert an element to a list at a specific index (note that insert is expensive as it has to shift subsequent elements O(n)): nested_list.insert(0, 'start') nested_list # Pop is expensive as it has to shift subsequent elements O(n). 
The operation is O(1) if pop is used for the last element. # # Remove and return an element from a specified index: nested_list.pop(0) nested_list # Locates the first such value and remove it O(n): nested_list.remove((1, 2, 3)) nested_list # Check if a list contains a value O(n): 7 in nested_list # Concatenate lists by creating a new list and copying objects: [1, 3, 2] + [4, 5, 6] # Extend a list by appending elements (faster than concatenating lists, as it does not have to create a new list): nested_list.extend([7, 8, 9]) nested_list # ## Dictionary # A dict is also known as a hash map or associative array. A dict is a mutable collection of key-value pairs. # # Note: Big O complexities are listed as average case, with most worst case complexities being O(n). # # Create a dict: dict_new = {"full_name": "<NAME>", "age": "120"} dict_new["age"] dict_new["Occupation"] = "Unemployed" dict_new["Occupation"] dict_new["Occupation"]= "Data Visualization Engineer" dict_new["Occupation"] dict_1 = { 'a' : 'foo', 'b' : [0, 1, 2, 3] } dict_1 # Access a dict's elements by index O(1) dict_1['b'] # Insert or set a dict's elements by index O(1): dict_1[5] = 'bar' dict_1 # Check if a dict contains a key O(1): 5 in dict_1 # Delete a value from a dict O(1): dict_2 = dict(dict_1) del dict_2[5] dict_2 # Remove and return an element from a specified index O(1): value = dict_2.pop('b') print(value) print(dict_2) # Get or pop can be called with a default value if the key is not found. By default, get() will return None and pop() will throw an exception if the key is not found. 
value = dict_1.get('z', 0) value # Return a default value if the key is not found: print(dict_1.setdefault('b', None)) print(dict_1.setdefault('z', None)) # By contrast to setdefault(), defaultdict lets you specify the default when the container is initialized, which works well if the default is appropriate for all keys: # + from collections import defaultdict seq = ['foo', 'bar', 'baz'] first_letter = defaultdict(list) for elem in seq: first_letter[elem[0]].append(elem) first_letter # - # dict keys must be "hashable", i.e. they must be immutable objects like scalars (int, float, string) or tuples whose objects are all immutable. Lists are mutable and therefore are not hashable, although you can convert the list portion to a tuple as a quick fix. print(hash('string')) print(hash((1, 2, (3, 4)))) # Get the list of keys in no particular order (although keys() outputs the keys in the same order). In Python 3, keys() returns an iterator instead of a list. dict_1.keys() # Get the list of values in no particular order (although values() outputs the keys in the same order). In Python 3, keys() returns an iterator instead of a list. dict_1.values() # Iterate through a dictionary's keys and values: for key, value in dict_1.items(): print key, value # Merge one dict into another: dict_1.update({'e' : 'elephant', 'f' : 'fish'}) dict_1 # Pair up two sequences element-wise in a dict: mapping = dict(zip(range(7), reversed(range(7)))) mapping # ## set # A set is an unordered sequence of unique elements. # # Create a set: set_1 = set([0, 1, 2, 3, 4, 5]) set_1 set_2 = {1, 2, 3, 5, 8, 13} set_2 # Sets support set operations like union, intersection, difference, and symmetric difference. 
# Union -- O(len(set_1) + len(set_2)):

set_1 | set_2

# Intersection -- O(min(len(set_1), len(set_2))):

set_1 & set_2

# Difference -- O(len(set_1)):

set_1 - set_2

# Symmetric difference -- O(len(set_1) + len(set_2)):

set_1 ^ set_2

# Subset test -- O(len(set_3)):

set_3 = {1, 2, 3}
set_3.issubset(set_2)

# Superset test -- O(len(set_3)):

set_2.issuperset(set_3)

# Equality test -- O(min(len(set_1), len(set_2))):

{1, 2, 3} == {3, 2, 1}
PythonBasics_1/3 - Data Structure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .sh # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Bash # language: bash # name: bash # --- # # Serotyping # A species can be subdivided into different groups based on the antigens expressed on their cell surface. These groups are called serotypes or serovars and the different properties between them can vary greatly. For example, which antigens are expressed on the cell surface of a bacterium can make it more or less virulent, or more or less sensitive to substances like antibiotics. # ## *Streptococcus pneumoniae* # Diseases that are caused by *Streptococcus pneumoniae* are a big problem in public health across the world. There are around 100 known serotypes of *S. pneumoniae*. The current conjugate vaccine (pcv13) covers the 13 most common serotypes causing invasive pneumococcal infections in industrialised countries, but because vaccines are serotype specific, it is of great value to be able to quickly and accurately determine serotypes in order to monitor epidemiological trends of *S. pneumoniae* following the introduction of effective vaccines. # ## Serotyping *S. pneumoniae* # The serotype of a strain of *S. pneumoniae* is determined by the capsular polysaccharide biosynthesis (cps) locus, pictured below. It is a major virulence factor in *S. pneumoniae*, encoding polysaccharide chains that form a capsule around the cell, helping the bacterium avoid the human immune system. # # The cps locus can be very similar between serotypes and based on this serotypes can be grouped into serogroups. Serotypes can also be grouped into serogroups by how similar the antigenic response they trigger is. # ![cps locus](img/cps_schema.png) # Traditionally, determining the serotype of *S. pneumoniae* has predominantly been done with the Quellung reaction or PCR, each with their own limitations. 
Lately, focus has shifted towards serotyping directly from genomic data. # ## SeroBA # Existing software to infer serotypes from genomic data is limited and does not scale well. SeroBA is a pipeline that can quickly and accurately determine the serotype of *S. pneumoniae* from WGS data (Illumina paired-end reads). It uses k-mer analysis and references to determine the serotype of a sample. # # In this tutorial we will walk you through how to determine the serotypes of two samples using SeroBA, from setting up the necessary databases, to running the analysis, and finally how to interpret the results. # # For more information and to explore the code behind SeroBA, you can visit the [GitHub page](https://github.com/sanger-pathogens/seroba). # # We will start with [setting up the databases](db_setup.ipynb) with a few simple commands. You can also [return to the index](index.ipynb).
SEROBA/serotyping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: BMED360 # language: python # name: bmed360 # --- # # Freesurfer segmentation using bash cell magic with input parameters # # # BMED360-2021 `freesurfer-segmentation-bash.ipynb` (**assuming Linux or MacOS with FS 7.1.1 installed**) from IPython.display import Image Image("assets/sub_102_tp1_mri_orig_wmparc.png") import os from os.path import expanduser, join, basename, split home = expanduser('~') # To make path to local home directory cwd = os.getcwd() import glob from pathlib import Path #Assuming tcsh is installed # e.g. sudo apt-get install tcsh RECON_ALL = '/usr/local/freesurfer/bin/recon-all' print(os.popen(RECON_ALL).read()) # ### Some preparations before running Freesurfer fs711_home = '/usr/local/freesurfer' working_dir = join(cwd, 'data') my_fs711_dir = '%s/fs711_subjects' % (working_dir) my_bids_dir = '%s/bids_bg_bmed360' % (working_dir) if not os.path.exists(my_fs711_dir): os.makedirs(my_fs711_dir) else: print('subdirectory fs711_subjects already exists') # #### Get a list of all T1w files from BIDS directory and renaming # + T1_files = [] subj_names = [] for file_path in Path(my_bids_dir).glob('**/*ses-*_T1w.nii.gz'): T1_files.append(str(file_path)) subj_names.append(str(file_path.name[:-11])) # truncate end of filename: _T1w.nii.gz (= 11 characters) all_T1_files = sorted(T1_files) all_subj_names = sorted(subj_names) print('\n', [os.path.basename(fn) for fn in all_T1_files]) all_subj_names # - # renamimg using list comprehension twice lst1 = [sub.replace('-', '_') for sub in all_subj_names] all_subjects = [sub.replace('ses_', 'tp') for sub in lst1] all_subjects # ### Make lists of all subjects and all their original T1w files to be segmented lst = all_subjects lstfn = all_T1_files lstfs711 = [my_fs711_dir] #print(lst) #print(lstfn) #print(lstfs711) # #### Illustration of using `bash` 
magic with string input for Freesurfer recon-all, cfr. # https://stackoverflow.com/questions/19579546/can-i-access-python-variables-within-a-bash-or-script-ipython-notebook-c # + magic_args="-s './data/fs711_subjects' 'sub_102_tp1' 'sub_102_tp2' 'sub_103_tp1' 'sub_103_tp2'" language="bash" # # for i in $1 $2 $3 $4 $5 # do # echo $i # done # - # ### To run Freesurfer de novo, uncomment (and adjust the inputs in) the first line # + magic_args="-s '/usr/local/freesurfer' \"{\" \".join(lstfs711)}\" \"{\" \".join(lst)}\" \"{\" \".join(lstfn)}\"" language="bash" # # # Input: # # $1: Freesurfer home directory, e.g. '/usr/local/freesurfer' # # $2: subjects home directory, e.g. './data/fs711_subjects' # # $3: list of subjects to run, e.g. ['sub_101_tp1', 'sub_101_tp2'] # # $4: list of original T1w-files (to copy), according to the list of subjects, e.g. # # ['./data/bids_bg_bmed360/sub-101/ses-1/anat/sub-101_ses-1_T1w.nii.gz', # # './data/bids_bg_bmed360/sub-101/ses-2/anat/sub-101_ses-2_T1w.nii.gz'] # # # Output: # # Freesurfer tree for each of the subjects in the given list # # echo $1 # echo $2 # # FREESURFER_HOME=${1}; export FREESURFER_HOME # PATH=${FREESURFER_HOME}/bin:${PATH}; export PATH # SUBJECTS_DIR=${2}; export SUBJECTS_DIR # FSLDIR=/usr/local/fsl; export FSLDIR # PATH=${FSLDIR}/bin:${PATH}; export PATH # . 
${FSLDIR}/etc/fslconf/fsl.sh # source ${FREESURFER_HOME}/SetUpFreeSurfer.sh # # # # PREPARE Data # cd $SUBJECTS_DIR # # ii=0 # counter=0 # # for i in $3 # do # echo $i # # Make FreeSurfer tree for this subject # mksubjdirs $i # cd $i # cd mri/orig # mkdir 001 # # for k in $4 # do # if [ $counter == $ii ] # then # echo $k # # Copy the proper DICOM or NIFTI data to the FS tree # cp -r $k 001/T1_orig.nii.gz # # Convert from NIFTI to MGZ # mri_convert -it nii -ot mgz 001/T1_orig.nii.gz 001.mgz # # # Start FreeSurfer processing # cd $SUBJECTS_DIR # recon-all -s $i -all >& /dev/null & # echo '#recon-all -s $i -all >& /dev/null &' # # fi # ((counter++)) # done # ((ii++)) # counter=0 # # done # # - # `This will take a long time ......` monitor with 'htop' and check `.../scripts/recon-all.log` occasionally
Lab3-diffusion-MRI/freesurfer-segmentation-bash.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Regression Miniproject # In this tutorial we will discuss how we can make a regression model and integrate it using flask for front end. Moreover we will also see how to host this model for free to make it useful for everyone. # ## Car Price Prediction # We will be predicting car price based on various features or independent variable such as:-<br> # 1)Name<br> # 2)Year<br> # 3)km_driven<br> # 4)fuel<br> # 5)seller_type<br> # 6)transmission<br> # 7)Owner<br> # # Here the output of the model will be car's selling price.<br> # Let's start the project with importing dataset. # ## Importing Libraries import numpy as np import pandas as pd # Importing the dataset dataset = pd.read_csv('C:\\Users\\SAGAR\\Documents\\ashwini\\latest project\\Car-Price-Prediction-master\\Car-Price-Prediction-master\\car_data.csv') dataset.head() dataset.info() dataset['transmission'].value_counts() X = dataset.iloc[:, [0,1,3,4,5,6,7]] y = dataset.iloc[:, 2] X.head() # Including columns which are of object datatype in modified dataframe df_car_mod = X.select_dtypes(include=['object']) # Viewing first few rows of data df_car_mod.head() # Checking for any null values present in the dataset df_car_mod['seller_type'].value_counts() # Encoding fuel_type using get_dummies df_car_mod = pd.get_dummies(df_car_mod, columns=['fuel','seller_type','transmission'], drop_first=True) df_car_mod.head(15) df_car_mod.dtypes # Create a dictionary to find and replace values dic_to_replace = {"owner": {"First Owner": 1, "Second Owner": 2,"Third Owner": 3,"Fourth & Above Owner": 4,"Test Drive Car":5}} df_car_mod.replace(dic_to_replace, inplace=True) # View first few rows of data df_car_mod['owner'].head() # Enoding make column using LabelBinarizer from sklearn.preprocessing import 
LabelBinarizer labelbinarizer = LabelBinarizer() make_encoded_results = labelbinarizer.fit_transform(df_car_mod['name']) labelbinarizer.classes_ # Converting an numpy array into a pandas dataframe df_make_encoded = pd.DataFrame(make_encoded_results, columns=labelbinarizer.classes_) # Viewing few rows of data df_make_encoded.sample(10) dfs = [df_make_encoded,X['year'],X['km_driven'],df_car_mod] df_car_mod.drop(['name'],axis=1,inplace=True) df_car_mod.head() res = pd.concat(dfs,axis=1) res.head(15) # + # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(res.values, y.values, test_size = 0.2, random_state = 0) # Feature Scaling """ from sklearn.preprocessing import StandardScaler sc_X = StandardScaler() sc_y = StandardScaler() X = sc_X.fit_transform(X) y = sc_y.fit_transform(y)""" print(X_train[1:15,:]) # - from sklearn.ensemble import RandomForestRegressor regressor = RandomForestRegressor(n_estimators=300,random_state=0) regressor.fit(X_train,y_train) accuracy = regressor.score(X_test,y_test) print(accuracy*100,'%') new_data=["Maruti 800 AC",2007,70000,"Petrol","Individual","Manual","First Owner"] new_data=pd.DataFrame(new_data) new_data_mod = new_data.select_dtypes(include=['object']) new_data_mod=new_data_mod.T print(new_data_mod) new_data_mod_1 = pd.get_dummies(new_data_mod, columns=[3,4,5], drop_first=True) new_data_mod_1
Car-Price-Prediction-master/Car Price Prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import requests
from bs4 import BeautifulSoup
import csv
import re
import json
import sqlite3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from time import sleep
import os
from collections import Counter
import pickle
import warnings
import time
warnings.filterwarnings("ignore")
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import PIL
from PIL import Image, ImageFilter
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
import boto3
import botocore
# %matplotlib inline

# Use proxy and headers for safe web scraping
# os.environ['HTTPS_PROXY'] = 'http://172.16.17.32:8080'
# pd.options.mode.chained_assignment = None
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/'
                  '537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}

# +
link = 'https://www.amazon.co.uk/Olaplex-No-5-Bond-Maintenance-Conditioner/dp/B07D37SBHF/ref=lp_2867976031_1_4_s_it?s=beauty&ie=UTF8&qid=1585929007&sr=1-4'


def browser(link):
    '''Open a Selenium-based Chrome browser tuned for Amazon product (single item) pages.

    The browser is configured to auto-translate Japanese/German pages to
    English, dismiss the initial pop-ups, and hover over each product
    thumbnail so that the gallery images are loaded and can be scraped.

    PARAMETER
    ---------
    link: str
        Amazon product item link

    RETURN
    ------
    driver:
        Selenium web browser with the above interactions already performed
    '''
    options = Options()
    prefs = {
        "translate_whitelists": {"ja": "en", "de": "en"},
        "translate": {"enabled": "true"}
    }
    # helium = r'C:\Users\Dell-pc\AppData\Local\Google\Chrome\User Data\Default\Extensions\njmehopjdpcckochcggncklnlmikcbnb\4.2.12_0'
    # options.add_argument(helium)
    options.add_experimental_option("prefs", prefs)
    options.headless = True
    driver = webdriver.Chrome(chrome_options=options)
    driver.get(link)

    # Dismiss the initial banner/pop-up if present.
    try:
        driver.find_element_by_xpath(
            '//*[@id="nav-main"]/div[1]/div[2]/div/div[3]/span[1]/span/input').click()
    except Exception:
        pass

    # Hover over thumbnails li[3]..li[9], closing the image pop-over after
    # each hover. This replaces seven copy-pasted try/except pairs with one
    # loop; the sequence of browser actions is identical.
    # NOTE(review): `hover` is not defined in this cell — presumably defined
    # elsewhere in the notebook. A NameError here would be silently swallowed
    # by the except, so confirm the helper actually exists.
    for thumb_index in range(3, 10):
        try:
            hover(driver, '//*[@id="altImages"]/ul/li[{}]'.format(thumb_index))
        except Exception:
            pass
        try:
            driver.find_element_by_xpath(
                '//*[@id="a-popover-6"]/div/header/button/i').click()
        except Exception:
            pass
    return driver


def scroll(driver):
    '''Scroll the page down to the reviews (or detail-bullets) section.'''
    scroll_temp(driver)
    from selenium.common.exceptions import NoSuchElementException
    try:
        # Try progressively more general anchors for the reviews section.
        try:
            element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]/div/div[1]')
        except NoSuchElementException:
            try:
                element = driver.find_element_by_xpath('//*[@id="reviewsMedley"]')
            except NoSuchElementException:
                element = driver.find_element_by_xpath('//*[@id="detail-bullets_feature_div"]')
        actions = ActionChains(driver)
        actions.move_to_element(element).perform()
    except NoSuchElementException:
        pass


def scroll_temp(driver):
    '''
    Automated scroller in Selenium web browser.

    Repeatedly scrolls towards the bottom of the document until the page
    height stops growing for `max_run_time` seconds (lazy-loaded content
    keeps extending the page while scrolling).

    PARAMETER
    ---------
    driver: Selenium Webbrowser
    '''
    pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
    run_time, max_run_time = 0, 2
    while True:
        iteration_start = time.time()
        # Scroll webpage, the 100 allows for a more 'aggressive' scroll
        driver.execute_script('window.scrollTo(0,0.6*document.body.scrollHeight);')
        post_scroll_height = driver.execute_script('return document.body.scrollHeight;')
        scrolled = post_scroll_height != pre_scroll_height
        timed_out = run_time >= max_run_time
        if scrolled:
            run_time = 0
            pre_scroll_height = post_scroll_height
        elif not scrolled and not timed_out:
            run_time += time.time() - iteration_start
        elif not scrolled and timed_out:
            break


# Opening Selenium Webdrive with Amazon product
driver = browser(link)
time.sleep(5)
# scroll(driver)
# time.sleep(2)

# Initializing BeautifulSoup operation in selenium browser
# (requests is already imported at the top of the file; the duplicate
# mid-script import was removed)
req = requests.get(link, headers=headers)
soup = BeautifulSoup(req.content, 'html.parser')
time.sleep(2)

# Product Title
# NOTE(review): this assigns the whole parsed page, not the title text; the
# commented line shows the intended extraction. A plain assignment cannot
# fail, so the except branch is effectively dead — kept for parity with the
# commented-out xpath version.
try:
    # product_title = driver.find_element_by_xpath('//*[@id="productTitle"]').text
    product_title = soup
except Exception:
    product_title = 'Not Scrapable'
print(product_title)

# #Ratings - Star
# try:
#     rating_star = float(selenium_soup.findAll('span',{'class':'a-icon-alt'})[0].text.split()[0])
# except:
#     rating_star = 'Not Scrapable'
# print(rating_star)

# +
# //*[@id="customer_review-R3QCGHM0OXNSVI"]/div[2]/a[2]/span

# +
# //*[@id="productTitle"]
Data Warehouse/Amazon United Kingdom/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import torch import pyro import pyro.distributions as dist import pyro.poutine as poutine from pyro.contrib.examples.bart import load_bart_od from pyro.contrib.forecast import ForecastingModel, Forecaster, backtest, eval_crps from pyro.infer.reparam import LocScaleReparam, StableReparam from pyro.ops.tensor_utils import periodic_cumsum, periodic_repeat, periodic_features from pyro.ops.stats import quantile import matplotlib.pyplot as plt # %matplotlib inline print( pyro.__version__) assert pyro.__version__.startswith('1.5') pyro.enable_validation(True) pyro.set_rng_seed(20200221) # - dataset = load_bart_od() print(dataset.keys()) print(dataset["counts"].shape) print(" ".join(dataset["stations"])) T, O, D = dataset["counts"].shape data = dataset["counts"][:T // (24 * 7) * 24 * 7].reshape(T // (24 * 7), -1).sum(-1).log() data = data.unsqueeze(-1) plt.figure(figsize=(9, 3)) plt.plot(data) plt.title("Total weekly ridership") plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(0, len(data)); # First we need some boilerplate to create a class and define a .model() method. class Model1(ForecastingModel): # We then implement the .model() method. Since this is a generative model, it shouldn't # look at data; however it is convenient to see the shape of data we're supposed to # generate, so this inputs a zeros_like(data) tensor instead of the actual data. def model(self, zero_data, covariates): data_dim = zero_data.size(-1) # Should be 1 in this univariate tutorial. feature_dim = covariates.size(-1) # The first part of the model is a probabilistic program to create a prediction. # We use the zero_data as a template for the shape of the prediction. 
bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1)) weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1)) prediction = bias + (weight * covariates).sum(-1, keepdim=True) # The prediction should have the same shape as zero_data (duration, obs_dim), # but may have additional sample dimensions on the left. assert prediction.shape[-2:] == zero_data.shape # The next part of the model creates a likelihood or noise distribution. # Again we'll be Bayesian and write this as a probabilistic program with # priors over parameters. noise_scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1)) noise_dist = dist.Normal(0, noise_scale) # The final step is to call the .predict() method. self.predict(noise_dist, prediction) T0 = 0 # begining T2 = data.size(-2) # end T1 = T2 - 52 # train/test split # %%time pyro.set_rng_seed(1) pyro.clear_param_store() time = torch.arange(float(T2)) / 365 covariates = torch.stack([time], dim=-1) forecaster = Forecaster(Model1(), data[:T1], covariates[:T1], learning_rate=0.1) # + samples = forecaster(data[:T1], covariates, num_samples=1000) p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1) crps = eval_crps(samples, data[T1:]) print(samples.shape, p10.shape) plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(data, 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(0, None) plt.legend(loc="best"); # - plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 
2011-01-01") plt.xlim(T1, None) plt.legend(loc="best"); # %%time pyro.set_rng_seed(1) pyro.clear_param_store() time = torch.arange(float(T2)) / 365 covariates = torch.cat([time.unsqueeze(-1), periodic_features(T2, 365.25 / 7)], dim=-1) forecaster = Forecaster(Model1(), data[:T1], covariates[:T1], learning_rate=0.1) # + samples = forecaster(data[:T1], covariates, num_samples=1000) p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1) crps = eval_crps(samples, data[T1:]) plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(data, 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(0, None) plt.legend(loc="best"); # - plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(T1, None) plt.legend(loc="best"); class Model2(ForecastingModel): def model(self, zero_data, covariates): data_dim = zero_data.size(-1) feature_dim = covariates.size(-1) bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1)) weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1)) # We'll sample a time-global scale parameter outside the time plate, # then time-local iid noise inside the time plate. drift_scale = pyro.sample("drift_scale", dist.LogNormal(-20, 5).expand([1]).to_event(1)) with self.time_plate: # We'll use a reparameterizer to improve variational fit. The model would still be # correct if you removed this context manager, but the fit appears to be worse. 
with poutine.reparam(config={"drift": LocScaleReparam()}): drift = pyro.sample("drift", dist.Normal(zero_data, drift_scale).to_event(1)) # After we sample the iid "drift" noise we can combine it in any time-dependent way. # It is important to keep everything inside the plate independent and apply dependent # transforms outside the plate. motion = drift.cumsum(-2) # A Brownian motion. # The prediction now includes three terms. prediction = motion + bias + (weight * covariates).sum(-1, keepdim=True) assert prediction.shape[-2:] == zero_data.shape # Construct the noise distribution and predict. noise_scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1)) noise_dist = dist.Normal(0, noise_scale) self.predict(noise_dist, prediction) # %%time pyro.set_rng_seed(1) pyro.clear_param_store() time = torch.arange(float(T2)) / 365 covariates = periodic_features(T2, 365.25 / 7) forecaster = Forecaster(Model2(), data[:T1], covariates[:T1], learning_rate=0.1, time_reparam="dct", ) # + samples = forecaster(data[:T1], covariates, num_samples=1000) p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1) crps = eval_crps(samples, data[T1:]) plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(data, 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(0, None) plt.legend(loc="best"); # - class Model3(ForecastingModel): def model(self, zero_data, covariates): data_dim = zero_data.size(-1) feature_dim = covariates.size(-1) bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1)) weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1)) drift_scale = pyro.sample("drift_scale", dist.LogNormal(-20, 5).expand([1]).to_event(1)) with self.time_plate: with poutine.reparam(config={"drift": 
LocScaleReparam()}): drift = pyro.sample("drift", dist.Normal(zero_data, drift_scale).to_event(1)) motion = drift.cumsum(-2) # A Brownian motion. prediction = motion + bias + (weight * covariates).sum(-1, keepdim=True) assert prediction.shape[-2:] == zero_data.shape # The next part of the model creates a likelihood or noise distribution. # Again we'll be Bayesian and write this as a probabilistic program with # priors over parameters. stability = pyro.sample("noise_stability", dist.Uniform(1, 2).expand([1]).to_event(1)) skew = pyro.sample("noise_skew", dist.Uniform(-1, 1).expand([1]).to_event(1)) scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1)) noise_dist = dist.Stable(stability, skew, scale) # We need to use a reparameterizer to handle the Stable distribution. # Note "residual" is the name of Pyro's internal sample site in self.predict(). with poutine.reparam(config={"residual": StableReparam()}): self.predict(noise_dist, prediction) # %%time pyro.set_rng_seed(2) pyro.clear_param_store() time = torch.arange(float(T2)) / 365 covariates = periodic_features(T2, 365.25 / 7) forecaster = Forecaster(Model3(), data[:T1], covariates[:T1], learning_rate=0.1, time_reparam="dct") for name, value in forecaster.guide.median().items(): if value.numel() == 1: print("{} = {:0.4g}".format(name, value.item())) # + samples = forecaster(data[:T1], covariates, num_samples=1000) p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1) crps = eval_crps(samples, data[T1:]) plt.figure(figsize=(9, 3)) plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3) plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast') plt.plot(data, 'k-', label='truth') plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps)) plt.ylabel("log(# rides)") plt.xlabel("Week after 2011-01-01") plt.xlim(0, None) plt.legend(loc="best"); # - # %%time pyro.set_rng_seed(1) pyro.clear_param_store() windows2 = backtest(data, covariates, Model2, 
min_train_window=104, test_window=52, stride=26, forecaster_options={"learning_rate": 0.1, "time_reparam": "dct", "log_every": 1000, "warm_start": True}) # %time pyro.set_rng_seed(1) pyro.clear_param_store() windows3 = backtest(data, covariates, Model3, min_train_window=104, test_window=52, stride=26, forecaster_options={"learning_rate": 0.1, "time_reparam": "dct", "log_every": 1000, "warm_start": True}) fig, axes = plt.subplots(3, figsize=(8, 6), sharex=True) axes[0].set_title("Gaussian versus Stable accuracy over {} windows".format(len(windows2))) axes[0].plot([w["crps"] for w in windows2], "b<", label="Gaussian") axes[0].plot([w["crps"] for w in windows3], "r>", label="Stable") axes[0].set_ylabel("CRPS") axes[1].plot([w["mae"] for w in windows2], "b<", label="Gaussian") axes[1].plot([w["mae"] for w in windows3], "r>", label="Stable") axes[1].set_ylabel("MAE") axes[2].plot([w["rmse"] for w in windows2], "b<", label="Gaussian") axes[2].plot([w["rmse"] for w in windows3], "r>", label="Stable") axes[2].set_ylabel("RMSE") axes[0].legend(loc="best") plt.tight_layout()
time_series/pyro_time_series_univariate.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import gc import time import keras as k import numpy as np import pandas as pd import sklearn as sk import datetime as dt import keras.backend as K import matplotlib.pyplot as plt from keras import regularizers from keras.models import Model from keras.layers import Input, Dense, Activation from sklearn.preprocessing import MinMaxScaler from sklearn.feature_selection import SelectPercentile, chi2 from sklearn.metrics import mean_squared_error, mean_absolute_error, \ confusion_matrix, classification_report # custom Fuzzy Layers from sofenn import SOFNN from sofenn.layers import FuzzyLayer, NormalizedLayer, WeightedLayer, OutputLayer pd.set_option('display.max_columns', None) # - # # Inspecting Data df = pd.read_csv('../data/FinalDataset.csv',index_col='date') df.info() df.head() # show closing price time series df['bitcoin_close'].plot(title='BTC Close Price',grid=True,rot=35, figsize=(12,8)) # show log-closing price time series df['bitcoin_close'].plot(title='BTC Log-Close Price',grid=True,logy=True,rot=35, figsize=(12,8)) # stats on time series length print("number of days: {:.1f}".format(df.shape[0])) print("number of months: {:.1f}".format(df.shape[0] / 30)) print("number of years: {:.1f}".format(df.shape[0] / 365)) # # Defining Functions # ## Loading and Preparing Data # + # function to read in dataset def get_data(): return pd.read_csv('../data/FinalDataset.csv',index_col='date') # + # function to drop unneeded fields def keep_only_close(df_in): drops = ['bitcoin_open', 'bitcoin_high', 'bitcoin_low', 'bitcoin_volume', 'bitcoin_market_cap'] return df_in.drop(drops, axis=1) # + # function to add predicted column for tomorrow's close price def add_prediction_field(df_in): # creating predicted value # create column to hold tomorrow's close for 
each day df_in['tomorrow_close'] = df_in['bitcoin_close'].shift(-1) # change in price between consecutive closing days df_in['day_change'] = df_in['tomorrow_close'] - df_in['bitcoin_close'] # indicator variable to be used for predicting higher/lower days df_in['y'] = np.where(df_in['day_change'] >= 0, 1, 0) # drop intermediate columns return df_in.drop(['tomorrow_close', 'day_change'], axis=1) # + # function to retrieve lag values to use for each feature def get_feature_lag_dict(lag_vers=1): # check value of input if lag_vers not in [1, 2, 3, 4]: raise ValueError('Incorrect Version') # read in right version of file df = pd.read_csv('../lags/optimal_v{}.csv'.format(lag_vers)) # return dictionary of features as keys and lags as values return dict(zip(df.feature, df.lags)) # + # function to add lags for daily features def add_lags(df_in, lag_vers=1): # splitting into X/y for separate processing X, y = df_in[df_in.columns[:-1]], df_in[df_in.columns[-1]] # save list of columns for lags col_list = X.columns # create output DF df_out = X.copy() # creating lags f_dict = get_feature_lag_dict(lag_vers=lag_vers) # create lag columns for each other variable for col in col_list: if col not in f_dict.keys(): continue for lag in range(int(f_dict[col])): df_out['{}_(-{})'.format(col,lag+1)] = df_out[col].shift(lag+1) # return and drop na df_out['y'] = y return df_out.dropna() # + # function to remove data before certain date def truncate_date(df_in, first_day='1999-1-1'): # set mask of Bool values mask = (pd.to_datetime(df_in.index) >= first_day) return df_in.loc[mask] # + # function to rejoin training data into X/y again def rejoin_training_data(X_train, X_test, y_train, y_test): Xs = [X_train, X_test] Ys = [y_train, y_test] df_out = pd.concat(Xs) df_out['y'] = pd.concat(Ys) return df_out # - # function to create train/test X and y def create_training_data(df_in, lag_vers=1, train_split=0.7, rescale=True, k_feat_perc=None): # split into test train X, y = 
df_in[df_in.columns[:-1]], df_in[df_in.columns[-1]] # determine cutoff of train/test split and split cutoff = int(X.shape[0] * train_split) X_train, y_train = X[:cutoff], y[:cutoff] X_test, y_test = X[cutoff:], y[cutoff:] # scale values to 0-1 # must only scale based on training data, to ensure no # foresight with averages if rescale: scaler = MinMaxScaler(feature_range=(0,1)).fit(X_train.values) cols = X.columns X_train = pd.DataFrame(scaler.transform(X_train.values), index=X_train.index, columns=cols) X_test = pd.DataFrame(scaler.transform(X_test.values), index=X_test.index, columns=cols) # select top K features to use if k_feat_perc: selector = SelectPercentile(chi2, percentile=k_feat_perc).fit( X_train.values,y_train.values) # index and names of remainig columns after filter cols = X_train.columns[selector.get_support(indices = True)] X_train = pd.DataFrame(selector.transform(X_train.values), index=X_train.index, columns=cols) X_test = pd.DataFrame(selector.transform(X_test.values), index=X_test.index, columns=cols) # recreate into X/y DF and add lags df_renew = rejoin_training_data(X_train, X_test, y_train, y_test) df_renew = add_lags(df_renew, lag_vers=lag_vers) # split into test train again X, y = df_renew[df_renew.columns[:-1]], df_renew[df_renew.columns[-1]] cutoff = int(X.shape[0] * train_split) X_train, y_train = X[:cutoff], y[:cutoff] X_test, y_test = X[cutoff:], y[cutoff:] # display shapes print('='*65) print('New Training Set') print('Training Set Dims: {}'.format(X_train.shape)) print('Testing Set Dims: {}'.format(X_test.shape)) print('='*65) return X_train, X_test, y_train, y_test # + # function to prep data and return DF for modeling def prepare_data(lag_vers=1, train_split=0.7, close_o=False, first_day=None, rescale=True, k_feat_perc=None): # read in dataset and initialize output df df_in = get_data() # drop non-price fields if needed if close_o: df_in = keep_only_close(df_in) # add prediction field df_in = add_prediction_field(df_in) # cut off 
prior to first_day if first_day: df_in = truncate_date(df_in, first_day) return create_training_data(df_in, lag_vers=lag_vers, train_split=train_split, rescale=rescale, k_feat_perc=k_feat_perc) # - # ## Preparing Data for Training # split dataset X_train, X_test, y_train, y_test = prepare_data(lag_vers=2, train_split=0.9, close_o=False, first_day='2016-7-1', rescale=True, k_feat_perc=50) X_train.shape X_test.shape y_train.shape y_test.shape # balance of dependent variable values ones = y_train.sum() zeros = y_train.shape[0] - ones print("0's: {}".format(zeros)) print("1's: {}".format(ones)) # balance of dependent variable values ones = y_test.sum() zeros = y_test.shape[0] - ones print("0's: {}".format(zeros)) print("1's: {}".format(ones)) # ## Build, Train, Test SOFNN Model sofnn = SOFNN(X_train, X_test, y_train, y_test) sofnn._train_model() y_pred = sofnn._evaluate_model() # + # function to create and train model def run_model(rules=5, batch_size = 1, epochs=5): # get shape of training data samples, feats = X_train.shape # add layers inputs = Input(name='Inputs',shape=(feats,)) fuzz = FuzzyLayer(rules) norm = NormalizedLayer(rules) weights = WeightedLayer(rules) raw = OutputLayer() # run through layers phi = fuzz(inputs) psi = norm(phi) f = weights([inputs, psi]) raw_output = raw(f) #raw_output = Dense(1, name='RawOutput', activation='linear', use_bias=False)(f) preds = Activation(name='OutputActivation', activation='sigmoid')(raw_output) # compile model and output summary model = Model(inputs=inputs, outputs=preds) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) print(model.summary()) # fit model and evaluate model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,shuffle=False) scores = model.evaluate(X_test, y_test, verbose=1) accuracy = scores[1] * 100 print("\nAccuracy: {:.2f}%".format(accuracy)) # print confusion matrix print('\nConfusion Matrix') print('='*20) y_pred = np.squeeze(np.where(model.predict(X_test) >= 
0.5, 1, 0), axis=-1) print(pd.DataFrame(confusion_matrix(y_test, y_pred), index=['true:no', 'true:yes'], columns=['pred:no', 'pred:yes'])) # print classification report print('\nClasification Report') print('='*20) print(classification_report(y_test, y_pred, labels=[0,1])) return model, y_pred # - # test out various nodes for rule in [5]: print('{} Rules'.format(rule)) model, y_pred = run_model(rules=rule, batch_size=30, epochs=50) # ## Testing Model Attributes sofnn = SOFNN(X_train, X_test, y_train, y_test, neurons=7) sofnn._train_model() y_pred = sofnn._evaluate_model() model = sofnn.model #define rmse and tolerance limit E_mae = mean_absolute_error(y_test.values, y_pred) k_mae = 0.1 prune_tol = 0.4 E = max(prune_tol * E_mae, k_mae) E E_mae prune_model = sofnn.build_model() act_weights = sofnn.model.get_weights() prune_weights = prune_model.get_weights() prune_model.set_weights(act_weights) prune_new = prune_model.get_weights() sofnn.prune_neurons(y_pred) sofnn.self_organize(prune_tol=0.9, epochs=50, max_widens=50)
notebooks/modeling/keras/sofnn-test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Day 3: Crossed Wires def parse_step(string): return string[0], int(string[1:]) def parse(lines): ret = list() for line in lines.split("\n"): if not line: continue items = line.split(",") ret.append([parse_step(x) for x in items]) return ret EXAMPLE = parse(""" R8,U5,L5,D3 U7,R6,D4,L4 """) REAL = parse(""" R1003,D430,L108,D570,R459,U7,L68,D232,L130,U93,R238,U951,L821,U723,L370,U873,L680,U749,R110,U17,R185,U484,R550,U356,L212,U350,L239,D208,R666,U70,L369,U448,R54,D402,R165,D375,L468,U886,L303,U779,L752,U664,L120,U643,R405,D288,L220,U727,L764,D615,R630,U688,R961,U499,L782,D852,L743,U443,R355,U856,L795,U235,L876,D511,L108,U637,R427,D338,L699,D911,L506,D607,L539,U977,R654,D634,L196,U944,R922,D774,R358,U828,L970,D386,R795,U602,R249,U793,L171,D217,L476,D123,L179,U820,R895,D239,R363,D629,L226,D811,R962,D848,R218,D581,R369,D872,L653,D281,R304,D302,R780,U636,L413,D712,L642,D886,R613,U736,L968,D82,R953,U408,L130,U654,R312,U74,L610,D798,R242,D586,L808,D664,L764,U455,R264,U384,L154,D484,R883,D276,L423,U11,L145,U156,L268,U46,R202,U641,R920,D483,R859,U94,L173,D796,R11,D328,R48,D161,L615,D396,R350,D48,R946,D233,R385,D294,R640,D301,R810,D824,L969,D469,R34,U995,R750,D827,R52,U606,R143,U868,L973,U863,L17,U995,L236,U994,R403,D312,R49,U143,L399,U821,R974,U119,R410,D233,R228,D326,R112,D512,L950,D103,L590,U80,R7,U441,L744,U643,L80,D631,L576,U680,R369,U741,L87,D748,R773,U145,R464,U546,R80,D251,L972,U414,L390,U148,L84,D481,L425,U293,L564,U880,R535,U703,R981,U944,R224,D366,R29,U517,R342,U686,L384,D650,R983,D287,L108,U713,L523,U695,R881,D126,R151,U153,R161,D791,L599,D936,L816,U387,R411,U637,L434,D22,L720,U579,L661,D644,L220,U325,R753,D392,L503,U617,R1,D956,L607,U602,L800,D749,L193,U215,L91,U733,L606,U510,L124,D550,R303,D835,R19,D326,R577,U265,L156,D924,L122,D186,R803,U3,R
879 L1003,U603,L675,U828,R671,U925,R466,D707,L39,U1,R686,U946,L438,U626,R714,D365,L336,D624,R673,U672,L729,D965,R824,D533,R513,D914,R829,U275,L424,U10,L244,U158,R779,D590,R116,U714,R662,D989,R869,D547,R817,U315,R439,D29,L599,D870,L645,U656,R845,U19,R960,U669,L632,D567,L340,U856,R955,D314,R452,D896,R574,D162,R240,U302,R668,U706,R394,D24,L422,U884,R804,U576,L802,U400,R405,U676,L344,D628,R672,U580,R710,U536,L712,U738,L266,D212,R552,D229,R265,U835,R152,U784,L478,D87,L783,D327,R728,U590,R408,D397,R363,D654,R501,D583,R445,U897,R888,D480,R455,U593,R906,D506,R985,D361,R361,D619,L462,D873,L248,D348,R540,D578,L274,D472,R254,U647,R54,U681,L33,U343,R913,U120,L64,D849,L953,U407,L64,U744,L482,U240,L82,U69,R480,D796,L137,U527,R428,U67,R123,U688,L985,D944,R583,D804,R331,U328,R906,U376,L966,U433,R863,D931,L315,D9,L77,D141,L738,D661,R742,D44,R383,U78,R106,D301,L186,U907,L304,U786,L256,U718,R861,D145,R694,D721,R607,D418,R358,U600,R228,D139,R476,D451,L49,U616,L491,U8,R371,D735,R669,U388,L905,D282,L430,U491,L775,U891,L831,U350,L247,D609,R489,U266,R468,D748,R134,U187,R882,D315,R344,D363,R349,U525,R831,U644,R207,D563,L1,D946,L559,U789,L187,U370,L284,D910,L394,D560,L705,U661,R272,U109,L12,D554,L670,D169,L375,D100,R382,D491,L53,D916,R152,U82,L236,U845,L860,U732,R327,D190,R888,U722,R770,U993,R509,D970,L225,D756,R444,D992,L746,D35,R329,D452,R728,U575,L325,U414,L709,D844,R692,D575,R132,D520,L506,D384,L581,U36,L336,U849,L944,U450,R138,D186,L613,U805,R32,U763,R210,U556,R125,D499,R729 """) EXAMPLE vectors = {'R': (1,0), 'L': (-1,0), 'U': (0,1), 'D': (0,-1)} def draw_steps(pos, move): ret = dict() vector = vectors[move[0]] distance = move[1] for i in range(distance): pos = (pos[0]+vector[0], pos[1]+vector[1]) ret[pos] = 1 return pos, ret draw_steps((1,1), ('R', 3)) def draw_wire(pos, moves): ret = dict() pos = (0,0) for m in moves: pos, steps = draw_steps(pos, m) ret.update(steps) return ret draw_wire((0,0), [('R', 8), ('U', 5), ('L', 5), ('D', 3)]) def crossings(wires): board = set() crossings = 
set() central = (0,0) for w in wires: path = draw_wire(central,w) for xy in path.keys(): if xy in board: crossings.add(xy) board.add(xy) return crossings crossings(EXAMPLE) def nearest_crossing(wires): return min(abs(x)+abs(y) for (x,y) in crossings(wires)) nearest_crossing(EXAMPLE) nearest_crossing(REAL) # # Part 2 def draw_steps(start_dist, pos, move): ret = dict() vector = vectors[move[0]] distance = move[1] for i in range(distance): pos = (pos[0]+vector[0], pos[1]+vector[1]) ret[pos] = start_dist+i+1 return start_dist+distance, pos, ret draw_steps(2, (1,1), ('R', 3)) def draw_wire(pos, moves): ret = dict() pos = (0,0) dist = 0 for m in moves: dist, pos, steps = draw_steps(dist, pos, m) ret.update(steps) return ret draw_wire((0,0), [('R', 8), ('U', 5), ('L', 5), ('D', 3)]) def crossings(wires): board = dict() crossings = dict() central = (0,0) for w in wires: path = draw_wire(central,w) for xy,d in path.items(): if xy in board: d2 = board[xy] + d crossings[xy] = d2 board[xy] = d2 else: board[xy] = d return crossings crossings(EXAMPLE) def shortest_crossing(wires): min_d = 100000 best = (0,0) for xy,d in crossings(wires).items(): if d < min_d: min_d = d best = xy return best, min_d shortest_crossing(EXAMPLE) shortest_crossing(REAL)
day03/python/qznc/AOC-2019-03.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.4 64-bit
#     name: python374jvsc74a57bd0021d9f4f6a0c9e23e32c4246ac82593951ffad9baab3e58c0c69e8a8c06b339b
# ---

# # Python | day 6 | while

# With the **while** loop we can execute a set of statements as long as a condition is true.

# ```python
# i = 1
# while i < 6:
#     print(i)
#     i += 1
# ```

# https://www.w3schools.com/python/python_while_loops.asp

# ### Exercise 1.
# Do as described below:
#
# - Make a variable called strength, and set its initial value to 5.
#
# - Print a message reporting the player's strength.
#
# - Set up a while loop that runs until the player's strength increases to a value such as 10.
#
# - Inside the while loop, print a message that reports the player's current strength.
#
# - Inside the while loop, write a statement that increases the player's strength.
#
# - Outside the while loop, print a message reporting that the player has grown too strong, and that they have moved up to a new level of the game.
#
# Play around with different cutoff levels for the value of strength, and play around with different ways to increase the strength value within the while loop.
#

# Player gains +1 strength per round and levels up at the cutoff (11).
# (Typos in the original messages — "playe's", "strengh" — are fixed, and
# the in-loop message now correctly says "current" instead of "initial".)
strength = 5
print("player's initial strength is 5")
while strength < 11:
    print("player's current strength is", strength)
    strength += 1
print("Level Up !!")

# Same game, but strength now grows faster (+2 per round, cutoff 31).
strength = 5
print("player's initial strength is 5")
while strength < 31:
    print("player's current strength is", strength)
    strength += 2
print("Level Up x3 !!")

# ### Exercise 2.
# To solve the following exercises it will be necessary to define a function for each section. You choose the name of those functions unless specified, but make sure you define a single parameter `whichever_list`, when calling the function it will correspond to the variable `info_list`.
# you can use this list or take yours from Practice 3 info_list = [15, 'Recoletos', True, ['Recoletos', 15], None, '8'] # You have already read these questions below, but this time you are asked to solve it using `while` loop instead of `for`. # # - Loop through `whichever_list` to print all the elements it contains. This function should return the list turned into a tuple. # - Loop through `whichever_list` to print all the elements it contains concatenated with the string` "--> OK" `. This function should return a message saying, `"everything will be ok"`. # - Loop through `whichever_list` to display all items it contains except the first one. This function should return the first element of the list. # - Loop through `whichever_list` to display all the items it contains except the last one. This function should return the last element of the list. # + info_list = [15, 'Recoletos', True, ['Recoletos', 15], None, '8'] def funcion1(whichever_list): contador = 0 while contador < len(whichever_list): print(whichever_list[contador]) contador += 1 x = tuple(whichever_list) return x funcion1(whichever_list=info_list) # + tags=[] info_list = [15, 'Recoletos', True, ['Recoletos', 15], None, '8'] def funcion1(whichever_list): contador2 = 0 while contador2 < len(whichever_list): print (str(whichever_list[contador2]) + "----> Ok") contador2 += 1 return "everything will be ok" funcion1(whichever_list=info_list) # + info_list = [15, 'Recoletos', True, ['Recoletos', 15], None, '8'] def funcion1(whichever_list): contador3 = 0 while contador3 < len(whichever_list): if contador3 == 0: pass contador3 += 1 else: print (str(whichever_list[contador3])) contador3 += 1 return whichever_list[0] funcion1(whichever_list=info_list) # + def funcion1(whichever_list): contador3 = 0 while contador3 < len(whichever_list): if contador3 == int(len(whichever_list)) - 1: pass contador3 += 1 else: print (str(whichever_list[contador3])) contador3 += 1 return whichever_list[int(len(whichever_list)) - 1] 
funcion1(whichever_list=info_list)
# -

# ### Exercise 3.
# 1. Define a function that performs the [Fibonacci series](https://quantdare.com/numeros-de-fibonacci/#:~:text=En%20matem%C3%A1ticas%2C%20la%20sucesi%C3%B3n%20o,nombre%20de%20n%C3%BAmero%20de%20Fibonacci.) with a `while`.

# +
hasta = 500


def fibonacci(hasta):
    """Print the Fibonacci numbers strictly smaller than ``hasta``.

    Bug fix: the previous version tested ``while start < hasta`` but never
    updated ``start``, so the loop ran forever, and its update rule
    (``n = (n-1) + n`` followed by ``n += 1``) doubled ``n`` instead of
    summing the two previous terms.  This version keeps a pair of
    consecutive Fibonacci terms and advances it until the limit.
    """
    previous, current = 0, 1
    while previous < hasta:
        print(previous)
        # advance the pair (F(n), F(n+1)) -> (F(n+1), F(n+2))
        previous, current = current, previous + current


fibonacci(hasta)
# -

# ### Bonus Track.
# To solve the following exercises it will be necessary to define a function for each section. You choose the name of those functions unless specified, but make sure you define a single parameter `whichever_list`, when calling the function it will correspond to the variable `info_list`.

# you can use this list or take yours from Practice 3
info_list = [15, 'Recoletos', True, ['Recoletos', 15], None, '8']

# 1. Define a function `f_s` that saves the value of a word (for example: "nothing") in a variable and print it. Also, concatenate its value with the street number of The Bridge and display it on the screen.
#
# Also, display a boolean **True** if the length of the concatenated word is 6. If not, it will be **False**. The function must return the value **True** or **False**.

# 2. Create a list `list_aux` with 7 elements, whichever you want. Next, create a function `function_use_f_s` which receives as parameters a function `some_function`, a word which is the argument of `some_funtion` and whichever list `whichever_list`. If the value of `some_function` is **True**, then delete an item from `whichever_list`. Repeat the process until `whichever_list` has no elements. The argument for `whichever_list` must be `list_aux` and for `some_function` it will be the function `f_s` from the previous point.

# 3. Define a function `even_numbers` that has an input parameter `arg1`, type integer. The function must return a list of numbers of size `arg1`, all of them being even numbers.
# # Don't use the function `print()` inside the function, but print what `even_numbers` returns when calling it with the argument `10`, just to check your function works. # # ```python # print(even_numbers(10)) # ``` # 4. Create a function that has an input variable `arg1`, which will be the list of even numbers from the previous section, and that removes elements from the list, one by one, if `arg1` has a size divisible by 2 and, if not, it adds just once, the value of the function `f_s` that should be a boolean (`True` or `False`). # # !['nicetry'](https://i.pinimg.com/originals/36/ab/81/36ab81cd8d63cf7c4a08f39403698c77.jpg)
week2_precurse_python_II/day1_python_V/Practice_7_while.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # COOKIE AMIP cloud on - cloud off: change in precipitation and circulaton metrics # # This notebook reproduces Table 1. # # Data: COOKIE amip on and off simulations, time-avergaed between years 1980 and 2008. Data is used on the models' native horizontal grids. # ### Load libraries import xarray as xr import numpy as np # Functions for calculating ITCZ and Jet: import metrics as mt # For reference, print package versions to screen: print('xarrary: ', xr.__version__) print('numpy: ', np.__version__) # ### Load data models=['CNRM-CM5', 'HadGEM2-A', 'IPSL-CM5A-LR', 'MPI-CM5', 'MRI-CGCM3'] # Precipitation and zonal wind at 850 hPa: # + def load_pr_ua850(var, varname, exp): ds_list = [] for mod in models: ds_list.append( ( xr.open_dataset('../../data/cookie/'+var+'_'+mod+'_'+exp+'.timmean_1980-2008.nc') [varname].mean('lon').squeeze() ) ) return ds_list p_on = load_pr_ua850('pr' , 'pr', 'amip' ) p_off = load_pr_ua850('pr' , 'pr', 'offamip_mm') u_on = load_pr_ua850('ua850', 'ua', 'amip' ) u_off = load_pr_ua850('ua850', 'ua', 'offamip_mm') # - # Mass stream function calculated by cdo mastrfu: # + def load_msf(exp): ds_list = [] for mod in models: ds = ( xr.open_dataset('../../data/cookie/msf_'+mod+'_'+exp+'.timmean_1980-2008.nc') ['mastrfu'].squeeze() ) if mod == 'MPI-CM5': # rename lev to plev to be consistent with other models ds = ds.rename({'lev': 'plev'}) ds = ds/1.0e9 # adapt units of mass stream function ds_list.append(ds) return ds_list msf_on = load_msf('amip' ) msf_off = load_msf('offamip_mm') # - # ### Calculate precipitation and circulation metrics # #### 1. 
Change in regional precipitation # + dp_glob = [] # global mean dp_trop = [] # tropical mean dp_extr = [] # extratropical mean for i in range(len(p_on)): # define surface area weights weights = p_on[i] + np.cos(np.deg2rad(p_on[i].lat.values)) # global mean aux = 86400*(p_on[i].values - p_off[i].values) dp_glob.append( np.average(aux, weights=weights) ) # tropical mean aux = 86400*(p_on[i].sel(lat=slice(-30,30)).values - p_off[i].sel(lat=slice(-30,30)).values) dp_trop.append( np.average(aux, weights=weights.sel(lat=slice(-30,30))) ) # extratropical mean aux = 86400*(p_on[i].sel(lat=slice(-90,-30)).values - p_off[i].sel(lat=slice(-90,-30)).values) sh = np.average(aux, weights=weights.sel(lat=slice(-90,-30))) aux = 86400*(p_on[i].sel(lat=slice(30,90)).values - p_off[i].sel(lat=slice(30,90)).values) nh = np.average(aux, weights=weights.sel(lat=slice(30,90))) dp_extr.append( 0.5*(nh+sh) ) # - # #### 2. Change in ITCZ position ditcz = [] for i in range(len(p_on)): itcz_on = mt.get_itczposition(p_on[i], p_on[i].lat ) itcz_off = mt.get_itczposition(p_off[i], p_off[i].lat) ditcz.append(itcz_on-itcz_off) # #### 3. Change in Hadley cell strength dhc_sh = [] dhc_nh =[] for i in range(len(models)): dhc_sh.append( -1* ( msf_on[i].sel(lat=slice(-30,0), plev=slice(850e2,200e2)).max().values - msf_off[i].sel(lat=slice(-30,0), plev=slice(850e2,200e2)).max().values ) ) dhc_nh.append( msf_on[i].sel(lat=slice(0,30), plev=slice(850e2,200e2)).max().values - msf_off[i].sel(lat=slice(0,30), plev=slice(850e2,200e2)).max().values ) # #### 4. 
Change in jet position and strength # + djetpos_nh = [] # NH jet position djetpos_sh = [] # SH jet position djetmag_nh = [] # NH jet strength djetmag_sh = [] # SH jet strength for i in range(len(u_on)): jetpos_sh_on , jetmag_sh_on , jetpos_nh_on , jetmag_nh_on = mt.get_eddyjetlatint(u_on[i] , u_on[i].lat ) jetpos_sh_off, jetmag_sh_off, jetpos_nh_off, jetmag_nh_off = mt.get_eddyjetlatint(u_off[i], u_off[i].lat) djetpos_nh.append( jetpos_nh_on-jetpos_nh_off) djetpos_sh.append(-1*(jetpos_sh_on-jetpos_sh_off) ) djetmag_nh.append(jetmag_nh_on-jetmag_nh_off) djetmag_sh.append(jetmag_sh_on-jetmag_sh_off) # - # ### Print to screen in latex-ready format print('model Ptrop Pextr Pglob ITCZ NHHC SHHC NHJetpos SHJetpos NHJetmag SHJetmag') for i in range(len(models)): print('{:12s}'.format(models[i]), ' & ', '{:3.2f}'.format(dp_trop[i].round(decimals=2)), ' & ', '{:3.2f}'.format(dp_extr[i].round(decimals=2)), ' & ', '{:3.2f}'.format(dp_glob[i].round(decimals=2)), ' & ', '{:1.1f}'.format(ditcz[i].round(decimals=1)) , ' & ', '{:3.0f}'.format(dhc_nh[i].round(decimals=0)) , ' & ', '{:3.0f}'.format(dhc_sh[i].round(decimals=0)) , ' & ', '{:4.1f}'.format(djetpos_nh[i].round(decimals=1)), ' & ', '{:4.1f}'.format(djetpos_sh[i].round(decimals=1)), ' & ', '{:4.1f}'.format(djetmag_nh[i].round(decimals=1)), ' & ', '{:4.1f}'.format(djetmag_sh[i].round(decimals=1)), ' \\') # ### Illustration of the flat meridional profile of zonal wind in IPSL-CM5A-LR and MRI-CGCM3, which leads to the large diagnosed NH jet shifts, MPI-CM5 for comparison import matplotlib.pyplot as plt # + plt.figure(figsize=(12,4)) # IPSL-CM5A-LR plt.subplot(1,3,1) plt.title(models[2]) plt.plot(u_on[2].lat, u_on[2].values) plt.plot(u_on[2].lat, u_off[2].values) plt.xlim(30,50) plt.ylim(0,10) plt.ylabel(r'zonal wind / ms$^{-1}$'); plt.xlabel('deg latitude'); # MRI-CGCM3 plt.subplot(1,3,2) plt.title(models[4]) plt.plot(u_on[4].lat, u_on[4].values) plt.plot(u_on[4].lat, u_off[4].values) plt.xlim(30,50) plt.ylim(0,10) 
plt.xlabel('deg latitude'); # MPI-CM5 plt.subplot(1,3,3) plt.title(models[3]) plt.plot(u_on[3].lat, u_on[3].values) plt.plot(u_on[3].lat, u_off[3].values) plt.xlim(30,50) plt.ylim(0,10) plt.xlabel('deg latitude');
tables/table-1/make_table-1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #Install require packages # !pip install numpy # !pip install pandas # !pip install seaborn # !pip install plotly # !pip install scikit-learn import numpy as np import pandas as pd import json from pandas.io.json import json_normalize import matplotlib.pyplot as plt import seaborn as sns import glob, os from datetime import date, timedelta import plotly import plotly.graph_objects as go import plotly.express as px from plotly.subplots import make_subplots # %matplotlib inline import warnings warnings.filterwarnings('ignore') pd.set_option('display.max_colwidth', -1) # - # ## Design : ibex <br /> <br /> Platform: sky130hd # # A sample dataset with multiple runs varying the design utilization and the layer_adjust parameters for the routing layers. All design metrics from the runs are collected for analysis. # # Metrics data is represented as json files in the METRICS2.1 format. Each experiment in the run is a separate json file. All of the files are read into a DataFrame 'json_df'. # # * Rows of the DataFrame represent an experiment. # * Columns of the DataFrame represent the metrics. path = './metrics' json_df = pd.DataFrame() for filename in glob.glob(os.path.join(path, '*.json')): with open(os.path.join(os.getcwd(), filename), 'r') as f: data = f.read() data_json = json_normalize(json.loads(data)) json_df = json_df.append(data_json) print(f'Number of runs in the dataset: {json_df.shape[0]}') print(f'Number of metrics in each data: {json_df.shape[1]}') # * The columns in the DataFrame, which are the metrics colleced for each run. json_df.columns # **Data Engineering** # * Extract only the relevant columns -- for this exercise, get the design name, flow variant and route metrics. 
# * Parse Variant and extract the relevant features -- Utilization, layer adjust for each routing layer. # * Rename columns. # * Create new columns (features) based on other column values -- for e.g. Success/Fail for the router. # Data Engineering, extract the relevant metrics metrics_df = json_df[['run.flow__design', 'run.flow__variant', 'globalroute.timing__setup__ws', 'detailedroute.route__wirelength', 'detailedroute.route__via__count', 'detailedroute.route__drc_errors__count', 'detailedroute.runtime__total' ]] metrics_df = metrics_df.rename({ 'run.flow__design' : 'Design', 'run.flow__variant' : 'Variant', 'globalroute.timing__setup__ws' : 'GR WNS', 'detailedroute.route__wirelength' : 'Wire Length', 'detailedroute.route__via__count' : 'Vias', 'detailedroute.route__drc_errors__count' : 'DRC Errors', 'detailedroute.runtime__total' : 'Route Runtime' }, axis=1) # * Print the head of the head of the data frame. As we can see, all of the parameter values used in the experiment is in the "Variant" string. We will have to parse the string and create individual columns for the features we are interested in. 
metrics_df.head() # + metrics_df['GR Success'] = metrics_df['Success'] = True metrics_df.loc[metrics_df['GR WNS'] == 'N/A','GR Success'] = False metrics_df.loc[metrics_df['GR WNS'] == 'ERR','GR Success'] = False metrics_df.loc[metrics_df['DRC Errors'] == 'ERR','Success'] = False metrics_df.loc[metrics_df['Wire Length'] == 'N/A','Success'] = False metrics_df.loc[metrics_df['Wire Length'] == 'ERR','Success'] = False metrics_df.loc[metrics_df['Route Runtime'] == 'N/A','Success'] = False # + #metrics_df[(metrics_df['GR Success'] == True) & (metrics_df['Success'] == False)] # + #metrics_df[(metrics_df['Route Runtime'] == 'N/A') & (metrics_df['Success'] == True)] # - def parse_variant(val): variants = val.split("-") v_dict = dict() for i in range(len(variants)): key_val = variants[i].rsplit('_',1) v_dict[key_val[0]] = float(key_val[1]) return pd.Series([v_dict['CORE_UTIL'], v_dict['M1'], v_dict['M2'], v_dict['M3'], v_dict['M4'], v_dict['M5']]) metrics_df[['Core Util', 'M1 Adjust', 'M2 Adjust', 'M3 Adjust', 'M4 Adjust', 'M5 Adjust']] = metrics_df['Variant'].apply(parse_variant) metrics_df['Weighted Adjust'] = (metrics_df['M1 Adjust'] + metrics_df['M2 Adjust'] + metrics_df['M3 Adjust'] + metrics_df['M4 Adjust'] + metrics_df['M5 Adjust']) / 5 # * Extract individual features from the "Variant" string. # * Print the head of the dataframe after extracting the features and performing further data engineering. This data frame is now ready to be used. Notice that we have also created additional columns for capturing whether Global Route and Detailed Route completed. We have also caluclated a 'Weighted Adjust' column that is a simple mean of the 'layer adjust' for each of the routing layers. 
metrics_df.head()

success_df = metrics_df[metrics_df['Success'] == True]

success_df = success_df.astype({'Design' : 'string',
                                'Variant' : 'string',
                                'GR WNS' : 'float',
                                'Vias' : 'int',
                                'DRC Errors' : 'int',
                                'Wire Length' : 'float',
                                'Route Runtime' : 'string'})


def convert_runtime_str(rstr):
    """Convert an 'HH:MM:SS', 'MM:SS' or 'SS' runtime string to seconds.

    A trailing fractional part (e.g. '12:34.56') is truncated before
    parsing.  Raises ValueError for strings with more than three
    ':'-separated fields; the previous version left ``runtime`` unassigned
    in that case and failed later with an UnboundLocalError.
    """
    hms = rstr.split('.')[0].split(':')
    if not 1 <= len(hms) <= 3:
        raise ValueError(f'unrecognised runtime format: {rstr!r}')
    runtime = 0
    for field in hms:
        # Horner-style accumulation: each field is worth 60x the next one.
        runtime = runtime * 60 + int(field)
    return runtime


success_df['Runtime'] = success_df['Route Runtime'].apply(convert_runtime_str)

# * Print the statistics for each of the entries in the dataframe.

success_df.describe().transpose()

# * We can observe a huge variation in runtime for the different runs and also a substantial variation in the routed wirelength. We would like to observe the relation between the various parameters on both runtime and routed wirelength.

# #### Print some of base metrics from the data set.
# * Number of success/failures.
# * Min, Max of Wirelength, number of vias, drc errors.
#metrics_df['Success'].value_counts() print(f'Number of successful runs: {metrics_df[metrics_df["Success"] == 1].shape[0]}') print(f'Number of failed runs: {metrics_df[metrics_df["Success"] == 0].shape[0]}') min_wire_length = success_df['Wire Length'].min() max_wire_length = success_df['Wire Length'].max() print(f"Min Wire Length: {min_wire_length}, Max Wire Length: {max_wire_length}") min_vias = success_df['Vias'].min() max_vias = success_df['Vias'].max() print(f"Min Vias: {min_vias}, Max Vias: {max_vias}") min_drc_errors = success_df['DRC Errors'].min() max_drc_errors = success_df['DRC Errors'].max() print(f"Min DRC Errors: {min_drc_errors}, Max DRC Errors: {max_drc_errors}") s_df = metrics_df[metrics_df['Success'] == 1].groupby(['Core Util'], as_index = False)['Success'].count() f_df = metrics_df[metrics_df['Success'] == 0].groupby(['Core Util'], as_index = False)['Success'].count() #s_df.groupby('Core Util')['Success'].value_counts().plot(kind = "bar", stacked=True, figsize= (10,6)) # * Plot the number of successful runs and number of failures with respect to core utlization. We can see that the trends are what we would expect. As utilization increases, the number of successful runs decreases and the number of failure runs increases. fig = go.Figure() fig = make_subplots(rows=1, cols=2, subplot_titles=('Success', 'Failure'), shared_xaxes=True, horizontal_spacing=0.1) fig.add_trace(go.Bar(x=s_df['Core Util'], y = s_df['Success'], marker = dict(color="green")), row=1, col=1) fig.add_trace(go.Bar(x=f_df['Core Util'], y = f_df['Success'], marker = dict(color = "red")), row=1, col=2) fig.update_layout(width=1000, height=400, title = 'Number of Success and Failures at different Utilizations', title_x = 0.3, margin = dict(l=5, r=50, b=60, t=80, pad=4), showlegend = False) fig.show() # * We can observe that at utilization values of above 38, there is a drastic increase in the number of failures. 
We also observe that for the range of layer_adjust settings that we are using, it does not have a direct impact on the number of successful and doomed runs. Choosing higher layer adjust values will show a more direct correlation to success/doomed runs. # <br /> # # * Generate a scatter plot of Wirelength Vs Core Utilization. We can see the trend where the wirelength decreases with increased utilization. However, at a certain utilization value the wirelength starts to increase due to the router trying more detours to resolve DRC errors. # + fig = go.Figure() fig.add_trace(go.Scatter(x = success_df['Core Util'], y = success_df['Wire Length'], hovertext = success_df['Weighted Adjust'], mode = 'markers')) fig.update_layout(width=1000, height=400, title = 'Wire Length Vs Core Utilization', title_x = 0.5, margin = dict(l=5, r=50, b=60, t=80, pad=4), showlegend = False) fig.show() # - # * Let us now examine how the route runtime varies with utilization. # + fig = go.Figure() fig.add_trace(go.Scatter(x = success_df['Core Util'], y = success_df['Runtime'], hovertext = success_df['Weighted Adjust'], mode = 'markers')) fig.update_layout(width=1000, height=400, title = 'Runtime Vs Core Utilization', title_x = 0.5, margin = dict(l=5, r=50, b=60, t=80, pad=4), showlegend = False) fig.show() # - # * We can see that the runtime is fairly flat for smaller utilizations. As the utilization value goes above 35 the runime starts to degrade and rise exponentially. # # * Let's now examine the number of DRC errors. 
# + fig = go.Figure() fig.add_trace(go.Scatter(x = success_df['Core Util'], y = success_df['DRC Errors'], hovertext = success_df['Runtime'], mode = 'markers')) fig.update_layout(width=1000, height=400, title = 'DRC Errors Vs Core Utilization', title_x = 0.5, margin = dict(l=5, r=50, b=60, t=80, pad=4), showlegend = False) fig.show() # - # * Once again, we see an expected trend -- the number of DRC errors is 0 for lower utilizations and starts to increase with utilizations above 35. # #### Logistic Regression # # * Now let use build a logistic regression model to predict successful or dommed runs for this design based on input parameters of utilization and layer adjust values. # + from sklearn import linear_model X = metrics_df[['Core Util']] y = metrics_df['Success'] regr = linear_model.LinearRegression() regr.fit(X, y) predicted_success = regr.predict([[20]]) # - feature_cols = ['Core Util', 'M1 Adjust', 'M2 Adjust', 'M3 Adjust', 'M4 Adjust', 'M5 Adjust'] X = metrics_df[feature_cols] y = metrics_df['Success'] from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=0) log_reg = LogisticRegression() log_reg.fit(X_train, y_train) y_pred = log_reg.predict(X_test) from sklearn import metrics # * Print the confusion matrix for the trained model confusion_matrix = metrics.confusion_matrix(y_test, y_pred) confusion_matrix print(f'Model Accuracy: {metrics.accuracy_score(y_test, y_pred):.4f}') print(f'Model Precision: {metrics.precision_score(y_test, y_pred):.4f}') print(f'Recall Score: {metrics.recall_score(y_test, y_pred):.4f}') # * The above model is very accurate. 
Now check to see the accuracy of the Weighted Adjust parameter as the sole predictor # + feature_cols = [ 'Core Util', 'Weighted Adjust'] X = metrics_df[feature_cols] y = metrics_df['Success'] X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=0) log_reg = LogisticRegression() log_reg.fit(X_train, y_train) y_pred = log_reg.predict(X_test) confusion_matrix = metrics.confusion_matrix(y_test, y_pred) print(confusion_matrix) print(f'Model Accuracy: {metrics.accuracy_score(y_test, y_pred):.4f}') print(f'Model Precesion: {metrics.precision_score(y_test, y_pred):.4f}') print(f'Recall Score: {metrics.recall_score(y_test, y_pred):.4f}') # - # * For this dataset, the weighted layer_adjust seem to have similar accuracy as the individual layer adjusts, as within the range of the layer adjusts, the core utilization has more direct impact on success or failure. # * Now for runs that are successful, let us predict the wirelength based on parameters using a simple linear regression. # + #success_df # + from sklearn import linear_model feature_cols = ['Core Util', 'M1 Adjust', 'M2 Adjust', 'M3 Adjust', 'M4 Adjust', 'M5 Adjust'] X = success_df[feature_cols] y = success_df['Wire Length'] regr = linear_model.LinearRegression() regr.fit(X, y) test_params = [34, 0.1, 0.1, 0.1, 0.1, 0.5] predicted_wire_length = regr.predict([test_params]) actual_wire_length = success_df[((success_df['Core Util'] == 34) & (success_df['M1 Adjust'] == 0.1) & (success_df['M2 Adjust'] == 0.1) &(success_df['M3 Adjust'] == 0.1) & (success_df['M4 Adjust'] == 0.1) & (success_df['M5 Adjust'] == 0.5))].iloc[0]['Wire Length'] print(f'Predicted Wire Length for input parameters: {test_params} is {predicted_wire_length[0]:.2f}') print(f'Actual Wire Length: {actual_wire_length}') # - # !jupyter nbconvert --to html --no-input --TemplateExporter.exclude_input=True --no-prompt METRICS2.1-fine-grain.ipynb
experiments/sky130hd__ibex_core__sample_fine_grained/METRICS2.1-fine-grain.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Discrete-time systems # This part contains introductory material for discrete-time systems and also material on important properties of discrete-times systems. # # ## Notebooks # 1. [Introduction to discrete-time systems](notebooks/Discrete-time-systems-intro.ipynb) # # ## Good to watch # - [Digital Control - lectures by <NAME>](https://www.youtube.com/watch?v=XuR3QKVtx-g&list=PL1pxneANaikB-JscZzyRUSVZC8k8e9FWG) A short, but nice series of online lectures. We will be using these during the course. # # ## Good to read # - [Overview of computer-controlled systems](http://www.ifac-control.org/publications/list-of-professional-briefs/pb_wittenmark_etal_final.pdf) By Åström, Wittenmark and Årzen.
discrete-time-systems/.ipynb_checkpoints/README-Discrete-time-systems-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # 2.5. Dask Distributed # # Dask `distributed` is a separate package that addresses the limitations of Dask's built-in schedulers: # # 1. Both multithreading and multiprocessing schedulers are *local* schedulers (i.e., on your laptop), so they are limited to memory of a single machine. # 2. The multithreading scheduler is difficult to scale too far beyond the number of cores (or 2x the number of cores with hyperthreading) # 3. The multithreading scheduler can be limited by the GIL (though not with Numpy). # 4. The multiprocessing only allows communications between the scheduler and the workers, not between workers (i.e., hub-and-spoke communication topography). # # Dask `distributed` provides a solution to all of these problems: # # 1. uses TCP (`tornado`) to communicate between processes, so can be truly distributed (and not local), # 2. is limited only by the size of the available "cluster" (but can actually be multi-cluster!), # 3. is multi-process-based, so it doesn't contend with the GIL, and # 4. provides worker-to-worker communication, allowing for efficient and complex task graphs. # # [Go here](https://distributed.readthedocs.io/en/latest/) for more information about `distributed`. import dask import time # ## How to Set Up a (Local) Dask Distributed Cluster # # The easiest way to start a cluster is directly from your notebook using the `Client` object. 
# + from dask.distributed import Client # On your laptop or GCE, run the following from dask.distributed import LocalCluster cluster = LocalCluster(n_workers=4) # On Cheyenne, run the following #from dask_jobqueue import PBSCluster #cluster = PBSCluster(queue='premium', project='STDD0006', processes=1, threads=1, resource_spec='select=1:ncpus=36') #cluster.start_workers(4) client = Client(cluster) client # - # ### LocalCluster # # If you are running on your laptop, you are using just a "local cluster." But you can see that launching a cluster on any other machine (such as Cheyenne) is similar. You create the appropriate `*Cluster` object and pass this to the `Client`. The `dask_jobqueue` package (which must be installed separately with: # # pip install git+https://github.com/dask/dask-jobqueue.git # # in your `pangeo` conda environment) provides additional `*Cluster` objects for `PBSCluster` and `SLURMCluster`, for managing clusters using the PBS and SLURM job schedulers, respectively. client # ### Dashboard # # Many of the diagnostics that we've already seen in the last section are available from the **Dashboard** link in the information box returned when we displayed `client` in the above output. If you click that link, it will take you to a page that provides many of those diagnostics in one place! # # Each kind of task on the *Status* page of the Dashboard is displayed in block, with start and stop times roecorded for each of the following: # # 1. Serialization (gray) # 2. Dependency gathering from peers (red) # 3. Disk I/O to collect local data (orange) # 4. Execution times (additional colors chosen for each task) # # displayed in the *Task Stream* section of the Dashboard. # ### There are many ways to set up a cluster... # # For more ways to set up a cluster with Dask, [click here](https://distributed.readthedocs.io/en/latest/setup.html). # ## The Client # # The `Client` object provides an interface with the main scheduler for your cluster. 
It provides a number of functions that you can use directly to run code on the cluster (instead of just the scheduler). Some of these functions and attributes are described below. # ### Map # # Satisfying the `concurrent.futures` standard, you can `map` a function across an iterative object. The result of `client.map` is a `Future` object that is stored on the worker. def dbl(x): time.sleep(1) return 2*x # Now, let's apply this new `dbl` function to an iterable object (`range`) on the distributed workers... doubles = client.map(dbl, range(80)) doubles[:4] # **QUICKLY, go check out the Dashboard while this is running...** doubles[:4] type(doubles) # At this point, we have a list of `Future` objects in our notebook. # # *Keep in mind* that our notebook is attached to the *scheduler*. These `Future` objects point to objects that are stored (in the memory) of the *workers*! So, we have automatically distributed the data produced by `range(80)` onto the workers by applying `map`. # ### Submit # # Now that the data is on the workers, we can apply functions to the entire distributed dataset by using the `submit` method of the `Client`. sum_doubles = client.submit(sum, doubles) display(sum_doubles) time.sleep(8) display(sum_doubles) # ### Gather # # With the `doubles` data stored on the workers, we can bring that data to the notebook/scheduler with the `gather` method. print(client.gather(doubles)) # #### NOTE: # # Do not gather unless you absolutely must! It is usually much more efficient to keep the data distributed across the cluster and `submit` functions that act on the distributed data than it is to bring the data to the scheduler. # ### Scatter # # You can do the reverse of `gather` if you want to distribute data that you already have on your scheduler. 
dist_data = client.scatter(range(4000)) dist_data[:5] dist_data[4].result() # ### Compute # # Like the previous discussions, you can use the `Client` `compute` method to force the computation of a task graph created by using Dask Delayed, for example. @dask.delayed def dbl(x): time.sleep(1) return 2*x @dask.delayed def inc(x): time.sleep(1) return x + 1 @dask.delayed def dsum(x): time.sleep(1) return sum(x) data = [2, 5, 7, 3, 1, 8, 6, 9] # %time sum_odd_data = dsum( inc(dbl(x)) for x in data ) sum_odd_data # %time result = client.compute(sum_odd_data) result # #### NOTE: # # The return value of `Client.compute` is a `Future` object! That means that, unlike in the previous sections, the `compute` (and `persist`) operations will be done asynchronously! # # And once the result has been computed, then you can get the result of the `Future` with the `result()` method. result.result() # ### Persist # # Similarly, you can `persist` the results of a tash graph (just like in the previous sections) by using the `Client` `persist` method. # # Because `Client.persist` is asynchronous, we can set up initial data that is distributed *in memory* across the cluster using the `persist` method. # ### Cancel & Restart & Close # # You can cancel a computation on the cluster by using the `Client.cancel` method and passing it the `Future` associated with the computation result. client.cancel(result) result # And you can completely kill all `Future`s and restart the cluster with `restart`: client.restart() # And you can close down a cluster with the `close` method. client.close() client # ## Dask Delayed and Collections (next lesson) # # Once you have initialized a `Client` (i.e., connected to the scheduler), normal Dask features like Dask Delayed, Dask Array, etc. will work using the client's scheduler by default. So, no special mechanism is needed to get all of the previous examples to work with `distributed`. 
# ## Advice for Efficiency # # When running with the `distributed` scheduler, there are a few things you should consider for the sake of efficiecy: # # 1. As mentioned above, avoid `gather`ing the data to the scheduler. Keep the data distributed! # 2. Avoid creating too many tasks that need to be distributed across the cluster! (The `distributed` scheduler adds about *1 ms overhead for each task*, depending upon the network. Thus, as a rule of thumb, don't worry about distributing your tasks if the serial operation takes less than about 100 ms to run.) # 3. With the `distributed` scheduler, you can configure workers to use multiple threads *per worker*. (For example, you can create one worker for each remote node on a cluster, and each worker can run as many threads as cores on that node.) # 4. Tasks are assigned to workers using *heuristics*, so keep in mind that Dask might not get it perfect.
notebooks/2.5.dask_distributed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## *Data Science Unit 1 Sprint 3 Assignment 1*
#
# # Apply the t-test to real data
#
# Determine which issues have "statistically significant" differences between
# political parties in the 1980s congressional voting data
# (https://archive.ics.uci.edu/ml/datasets/Congressional+Voting+Records).
# The data consists of 435 instances (one for each congressperson), a class
# (democrat or republican), and 16 binary attributes (yes or no for voting
# for or against certain issues). Be aware - there are missing values!
#
# Goals:
#
# 1. Load and clean the data (or determine the best method to drop observations when running tests)
# 2. Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01
# 3. Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01
# 4. Using hypothesis testing, find an issue where the difference between republicans and democrats
#    has p > 0.1 (i.e. there may not be much of a difference)
#
# Note that this data will involve *2 sample* t-tests, because averages are
# compared across two groups (republicans and democrats) rather than a single
# group against a null hypothesis.

# +
### YOUR CODE STARTS HERE
import pandas as pd
import numpy as np
# -

# ## 1) Load and clean the data

# Download the raw data file (Jupyter shell magic, commented out by jupytext).
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/voting-records/house-votes-84.data

# +
# The file has no header row, so supply explicit column names (party first,
# then one column per roll-call issue).
df = pd.read_csv('house-votes-84.data',
                 header=None,
                 names=['party','handicapped-infants','water-project',
                        'budget','physician-fee-freeze', 'el-salvador-aid',
                        'religious-groups','anti-satellite-ban',
                        'aid-to-contras','mx-missile','immigration',
                        'synfuels', 'education', 'right-to-sue','crime','duty-free',
                        'south-africa'])
print(df.shape)
df.head()
# -

# Recode votes numerically: '?' (missing) -> NaN, 'n' -> 0, 'y' -> 1.
# NOTE: np.nan (lowercase) — the np.NaN alias was removed in NumPy 2.0.
df = df.replace({'?': np.nan, 'n': 0, 'y': 1})
df.head()

# ## 2) Using hypothesis testing, find an issue that democrats support more than republicans with p < 0.01

# Split the congresspeople by party.
dem = df[df['party']=='democrat']
dem.head()

rep = df[df['party']=='republican']
rep.head()

rep.describe()

dem.describe()  # I can see dems support aid-to-contras while republicans do not, so lets test that one

# Null hypothesis: no difference in republican and democrat voting on aid-to-contras

from scipy.stats import ttest_1samp
from scipy.stats import ttest_ind, ttest_ind_from_stats, ttest_rel

# rep['aid-to-contras'].isnull().sum()
# dem['aid-to-contras'].isnull().sum()
# nan_policy='omit' drops the missing votes instead of propagating NaN.
ttest_ind(dem['aid-to-contras'], rep['aid-to-contras'], nan_policy='omit')

# I see that the p-value is much less than .01, therefore we reject the null hypothesis
# 1) p-value: 2.8..x10^-54
# 2) t-stat: 18.052093200819733
#
# since the t-statistic is positive, the left variable (democrats) is higher

rep['aid-to-contras'].mean()

dem['aid-to-contras'].mean()

# ## 3) Using hypothesis testing, find an issue that republicans support more than democrats with p < 0.01

# null hypothesis: there is no difference in voting between republicans and democrats

ttest_ind(dem['el-salvador-aid'], rep['el-salvador-aid'], nan_policy='omit')

# 1) Pval: 5.600520111729011e-68
# 2) Tstat: -21.13669261173219
#
# because our pval < .01, we reject the null hypothesis.
# because the T stat is negative, the right variable (republicans) is higher.

# ## 4) Using hypothesis testing, find an issue where the difference between republicans and democrats has p > 0.1 (i.e. there may not be much of a difference)

# null hypothesis: There is little difference between immigration votes for democrats and republicans

ttest_ind(dem['immigration'], rep['immigration'], nan_policy='omit')

# 1) pval: 0.08330248490425066
# 2) Tstat: -1.7359117329695164
#
# because the pval > .01 we fail to reject the null hypothesis at that significance level
module1-statistics-probability-and-inference/Mari_Dominguez_LS_DS8_131_Statistics_Probability_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # **Chapter 9 – Up and running with TensorFlow** # _This notebook contains all the sample code and solutions to the exercises in chapter 9._ # # Setup # First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: # + pycharm={"is_executing": false} # To support both python 2 and python 3 from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os # to make this notebook's output stable across runs def reset_graph(seed=42): tf.reset_default_graph() tf.set_random_seed(seed) np.random.seed(seed) # To plot pretty figures # %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Where to save the figures PROJECT_ROOT_DIR = "." 
CHAPTER_ID = "tensorflow" def save_fig(fig_id, tight_layout=True): path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png") print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format='png', dpi=300) # - # # Creating and running a graph # + pycharm={"is_executing": false} # import tensorflow as tf import tensorflow.compat.v1 as tf tf.disable_v2_behavior() reset_graph() x = tf.Variable(3, name="x") y = tf.Variable(4, name="y") f = x*x*y + y + 2 # + pycharm={"is_executing": false} f # + pycharm={"is_executing": false} sess = tf.Session() sess.run(x.initializer) sess.run(y.initializer) result = sess.run(f) print(result) # + pycharm={"is_executing": false} sess.close() # + pycharm={"is_executing": false} with tf.Session() as sess: x.initializer.run() y.initializer.run() result = f.eval() # + pycharm={"is_executing": false} result # + pycharm={"is_executing": false} init = tf.global_variables_initializer() with tf.Session() as sess: init.run() result = f.eval() # + pycharm={"is_executing": false} result # + pycharm={"is_executing": false} init = tf.global_variables_initializer() # + pycharm={"is_executing": false} sess = tf.InteractiveSession() init.run() result = f.eval() print(result) # + pycharm={"is_executing": false} sess.close() # + pycharm={"is_executing": false} result # - # # Managing graphs # + pycharm={"is_executing": false} reset_graph() x1 = tf.Variable(1) x1.graph is tf.get_default_graph() # + pycharm={"is_executing": false} graph = tf.Graph() with graph.as_default(): x2 = tf.Variable(2) x2.graph is graph # + pycharm={"is_executing": false} x2.graph is tf.get_default_graph() # + pycharm={"is_executing": false} w = tf.constant(3) x = w + 2 y = x + 5 z = x * 3 with tf.Session() as sess: print(y.eval()) # 10 print(z.eval()) # 15 # + pycharm={"is_executing": false} with tf.Session() as sess: y_val, z_val = sess.run([y, z]) print(y_val) # 10 print(z_val) # 15 # - # # Linear Regression # ## Using the Normal 
Equation # + pycharm={"is_executing": false} import numpy as np from sklearn.datasets import fetch_california_housing reset_graph() housing = fetch_california_housing() m, n = housing.data.shape housing_data_plus_bias = np.c_[np.ones((m, 1)), housing.data] X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") XT = tf.transpose(X) theta = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(XT, X)), XT), y) with tf.Session() as sess: theta_value = theta.eval() # + pycharm={"is_executing": false} theta_value # - # Compare with pure NumPy # + pycharm={"is_executing": false} X = housing_data_plus_bias y = housing.target.reshape(-1, 1) theta_numpy = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y) print(theta_numpy) # - # Compare with Scikit-Learn # + pycharm={"is_executing": false} from sklearn.linear_model import LinearRegression lin_reg = LinearRegression() lin_reg.fit(housing.data, housing.target.reshape(-1, 1)) print(np.r_[lin_reg.intercept_.reshape(-1, 1), lin_reg.coef_.T]) # - # ## Using Batch Gradient Descent # Gradient Descent requires scaling the feature vectors first. We could do this using TF, but let's just use Scikit-Learn for now. 
# + pycharm={"is_executing": false} from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaled_housing_data = scaler.fit_transform(housing.data) scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data] # + pycharm={"is_executing": false} print(scaled_housing_data_plus_bias.mean(axis=0)) print(scaled_housing_data_plus_bias.mean(axis=1)) print(scaled_housing_data_plus_bias.mean()) print(scaled_housing_data_plus_bias.shape) # - # ### Manually computing the gradients # + pycharm={"is_executing": false} reset_graph() n_epochs = 1000 learning_rate = 0.01 X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") gradients = 2/m * tf.matmul(tf.transpose(X), error) training_op = tf.assign(theta, theta - learning_rate * gradients) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): if epoch % 100 == 0: print("Epoch", epoch, "MSE =", mse.eval()) sess.run(training_op) best_theta = theta.eval() # + pycharm={"is_executing": false} best_theta # - # ### Using autodiff # Same as above except for the `gradients = ...` line: # + pycharm={"is_executing": false} reset_graph() n_epochs = 1000 learning_rate = 0.01 X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") # + pycharm={"is_executing": false} gradients = tf.gradients(mse, [theta])[0] # + pycharm={"is_executing": false} training_op = 
tf.assign(theta, theta - learning_rate * gradients) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): if epoch % 100 == 0: print("Epoch", epoch, "MSE =", mse.eval()) sess.run(training_op) best_theta = theta.eval() print("Best theta:") print(best_theta) # - # How could you find the partial derivatives of the following function with regards to `a` and `b`? # + pycharm={"is_executing": false} def my_func(a, b): z = 0 for i in range(100): z = a * np.cos(z + i) + z * np.sin(b - i) return z # + pycharm={"is_executing": false} my_func(0.2, 0.3) # + pycharm={"is_executing": false} reset_graph() a = tf.Variable(0.2, name="a") b = tf.Variable(0.3, name="b") z = tf.constant(0.0, name="z0") for i in range(100): z = a * tf.cos(z + i) + z * tf.sin(b - i) grads = tf.gradients(z, [a, b]) init = tf.global_variables_initializer() # - # Let's compute the function at $a=0.2$ and $b=0.3$, and the partial derivatives at that point with regards to $a$ and with regards to $b$: # + pycharm={"is_executing": false} with tf.Session() as sess: init.run() print(z.eval()) print(sess.run(grads)) # - # ### Using a `GradientDescentOptimizer` # + pycharm={"is_executing": false} reset_graph() n_epochs = 1000 learning_rate = 0.01 X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") # + pycharm={"is_executing": false} optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(mse) # + pycharm={"is_executing": false} init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): if epoch % 100 == 0: print("Epoch", epoch, "MSE =", mse.eval()) 
sess.run(training_op) best_theta = theta.eval() print("Best theta:") print(best_theta) # - # ### Using a momentum optimizer # + pycharm={"is_executing": false} reset_graph() n_epochs = 1000 learning_rate = 0.01 X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") # + pycharm={"is_executing": false} optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9) # + pycharm={"is_executing": false} training_op = optimizer.minimize(mse) init = tf.global_variables_initializer() # + pycharm={"is_executing": false} with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): sess.run(training_op) best_theta = theta.eval() print("Best theta:") print(best_theta) # - # # Feeding data to the training algorithm # ## Placeholder nodes # + pycharm={"is_executing": false} reset_graph() A = tf.placeholder(tf.float32, shape=(None, 3)) B = A + 5 with tf.Session() as sess: B_val_1 = B.eval(feed_dict={A: [[1, 2, 3]]}) B_val_2 = B.eval(feed_dict={A: [[4, 5, 6], [7, 8, 9]]}) print(B_val_1) # + pycharm={"is_executing": false} print(B_val_2) # - # ## Mini-batch Gradient Descent # + pycharm={"is_executing": false} n_epochs = 1000 learning_rate = 0.01 # + pycharm={"is_executing": false} reset_graph() X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") # + pycharm={"is_executing": false} theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(mse) 
init = tf.global_variables_initializer() # + pycharm={"is_executing": false} n_epochs = 10 # + pycharm={"is_executing": false} batch_size = 100 n_batches = int(np.ceil(m / batch_size)) # + pycharm={"is_executing": false} def fetch_batch(epoch, batch_index, batch_size): np.random.seed(epoch * n_batches + batch_index) # not shown in the book indices = np.random.randint(m, size=batch_size) # not shown X_batch = scaled_housing_data_plus_bias[indices] # not shown y_batch = housing.target.reshape(-1, 1)[indices] # not shown return X_batch, y_batch with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): for batch_index in range(n_batches): X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) best_theta = theta.eval() # + pycharm={"is_executing": false} best_theta # - # # Saving and restoring a model # + pycharm={"is_executing": false} reset_graph() n_epochs = 1000 # not shown in the book learning_rate = 0.01 # not shown X = tf.constant(scaled_housing_data_plus_bias, dtype=tf.float32, name="X") # not shown y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name="y") # not shown theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") # not shown error = y_pred - y # not shown mse = tf.reduce_mean(tf.square(error), name="mse") # not shown optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # not shown training_op = optimizer.minimize(mse) # not shown init = tf.global_variables_initializer() saver = tf.train.Saver() with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): if epoch % 100 == 0: print("Epoch", epoch, "MSE =", mse.eval()) # not shown save_path = saver.save(sess, "/tmp/my_model.ckpt") sess.run(training_op) best_theta = theta.eval() save_path = saver.save(sess, "/tmp/my_model_final.ckpt") # + pycharm={"is_executing": false} best_theta # + 
pycharm={"is_executing": false} with tf.Session() as sess: saver.restore(sess, "/tmp/my_model_final.ckpt") best_theta_restored = theta.eval() # not shown in the book # + pycharm={"is_executing": false} np.allclose(best_theta, best_theta_restored) # - # If you want to have a saver that loads and restores `theta` with a different name, such as `"weights"`: # + pycharm={"is_executing": false} saver = tf.train.Saver({"weights": theta}) # - # By default the saver also saves the graph structure itself in a second file with the extension `.meta`. You can use the function `tf.train.import_meta_graph()` to restore the graph structure. This function loads the graph into the default graph and returns a `Saver` that can then be used to restore the graph state (i.e., the variable values): # + pycharm={"is_executing": false} reset_graph() # notice that we start with an empty graph. saver = tf.train.import_meta_graph("/tmp/my_model_final.ckpt.meta") # this loads the graph structure theta = tf.get_default_graph().get_tensor_by_name("theta:0") # not shown in the book with tf.Session() as sess: saver.restore(sess, "/tmp/my_model_final.ckpt") # this restores the graph's state best_theta_restored = theta.eval() # not shown in the book # + pycharm={"is_executing": false} np.allclose(best_theta, best_theta_restored) # - # This means that you can import a pretrained model without having to have the corresponding Python code to build the graph. This is very handy when you keep tweaking and saving your model: you can load a previously saved model without having to search for the version of the code that built it. # # Visualizing the graph # ## inside Jupyter # To visualize the graph within Jupyter, we will use a TensorBoard server available online at https://tensorboard.appspot.com/ (so this will not work if you do not have Internet access). 
As far as I can tell, this code was originally written by <NAME> in his [DeepDream tutorial](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/tutorials/deepdream/deepdream.ipynb). Alternatively, you could use a tool like [tfgraphviz](https://github.com/akimach/tfgraphviz). # + pycharm={"is_executing": false} from tensorflow_graph_in_jupyter import show_graph # + pycharm={"is_executing": false} show_graph(tf.get_default_graph()) # - # ## Using TensorBoard # + pycharm={"is_executing": false} reset_graph() from datetime import datetime now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_logdir = "tf_logs" logdir = "{}/run-{}/".format(root_logdir, now) # + pycharm={"is_executing": false} n_epochs = 1000 learning_rate = 0.01 X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(mse) init = tf.global_variables_initializer() # + pycharm={"is_executing": false} mse_summary = tf.summary.scalar('MSE', mse) file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) # + pycharm={"is_executing": false} n_epochs = 10 batch_size = 100 n_batches = int(np.ceil(m / batch_size)) # + pycharm={"is_executing": false} with tf.Session() as sess: # not shown in the book sess.run(init) # not shown for epoch in range(n_epochs): # not shown for batch_index in range(n_batches): X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size) if batch_index % 10 == 0: summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch}) step = epoch * n_batches + batch_index file_writer.add_summary(summary_str, step) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) best_theta = 
theta.eval() # not shown # + pycharm={"is_executing": false} file_writer.close() # + pycharm={"is_executing": false} best_theta # - # # Name scopes # + reset_graph() now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_logdir = "tf_logs" logdir = "{}/run-{}/".format(root_logdir, now) n_epochs = 1000 learning_rate = 0.01 X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta") y_pred = tf.matmul(X, theta, name="predictions") # - with tf.name_scope("loss") as scope: error = y_pred - y mse = tf.reduce_mean(tf.square(error), name="mse") # + optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(mse) init = tf.global_variables_initializer() mse_summary = tf.summary.scalar('MSE', mse) file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) # + n_epochs = 10 batch_size = 100 n_batches = int(np.ceil(m / batch_size)) with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): for batch_index in range(n_batches): X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size) if batch_index % 10 == 0: summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch}) step = epoch * n_batches + batch_index file_writer.add_summary(summary_str, step) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) best_theta = theta.eval() file_writer.flush() file_writer.close() print("Best theta:") print(best_theta) # - print(error.op.name) print(mse.op.name) # + reset_graph() a1 = tf.Variable(0, name="a") # name == "a" a2 = tf.Variable(0, name="a") # name == "a_1" with tf.name_scope("param"): # name == "param" a3 = tf.Variable(0, name="a") # name == "param/a" with tf.name_scope("param"): # name == "param_1" a4 = tf.Variable(0, name="a") # name == "param_1/a" for node in (a1, a2, a3, a4): print(node.op.name) # - # # Modularity # An ugly flat code: # + 
reset_graph() n_features = 3 X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") w1 = tf.Variable(tf.random_normal((n_features, 1)), name="weights1") w2 = tf.Variable(tf.random_normal((n_features, 1)), name="weights2") b1 = tf.Variable(0.0, name="bias1") b2 = tf.Variable(0.0, name="bias2") z1 = tf.add(tf.matmul(X, w1), b1, name="z1") z2 = tf.add(tf.matmul(X, w2), b2, name="z2") relu1 = tf.maximum(z1, 0., name="relu1") relu2 = tf.maximum(z1, 0., name="relu2") # Oops, cut&paste error! Did you spot it? output = tf.add(relu1, relu2, name="output") # - # Much better, using a function to build the ReLUs: # + reset_graph() def relu(X): w_shape = (int(X.get_shape()[1]), 1) w = tf.Variable(tf.random_normal(w_shape), name="weights") b = tf.Variable(0.0, name="bias") z = tf.add(tf.matmul(X, w), b, name="z") return tf.maximum(z, 0., name="relu") n_features = 3 X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") relus = [relu(X) for i in range(5)] output = tf.add_n(relus, name="output") # - file_writer = tf.summary.FileWriter("logs/relu1", tf.get_default_graph()) # Even better using name scopes: # + reset_graph() def relu(X): with tf.name_scope("relu"): w_shape = (int(X.get_shape()[1]), 1) # not shown in the book w = tf.Variable(tf.random_normal(w_shape), name="weights") # not shown b = tf.Variable(0.0, name="bias") # not shown z = tf.add(tf.matmul(X, w), b, name="z") # not shown return tf.maximum(z, 0., name="max") # not shown # + n_features = 3 X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") relus = [relu(X) for i in range(5)] output = tf.add_n(relus, name="output") file_writer = tf.summary.FileWriter("logs/relu2", tf.get_default_graph()) file_writer.close() # - # ## Sharing Variables # Sharing a `threshold` variable the classic way, by defining it outside of the `relu()` function then passing it as a parameter: # + reset_graph() def relu(X, threshold): with tf.name_scope("relu"): w_shape = (int(X.get_shape()[1]), 1) # not 
shown in the book w = tf.Variable(tf.random_normal(w_shape), name="weights") # not shown b = tf.Variable(0.0, name="bias") # not shown z = tf.add(tf.matmul(X, w), b, name="z") # not shown return tf.maximum(z, threshold, name="max") threshold = tf.Variable(0.0, name="threshold") X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") relus = [relu(X, threshold) for i in range(5)] output = tf.add_n(relus, name="output") # + reset_graph() def relu(X): with tf.name_scope("relu"): if not hasattr(relu, "threshold"): relu.threshold = tf.Variable(0.0, name="threshold") w_shape = int(X.get_shape()[1]), 1 # not shown in the book w = tf.Variable(tf.random_normal(w_shape), name="weights") # not shown b = tf.Variable(0.0, name="bias") # not shown z = tf.add(tf.matmul(X, w), b, name="z") # not shown return tf.maximum(z, relu.threshold, name="max") # - X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") relus = [relu(X) for i in range(5)] output = tf.add_n(relus, name="output") # + reset_graph() with tf.variable_scope("relu"): threshold = tf.get_variable("threshold", shape=(), initializer=tf.constant_initializer(0.0)) # - with tf.variable_scope("relu", reuse=True): threshold = tf.get_variable("threshold") with tf.variable_scope("relu") as scope: scope.reuse_variables() threshold = tf.get_variable("threshold") # + reset_graph() def relu(X): with tf.variable_scope("relu", reuse=True): threshold = tf.get_variable("threshold") w_shape = int(X.get_shape()[1]), 1 # not shown w = tf.Variable(tf.random_normal(w_shape), name="weights") # not shown b = tf.Variable(0.0, name="bias") # not shown z = tf.add(tf.matmul(X, w), b, name="z") # not shown return tf.maximum(z, threshold, name="max") X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") with tf.variable_scope("relu"): threshold = tf.get_variable("threshold", shape=(), initializer=tf.constant_initializer(0.0)) relus = [relu(X) for relu_index in range(5)] output = tf.add_n(relus, name="output") # - 
file_writer = tf.summary.FileWriter("logs/relu6", tf.get_default_graph()) file_writer.close() # + reset_graph() def relu(X): with tf.variable_scope("relu"): threshold = tf.get_variable("threshold", shape=(), initializer=tf.constant_initializer(0.0)) w_shape = (int(X.get_shape()[1]), 1) w = tf.Variable(tf.random_normal(w_shape), name="weights") b = tf.Variable(0.0, name="bias") z = tf.add(tf.matmul(X, w), b, name="z") return tf.maximum(z, threshold, name="max") X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") with tf.variable_scope("", default_name="") as scope: first_relu = relu(X) # create the shared variable scope.reuse_variables() # then reuse it relus = [first_relu] + [relu(X) for i in range(4)] output = tf.add_n(relus, name="output") file_writer = tf.summary.FileWriter("logs/relu8", tf.get_default_graph()) file_writer.close() # + reset_graph() def relu(X): threshold = tf.get_variable("threshold", shape=(), initializer=tf.constant_initializer(0.0)) w_shape = (int(X.get_shape()[1]), 1) # not shown in the book w = tf.Variable(tf.random_normal(w_shape), name="weights") # not shown b = tf.Variable(0.0, name="bias") # not shown z = tf.add(tf.matmul(X, w), b, name="z") # not shown return tf.maximum(z, threshold, name="max") X = tf.placeholder(tf.float32, shape=(None, n_features), name="X") relus = [] for relu_index in range(5): with tf.variable_scope("relu", reuse=(relu_index >= 1)) as scope: relus.append(relu(X)) output = tf.add_n(relus, name="output") # - file_writer = tf.summary.FileWriter("logs/relu9", tf.get_default_graph()) file_writer.close() # # Extra material # + reset_graph() with tf.variable_scope("my_scope"): x0 = tf.get_variable("x", shape=(), initializer=tf.constant_initializer(0.)) x1 = tf.Variable(0., name="x") x2 = tf.Variable(0., name="x") with tf.variable_scope("my_scope", reuse=True): x3 = tf.get_variable("x") x4 = tf.Variable(0., name="x") with tf.variable_scope("", default_name="", reuse=True): x5 = 
tf.get_variable("my_scope/x") print("x0:", x0.op.name) print("x1:", x1.op.name) print("x2:", x2.op.name) print("x3:", x3.op.name) print("x4:", x4.op.name) print("x5:", x5.op.name) print(x0 is x3 and x3 is x5) # - # The first `variable_scope()` block first creates the shared variable `x0`, named `my_scope/x`. For all operations other than shared variables (including non-shared variables), the variable scope acts like a regular name scope, which is why the two variables `x1` and `x2` have a name with a prefix `my_scope/`. Note however that TensorFlow makes their names unique by adding an index: `my_scope/x_1` and `my_scope/x_2`. # # The second `variable_scope()` block reuses the shared variables in scope `my_scope`, which is why `x0 is x3`. Once again, for all operations other than shared variables it acts as a named scope, and since it's a separate block from the first one, the name of the scope is made unique by TensorFlow (`my_scope_1`) and thus the variable `x4` is named `my_scope_1/x`. # # The third block shows another way to get a handle on the shared variable `my_scope/x` by creating a `variable_scope()` at the root scope (whose name is an empty string), then calling `get_variable()` with the full name of the shared variable (i.e. `"my_scope/x"`). # ## Strings # + reset_graph() text = np.array("Do you want some café?".split()) text_tensor = tf.constant(text) with tf.Session() as sess: print(text_tensor.eval()) # - # ## Autodiff # Note: the autodiff content was moved to the [extra_autodiff.ipynb](extra_autodiff.ipynb) notebook. # # Exercise solutions # ## 1. to 11. # See appendix A. # ## 12. 
Logistic Regression with Mini-Batch Gradient Descent using TensorFlow # First, let's create the moons dataset using Scikit-Learn's `make_moons()` function: # + from sklearn.datasets import make_moons m = 1000 X_moons, y_moons = make_moons(m, noise=0.1, random_state=42) # - # Let's take a peek at the dataset: plt.plot(X_moons[y_moons == 1, 0], X_moons[y_moons == 1, 1], 'go', label="Positive") plt.plot(X_moons[y_moons == 0, 0], X_moons[y_moons == 0, 1], 'r^', label="Negative") plt.legend() plt.show() # We must not forget to add an extra bias feature ($x_0 = 1$) to every instance. For this, we just need to add a column full of 1s on the left of the input matrix $\mathbf{X}$: X_moons_with_bias = np.c_[np.ones((m, 1)), X_moons] # Let's check: X_moons_with_bias[:5] # Looks good. Now let's reshape `y_train` to make it a column vector (i.e. a 2D array with a single column): y_moons_column_vector = y_moons.reshape(-1, 1) # Now let's split the data into a training set and a test set: test_ratio = 0.2 test_size = int(m * test_ratio) X_train = X_moons_with_bias[:-test_size] X_test = X_moons_with_bias[-test_size:] y_train = y_moons_column_vector[:-test_size] y_test = y_moons_column_vector[-test_size:] # Ok, now let's create a small function to generate training batches. In this implementation we will just pick random instances from the training set for each batch. This means that a single batch may contain the same instance multiple times, and also a single epoch may not cover all the training instances (in fact it will generally cover only about two thirds of the instances). However, in practice this is not an issue and it simplifies the code: def random_batch(X_train, y_train, batch_size): rnd_indices = np.random.randint(0, len(X_train), batch_size) X_batch = X_train[rnd_indices] y_batch = y_train[rnd_indices] return X_batch, y_batch # Let's look at a small batch: X_batch, y_batch = random_batch(X_train, y_train, 5) X_batch y_batch # Great! 
Now that the data is ready to be fed to the model, we need to build that model. Let's start with a simple implementation, then we will add all the bells and whistles. # First let's reset the default graph. reset_graph() # The _moons_ dataset has two input features, since each instance is a point on a plane (i.e., 2-Dimensional): n_inputs = 2 # Now let's build the Logistic Regression model. As we saw in chapter 4, this model first computes a weighted sum of the inputs (just like the Linear Regression model), and then it applies the sigmoid function to the result, which gives us the estimated probability for the positive class: # # $\hat{p} = h_\boldsymbol{\theta}(\mathbf{x}) = \sigma(\boldsymbol{\theta}^T \mathbf{x})$ # # Recall that $\boldsymbol{\theta}$ is the parameter vector, containing the bias term $\theta_0$ and the weights $\theta_1, \theta_2, \dots, \theta_n$. The input vector $\mathbf{x}$ contains a constant term $x_0 = 1$, as well as all the input features $x_1, x_2, \dots, x_n$. # # Since we want to be able to make predictions for multiple instances at a time, we will use an input matrix $\mathbf{X}$ rather than a single input vector. The $i^{th}$ row will contain the transpose of the $i^{th}$ input vector $(\mathbf{x}^{(i)})^T$. 
It is then possible to estimate the probability that each instance belongs to the positive class using the following equation: # # $ \hat{\mathbf{p}} = \sigma(\mathbf{X} \boldsymbol{\theta})$ # # That's all we need to build the model: X = tf.placeholder(tf.float32, shape=(None, n_inputs + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") theta = tf.Variable(tf.random_uniform([n_inputs + 1, 1], -1.0, 1.0, seed=42), name="theta") logits = tf.matmul(X, theta, name="logits") y_proba = 1 / (1 + tf.exp(-logits)) # In fact, TensorFlow has a nice function `tf.sigmoid()` that we can use to simplify the last line of the previous code: y_proba = tf.sigmoid(logits) # As we saw in chapter 4, the log loss is a good cost function to use for Logistic Regression: # # $J(\boldsymbol{\theta}) = -\dfrac{1}{m} \sum\limits_{i=1}^{m}{\left[ y^{(i)} \log\left(\hat{p}^{(i)}\right) + (1 - y^{(i)}) \log\left(1 - \hat{p}^{(i)}\right)\right]}$ # # One option is to implement it ourselves: epsilon = 1e-7 # to avoid an overflow when computing the log loss = -tf.reduce_mean(y * tf.log(y_proba + epsilon) + (1 - y) * tf.log(1 - y_proba + epsilon)) # But we might as well use TensorFlow's `tf.losses.log_loss()` function: loss = tf.losses.log_loss(y, y_proba) # uses epsilon = 1e-7 by default # The rest is pretty standard: let's create the optimizer and tell it to minimize the cost function: learning_rate = 0.01 optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(loss) # All we need now (in this minimal version) is the variable initializer: init = tf.global_variables_initializer() # And we are ready to train the model and use it for predictions! 
# There's really nothing special about this code, it's virtually the same as the one we used earlier for Linear Regression: # + n_epochs = 1000 batch_size = 50 n_batches = int(np.ceil(m / batch_size)) with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): for batch_index in range(n_batches): X_batch, y_batch = random_batch(X_train, y_train, batch_size) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) loss_val = loss.eval({X: X_test, y: y_test}) if epoch % 100 == 0: print("Epoch:", epoch, "\tLoss:", loss_val) y_proba_val = y_proba.eval(feed_dict={X: X_test, y: y_test}) # - # Note: we don't use the epoch number when generating batches, so we could just have a single `for` loop rather than 2 nested `for` loops, but it's convenient to think of training time in terms of number of epochs (i.e., roughly the number of times the algorithm went through the training set). # For each instance in the test set, `y_proba_val` contains the estimated probability that it belongs to the positive class, according to the model. For example, here are the first 5 estimated probabilities: y_proba_val[:5] # To classify each instance, we can go for maximum likelihood: classify as positive any instance whose estimated probability is greater or equal to 0.5: y_pred = (y_proba_val >= 0.5) y_pred[:5] # Depending on the use case, you may want to choose a different threshold than 0.5: make it higher if you want high precision (but lower recall), and make it lower if you want high recall (but lower precision). See chapter 3 for more details. 
# Let's compute the model's precision and recall: # + from sklearn.metrics import precision_score, recall_score precision_score(y_test, y_pred) # - recall_score(y_test, y_pred) # Let's plot these predictions to see what they look like: y_pred_idx = y_pred.reshape(-1) # a 1D array rather than a column vector plt.plot(X_test[y_pred_idx, 1], X_test[y_pred_idx, 2], 'go', label="Positive") plt.plot(X_test[~y_pred_idx, 1], X_test[~y_pred_idx, 2], 'r^', label="Negative") plt.legend() plt.show() # Well, that looks pretty bad, doesn't it? But let's not forget that the Logistic Regression model has a linear decision boundary, so this is actually close to the best we can do with this model (unless we add more features, as we will show in a second). # Now let's start over, but this time we will add all the bells and whistles, as listed in the exercise: # * Define the graph within a `logistic_regression()` function that can be reused easily. # * Save checkpoints using a `Saver` at regular intervals during training, and save the final model at the end of training. # * Restore the last checkpoint upon startup if training was interrupted. # * Define the graph using nice scopes so the graph looks good in TensorBoard. # * Add summaries to visualize the learning curves in TensorBoard. # * Try tweaking some hyperparameters such as the learning rate or the mini-batch size and look at the shape of the learning curve. # Before we start, we will add 4 more features to the inputs: ${x_1}^2$, ${x_2}^2$, ${x_1}^3$ and ${x_2}^3$. This was not part of the exercise, but it will demonstrate how adding features can improve the model. We will do this manually, but you could also add them using `sklearn.preprocessing.PolynomialFeatures`. 
X_train_enhanced = np.c_[X_train, np.square(X_train[:, 1]), np.square(X_train[:, 2]), X_train[:, 1] ** 3, X_train[:, 2] ** 3] X_test_enhanced = np.c_[X_test, np.square(X_test[:, 1]), np.square(X_test[:, 2]), X_test[:, 1] ** 3, X_test[:, 2] ** 3] # This is what the "enhanced" training set looks like: X_train_enhanced[:5] # Ok, next let's reset the default graph: reset_graph() # Now let's define the `logistic_regression()` function to create the graph. We will leave out the definition of the inputs `X` and the targets `y`. We could include them here, but leaving them out will make it easier to use this function in a wide range of use cases (e.g. perhaps we will want to add some preprocessing steps for the inputs before we feed them to the Logistic Regression model). def logistic_regression(X, y, initializer=None, seed=42, learning_rate=0.01): n_inputs_including_bias = int(X.get_shape()[1]) with tf.name_scope("logistic_regression"): with tf.name_scope("model"): if initializer is None: initializer = tf.random_uniform([n_inputs_including_bias, 1], -1.0, 1.0, seed=seed) theta = tf.Variable(initializer, name="theta") logits = tf.matmul(X, theta, name="logits") y_proba = tf.sigmoid(logits) with tf.name_scope("train"): loss = tf.losses.log_loss(y, y_proba, scope="loss") optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) training_op = optimizer.minimize(loss) loss_summary = tf.summary.scalar('log_loss', loss) with tf.name_scope("init"): init = tf.global_variables_initializer() with tf.name_scope("save"): saver = tf.train.Saver() return y_proba, loss, training_op, loss_summary, init, saver # Let's create a little function to get the name of the log directory to save the summaries for Tensorboard: # + from datetime import datetime def log_dir(prefix=""): now = datetime.utcnow().strftime("%Y%m%d%H%M%S") root_logdir = "tf_logs" if prefix: prefix += "-" name = prefix + "run-" + now return "{}/{}/".format(root_logdir, name) # - # Next, let's create the 
graph, using the `logistic_regression()` function. We will also create the `FileWriter` to save the summaries to the log directory for Tensorboard: # + n_inputs = 2 + 4 logdir = log_dir("logreg") X = tf.placeholder(tf.float32, shape=(None, n_inputs + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") y_proba, loss, training_op, loss_summary, init, saver = logistic_regression(X, y) file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) # - # At last we can train the model! We will start by checking whether a previous training session was interrupted, and if so we will load the checkpoint and continue training from the epoch number we saved. In this example we just save the epoch number to a separate file, but in chapter 11 we will see how to store the training step directly as part of the model, using a non-trainable variable called `global_step` that we pass to the optimizer's `minimize()` method. # # You can try interrupting training to verify that it does indeed restore the last checkpoint when you start it again. # + n_epochs = 10001 batch_size = 50 n_batches = int(np.ceil(m / batch_size)) checkpoint_path = "/tmp/my_logreg_model.ckpt" checkpoint_epoch_path = checkpoint_path + ".epoch" final_model_path = "./my_logreg_model" with tf.Session() as sess: if os.path.isfile(checkpoint_epoch_path): # if the checkpoint file exists, restore the model and load the epoch number with open(checkpoint_epoch_path, "rb") as f: start_epoch = int(f.read()) print("Training was interrupted. 
Continuing at epoch", start_epoch) saver.restore(sess, checkpoint_path) else: start_epoch = 0 sess.run(init) for epoch in range(start_epoch, n_epochs): for batch_index in range(n_batches): X_batch, y_batch = random_batch(X_train_enhanced, y_train, batch_size) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) loss_val, summary_str = sess.run([loss, loss_summary], feed_dict={X: X_test_enhanced, y: y_test}) file_writer.add_summary(summary_str, epoch) if epoch % 500 == 0: print("Epoch:", epoch, "\tLoss:", loss_val) saver.save(sess, checkpoint_path) with open(checkpoint_epoch_path, "wb") as f: f.write(b"%d" % (epoch + 1)) saver.save(sess, final_model_path) y_proba_val = y_proba.eval(feed_dict={X: X_test_enhanced, y: y_test}) os.remove(checkpoint_epoch_path) # - # Once again, we can make predictions by just classifying as positive all the instances whose estimated probability is greater or equal to 0.5: y_pred = (y_proba_val >= 0.5) precision_score(y_test, y_pred) recall_score(y_test, y_pred) y_pred_idx = y_pred.reshape(-1) # a 1D array rather than a column vector plt.plot(X_test[y_pred_idx, 1], X_test[y_pred_idx, 2], 'go', label="Positive") plt.plot(X_test[~y_pred_idx, 1], X_test[~y_pred_idx, 2], 'r^', label="Negative") plt.legend() plt.show() # Now that's much, much better! Apparently the new features really helped a lot. # Try starting the tensorboard server, find the latest run and look at the learning curve (i.e., how the loss evaluated on the test set evolves as a function of the epoch number): # # ``` # $ tensorboard --logdir=tf_logs # ``` # Now you can play around with the hyperparameters (e.g. the `batch_size` or the `learning_rate`) and run training again and again, comparing the learning curves. You can even automate this process by implementing grid search or randomized search. Below is a simple implementation of a randomized search on both the batch size and the learning rate. For the sake of simplicity, the checkpoint mechanism was removed. 
# + from scipy.stats import reciprocal n_search_iterations = 10 for search_iteration in range(n_search_iterations): batch_size = np.random.randint(1, 100) learning_rate = reciprocal(0.0001, 0.1).rvs(random_state=search_iteration) n_inputs = 2 + 4 logdir = log_dir("logreg") print("Iteration", search_iteration) print(" logdir:", logdir) print(" batch size:", batch_size) print(" learning_rate:", learning_rate) print(" training: ", end="") reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs + 1), name="X") y = tf.placeholder(tf.float32, shape=(None, 1), name="y") y_proba, loss, training_op, loss_summary, init, saver = logistic_regression( X, y, learning_rate=learning_rate) file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) n_epochs = 10001 n_batches = int(np.ceil(m / batch_size)) final_model_path = "./my_logreg_model_%d" % search_iteration with tf.Session() as sess: sess.run(init) for epoch in range(n_epochs): for batch_index in range(n_batches): X_batch, y_batch = random_batch(X_train_enhanced, y_train, batch_size) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) loss_val, summary_str = sess.run([loss, loss_summary], feed_dict={X: X_test_enhanced, y: y_test}) file_writer.add_summary(summary_str, epoch) if epoch % 500 == 0: print(".", end="") saver.save(sess, final_model_path) print() y_proba_val = y_proba.eval(feed_dict={X: X_test_enhanced, y: y_test}) y_pred = (y_proba_val >= 0.5) print(" precision:", precision_score(y_test, y_pred)) print(" recall:", recall_score(y_test, y_pred)) # - # The `reciprocal()` function from SciPy's `stats` module returns a random distribution that is commonly used when you have no idea of the optimal scale of a hyperparameter. See the exercise solutions for chapter 2 for more details.
09_up_and_running_with_tensorflow.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + tags=["hide-cell"] import numpy as np import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') # - # # Snowflake time of flight # # Have you ever watched a snowflake fall and thought, "How long has that snowflake been falling?" # # Here, we want to determine the time of flight for a snowflake. We'll start simple and add some complexity to make a more accurate model. # # ## Simplest model # # The simplest assumption we can make is that the only force acting on the snowflake is gravity. This leads to one differential equation, # # $-mg = m\ddot{y}$ # # where $m$ is the mass of the snowflake, $g=9.81~\frac{m}{s^2}$, and $\ddot{y}$ is the second derivative with respect to time for the height of the snowflake i.e. its vertical acceleration. # # We integrate this equation twice to create a solution in terms of $y_0$, initial height and $\dot{y}_0$, its initial vertical velocity. # # $\ddot{y} = -g$ # # $\frac{d\dot{y}}{dt} = -g$ # # $\dot{y} -\dot{y}_0 = -gt$ # # $\frac{dy}{dt} = \dot{y}_0 -gt$ # # $y-y_0 = \dot{y}_0t - \frac{gt^2}{2}$ # # $y(t) = y_0 +\dot{y}_0t - \frac{gt^2}{2}$ # # Now, we need the initial height and initial speed of the snowflake. A typical cloud might sit $\approx 1,000~m$ above the ground and let's assume the initial vertical speed is 0 m/s. 
# # This leaves, $y_0=1000~m~and~\dot{y}_0=0~m/s$ # # ![Cloud heights and precipitation](https://upload.wikimedia.org/wikipedia/commons/5/57/Cloud_types_en.svg) # + tags=["hide-input"] t = np.linspace(0, np.sqrt(1000/9.81*2), 1000) y = 1000 - 9.81*t**2/2 plt.plot(t,y) plt.xlabel('time (s)') plt.ylabel('height (m)') # + tags=["hide-input"] t[-1] # - # ## Our solution - constant acceleration # # According to your calculations, the snowflake will start at 1,000-m altitude and reach ground level at almost 14.3 seconds. # # > __Note:__ What's wrong with the height curve here? # ## A little problem - speed of snowflake # # The graph of height-vs-time keeps getting steeper. The steeper the graph, the faster the snowflake. How fast is your snowflake traveling when it hits the ground? # + tags=["hide-input"] dy = -9.81*t plt.plot(t, dy) plt.xlabel('time (s)') plt.ylabel('vertical speed (m/s)') # - # According to your calculations, the snowflake is traveling at 140 m/s when it strikes the ground. This is >300 mph (or >500 km/h). Whoah... # # If you caught this snowflake on your tongue it would feel like catching an icy [BB gun pellet](https://en.wikipedia.org/wiki/BB_gun#Safety), ouch! # # You are missing a key force in the free body diagram that _slows_ down the snowflake, [_air resistance_ or drag](https://www.grc.nasa.gov/www/k-12/VirtualAero/BottleRocket/airplane/falling.html). # # ![Air drag free body diagram](https://www.grc.nasa.gov/www/k-12/VirtualAero/BottleRocket/airplane/Images/falling.gif) # ## Improved model with air resistance # # Adding drag to the free body diagram, now you have a new model. 
# # $m\ddot{y} = -mg + C_d \frac{r\dot{y}^2}{2}A$ # # where $C_d$ is the [unitless drag coefficient](https://en.wikipedia.org/wiki/Drag_coefficient), $r=1.025~kg/m^3$ is the [density of air](https://www.macinstruments.com/blog/what-is-the-density-of-air-at-stp/), $m=3~mg$ is the [mass of a snowflake](https://hypertextbook.com/facts/2001/JudyMoy.shtml), and $A=\piD^2/4$ is the area of the snowflake of [diameter](https://gpm.nasa.gov/sites/default/files/document_files/parsivel_Tokay_c3vp_agu.pdf) $D=6~mm$. # # > __Note:__ The force of drag always opposes the velocity of the snowflake. Keep in mind if the snowflake moves upward, the force reverses direction. # # Now, integrating the equation can be a bit involved, but using v(t=0)=0, there results # # $\frac{dv}{dt} = -g +\frac{C_d rA}{2}v^2$ # # $v(t) = -\sqrt{\frac{mg}{C_d rA}}\tanh\frac{g C_d rA}{m}t$ # # + tags=["hide-input"] m = 3e-6 # mg Cd = 0.5 # no units r = 1.025 # kg/m/m/m g = 9.81 #m/s/s D = 6e-3 # mm - mm A = np.pi*D**2/4 v = -np.sqrt(2*m*g/Cd/r/A)*np.tanh(g*Cd*r*A/m*t) plt.plot(t, v) plt.xlim(0,0.1) plt.xlabel('time (s)') plt.ylabel('vertical speed (m/s)') # - # ### Make a comparison to previous model # # In the constant acceleration model, the snowflake reached the ground in 14 seconds. In the improved air resistance model, you find that the snowflake only accelerates for 0.5 seconds. After that, it floats at a constant velocity until impact. 
This means, you can approximate that the snowflake travels at a constant velocity equal to its terminal velocity, $v_{term}$, as such # # $\frac{dv}{dt} = 0 = -g +\frac{C_d rA}{2m}v_{term}^2$ # # $v_{term} = -\sqrt{\frac{2mg}{C_d rA}}$ # + tags=["hide-input"] vterm = np.sqrt(2*m*g/Cd/r/A) t_improved = np.linspace(0,1000/vterm) y_improved = 1000 - vterm*t_improved plt.plot(t,y, label = 'constant acceleration') plt.plot(t_improved,y_improved, label='constant velocity') print('total flight time is {}'.format(t_improved[-1])) plt.xlabel('time (s)') plt.ylabel('height (m)') # - # ## Wrapping up # # The first model you created assumed constant acceleration, but after accounting for drag you found out that a snowflake reaches a terminal velocity in less than 0.5 seconds. The more accurate model to calculate time of flight was actually a constant velocity model. # # You found that the snowflake drifts slowly to the surface over the course of 496 seconds (or 8 minutes). Gently landing at 2 m/s.
module_01/snowflake.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_tensorflow_p36 # language: python # name: conda_tensorflow_p36 # --- # + import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np # %matplotlib inline from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" import seaborn as sns # - from sklearn.pipeline import make_pipeline from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, OneHotEncoder from sklearn.model_selection import cross_validate from sklearn.metrics import confusion_matrix df = pd.read_csv('../data/balanced-classes/train.csv') df.shape df.head() X = df.iloc[:,0:8] y = df.iloc[:,8] y.shape X.shape rf = RandomForestClassifier() cv_results = cross_validate(rf, X, y, cv = 5, scoring="accuracy", n_jobs=-1, return_estimator=True) print(cv_results) cv_results['test_score'] model = cv_results['estimator'][4] y_pred = model.predict(X) # + from sklearn import metrics def get_metrics(true_labels, predicted_labels): print('Accuracy:', np.round( metrics.accuracy_score(true_labels, predicted_labels), 4)) print('Precision:', np.round( metrics.precision_score(true_labels, predicted_labels, average='weighted'), 4)) print('Recall:', np.round( metrics.recall_score(true_labels, predicted_labels, average='weighted'), 4)) print('F1 Score:', np.round( metrics.f1_score(true_labels, predicted_labels, average='weighted'), 4)) def display_classification_report(true_labels, predicted_labels, classes=[1,0]): report = metrics.classification_report(y_true=true_labels, y_pred=predicted_labels, labels=classes, digits=4) print(report) # - get_metrics(y, y_pred) display_classification_report(y, y_pred) # + conf_mat = confusion_matrix(y, y_pred) dataframe = pd.DataFrame(conf_mat) 
dataframe.head()
# -

# Create heatmap
sns.heatmap(dataframe, annot=True, cbar=None, cmap="Blues", fmt='g');
plt.title("Confusion Matrix"), plt.tight_layout();
plt.ylabel("True Class"), plt.xlabel("Predicted Class");
plt.show();

# Rank features by the trained forest's importances and show them as a bar chart.
imp_features = pd.DataFrame()
imp_features['feature'] = X.columns
imp_features['importance'] = model.feature_importances_
imp_features.sort_values(by=['importance'], ascending=True, inplace=True)
imp_features.set_index('feature', inplace=True)
imp_features.plot(kind='barh', figsize=(25, 10))

# # prediction on test data

test_df = pd.read_csv('../data/balanced-classes/test.csv')
test_df.shape
test_df.head()

# Same column layout as the training split: first 8 columns are features,
# column 8 is the label -- TODO confirm against the CSV schema.
X_test = test_df.iloc[:,0:8]
y_test = test_df.iloc[:,8]
y_test.shape
X_test.shape

y_pred_test = model.predict(X_test)

get_metrics(y_test, y_pred_test)
display_classification_report(y_test, y_pred_test)

# +
conf_mat_test = confusion_matrix(y_test, y_pred_test)
dataframe_test = pd.DataFrame(conf_mat_test)
dataframe_test.head()
# -

# Create heatmap
sns.heatmap(dataframe_test, annot=True, cbar=None, cmap="Blues", fmt='g');
plt.title("Confusion Matrix"), plt.tight_layout();
plt.ylabel("True Class"), plt.xlabel("Predicted Class");
plt.show();

# # prediction on full data set

full_df = pd.read_csv('../data/data-with-features.csv')
full_df.shape
full_df.head()

# BUG FIX: the original sliced `full_train_df.iloc[...]`, but no variable named
# `full_train_df` exists anywhere in this notebook -- the frame loaded just
# above is `full_df`, so the original cell raised NameError.
X_full_df = full_df.iloc[:,0:8]
y_full_df = full_df.iloc[:,8]
X_full_df.shape
y_full_df.shape

y_pred_full_df = model.predict(X_full_df)

get_metrics(y_full_df, y_pred_full_df)
display_classification_report(y_full_df, y_pred_full_df)

# +
conf_mat_full = confusion_matrix(y_full_df, y_pred_full_df)
dataframe_full = pd.DataFrame(conf_mat_full)
dataframe_full.head()
# -

# Create heatmap
sns.heatmap(dataframe_full, annot=True, cbar=None, cmap="Blues", fmt='g');
plt.title("Confusion Matrix"), plt.tight_layout();
plt.ylabel("True Class"), plt.xlabel("Predicted Class");
plt.show();
team-i-catch/deepak/notebooks/random-forest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + active=""
# Problem (translated from the original Chinese statement):
# You are given preference lists for n friends, where n is always even.
# For each friend i, preferences[i] is a list of the other friends ordered
# from closest to least close; friends are labeled 0 to n-1 and
# preferences[i] never contains i, and all its values are distinct.
# All friends are split into pairs, given by `pairs`, where
# pairs[i] = [xi, yi] means xi is paired with yi and yi with xi.
# With x paired to y and u paired to v, friend x is UNHAPPY if both:
#   1. x prefers u over y, and
#   2. u prefers x over v.
# Return the number of unhappy friends.
#
# Example 1:
#   n = 4, preferences = [[1, 2, 3], [3, 2, 0], [3, 1, 0], [1, 2, 0]],
#   pairs = [[0, 1], [2, 3]]  ->  2
#   (friends 1 and 3 are unhappy; friends 0 and 2 are happy)
# Example 2:
#   n = 2, preferences = [[1], [0]], pairs = [[1, 0]]  ->  0
# Example 3:
#   n = 4, preferences = [[1, 3, 2], [2, 3, 0], [1, 3, 0], [0, 2, 1]],
#   pairs = [[1, 3], [0, 2]]  ->  4
#
# Constraints: 2 <= n <= 500, n is even, preferences.length == n,
# preferences[i].length == n - 1, pairs.length == n/2, xi != yi,
# and every friend appears in exactly one pair.
# -

# BUG FIX: `List` and `defaultdict` were used below but never imported
# anywhere in this notebook, so defining the class raised NameError.
from collections import defaultdict
from typing import List


class Solution:
    def unhappyFriends(self, n: int, preferences: List[List[int]], pairs: List[List[int]]) -> int:
        """Count friends who prefer someone who also prefers them back.

        O(n^2) overall: rank tables make each "does x prefer u over y"
        check O(1), and each friend scans their preference list once.
        """
        # ranks[i][j] = position of j in i's preference list (lower = closer).
        # Missing entries default to n, i.e. "less preferred than anyone listed".
        ranks = {}
        for person, pref in enumerate(preferences):
            ranks[person] = defaultdict(lambda: n)
            for rank, peer in enumerate(pref):
                ranks[person][peer] = rank

        # partner[x] = the friend x is paired with (symmetric).
        partner = {}
        for p1, p2 in pairs:
            partner[p1] = p2
            partner[p2] = p1

        ans = 0
        for p1, p2 in pairs:
            # p1 is unhappy if some peer u satisfies both conditions;
            # `break` ensures each friend is counted at most once.
            for peer in preferences[p1]:
                if (ranks[p1][peer] < ranks[p1][p2] and
                        ranks[peer][p1] < ranks[peer][partner[peer]]):
                    ans += 1
                    break
            for peer in preferences[p2]:
                if (ranks[p2][peer] < ranks[p2][p1] and
                        ranks[peer][p2] < ranks[peer][partner[peer]]):
                    ans += 1
                    break
        return ans
Array/1026/1583. Count Unhappy Friends.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + [markdown] slideshow={"slide_type": "slide"} # ## Network Analysis using NetworkX # + [markdown] slideshow={"slide_type": "slide"} # ## What are Networks (Graphs)? # # A graph G is represented by a set of nodes and a set of edges. An edge between two nodes in a graph signifies a relationship between those two nodes. Edges can be directed and undirected. # ![title](img/network.png) # + [markdown] slideshow={"slide_type": "slide"} # # # Examples? # + [markdown] slideshow={"slide_type": "slide"} # ![title](img/example.png) # + [markdown] slideshow={"slide_type": "slide"} # NetworkX uses dictionaries underneath to store node and edge data. # It's dict-o-dict-o-dict-o-dict to be precise. # ``` # G['node1'] # G['node1']['node2'] # G['node1']['node2']['some_id']['some_attrb'] # ``` # + slideshow={"slide_type": "slide"} import networkx as nx import matplotlib.pyplot as plt import numpy as np # %matplotlib inline # + slideshow={"slide_type": "slide"} # Create an empty graph object with no nodes and edges. G = nx.Graph() # DiGraph, MultiGraph, MultiDiGraph # + # Add nodes to our graph object # In NetworkX, nodes can be any hashable object e.g. a text string, an image, # an XML object, another Graph, a customized node object, etc. G.add_node('1') G.add_node(1) G.add_node('second') # G.add_node({'dictionary': 'will throw error'}) # G.add_node([1, 2]) # - list_of_nodes = [1, 2, 3, 'node4'] G.add_nodes_from(list_of_nodes) # Access nodes in a Graph object G.nodes() # + slideshow={"slide_type": "slide"} # NetworkX has a lot of graph generators path_graph is one of them. H = nx.path_graph(7) print H.nodes() # - G.add_nodes_from(H) print G.nodes() # + [markdown] slideshow={"slide_type": "-"} # Difference between `G.add_node(H)` and `G.add_nodes_from(H)`? 
# - G.add_node(H) print G.nodes() # + slideshow={"slide_type": "slide"} # Now let's talk about edges. # Edge between two nodes means that they share some property/relationship # G.add_node(H) G.add_edge(0, 'second') G.add_edge(2, 3) G.add_edge('second', 'node4') list_of_edges = [(2, 3), (4, 5), ('node4', 0)] G.add_edges_from(list_of_edges) # Check out edges G.edges() # + slideshow={"slide_type": "slide"} # Number of nodes and edges. print G.number_of_nodes(), len(G), len(G.nodes()) print G.number_of_edges(), len(G.edges()) # - print G.nodes() G.remove_node(0) print G.nodes() print G.edges() G.remove_edge(4, 5) print G.edges() G.clear() print G.nodes(), G.edges() # + slideshow={"slide_type": "slide"} # One more graph generator. This will create # a Erdos-Reyni Graph G = nx.erdos_renyi_graph(10, 0.2, seed=1) # Let's checkout nodes and edges print G.nodes() print G.edges() nx.draw(G) # + slideshow={"slide_type": "slide"} matrix = nx.to_numpy_matrix(G) # print matrix fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.set_aspect('equal') plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # Adding attributes and weights. # + slideshow={"slide_type": "-"} G.add_edge(1, 2, weight=4.7) G.add_edges_from([(3, 4), (4, 5)], color='red') G.add_edges_from([(1, 2, {'color': 'blue'}), (2, 3, {'weight': 8})]) G[1][2]['weight'] = 4.7 # + slideshow={"slide_type": "slide"} # Adding attributes to graphs, nodes, and edges. 
G.graph['attr'] = 'EuroSciPy' print G.graph G.add_node(1, time='11:00AM') print G.nodes() print G.nodes(data=True) # + # Accessing the graph dictionary print 'nodes: ', G.nodes() print 'edges: ', G.edges() print G[0] print G[1] print G[1][2] # - print G[1] print G[1][2] print G[1][2]['color'] # + [markdown] slideshow={"slide_type": "slide"} # ### Exercise - 1 # + G = nx.Graph() list_of_cities = [('Paris', 'Munich', 841), ('Munich', 'Berlin', 584), ('Berlin', 'London', 1101), ('Paris', 'Barcelona', 1038)] G.add_weighted_edges_from(list_of_cities) # print G.nodes() # print G.edges(data=True) # Iterate through the edges and find the highest weight. # + slideshow={"slide_type": "slide"} result = max([w for u, v, w in G.edges(data=True)]) print result # max(G.edges(data=True), key=lambda x:x[2]) # + [markdown] slideshow={"slide_type": "slide"} # Let's work on a read world network. # # Arxiv GR-QC (General Relativity and Quantum Cosmology) collaboration network is from the e-print arXiv and covers scientific collaborations between authors papers submitted to General Relativity and Quantum Cosmology category. If an author i co-authored a paper with author j, the graph contains a undirected edge from i to j. If the paper is co-authored by k authors this generates a completely connected (sub)graph on k nodes. # # source: http://snap.stanford.edu/data/index.html#canets # - import csv authors_graph = nx.Graph() with open('CA-GrQc.txt', 'r') as f: reader = csv.reader(f, delimiter='\t') for row in reader: authors_graph.add_edge(row[0], row[1]) # + slideshow={"slide_type": "slide"} print authors_graph.number_of_edges() print authors_graph.number_of_nodes() # + [markdown] slideshow={"slide_type": "-"} # Neighbors of a node. # - # Neighbors/ degree of node is one way of calculating the importance # of the node. Influential nodes. 
print authors_graph.neighbors('22504') # print len(authors_graph.neighbors('22504')) # print nx.degree(authors_graph, nbunch=['22504']) # print authors_graph.degree(nbunch=['22504']) # + [markdown] slideshow={"slide_type": "slide"} # ### Exercise - 2 # # Create a list of (node, degree of node) tuples and find the node with maximum degree. # + slideshow={"slide_type": "slide"} result = [(node, len(authors_graph.neighbors(node))) for node in authors_graph.nodes_iter()] # - max(result, key=lambda node:node[1]) authors_graph.degree()['21012'] # returns a dictionary of degree keyed by node # + slideshow={"slide_type": "slide"} authors_graph.degree() # + slideshow={"slide_type": "slide"} nx.degree_centrality(authors_graph) # + # Various other measures of centrality like ``nx.closeness_centrality`` # , ``nx.betweenness_centrality``, flow centrality. # + [markdown] slideshow={"slide_type": "slide"} # ### Exercise - 3 # # Plot degree centrality of authors_graph. # # (count vs degree centrality) # + slideshow={"slide_type": "slide"} # G = nx.erdos_renyi_graph(500, 0.9, seed=1) plt.hist(nx.degree_centrality(authors_graph).values()) plt.show() # + slideshow={"slide_type": "slide"} # Lets talk about connected components of a graph. print [len(c) for c in sorted(nx.connected_components(authors_graph), key=len, reverse=True)] # + slideshow={"slide_type": "slide"} graphs = [c for c in sorted(nx.connected_component_subgraphs(authors_graph), key=len, reverse=True)] # - len(graphs[0]) nx.draw(graphs[5]) # + [markdown] slideshow={"slide_type": "slide"} # ### Graph Traversal # - print nx.shortest_path(graphs[0], '22504', '23991') print len(nx.shortest_path(graphs[0], '22504', '23991')) print nx.shortest_path_length(graphs[0], '22504', '23991') # + # nx.shortest_path(authors_graph, '22504', '17453') # + [markdown] slideshow={"slide_type": "slide"} # ### Excersise - 4 # ##### Six degrees of separation, Erdos Number, Bacon Number!! 
# # Find the '22504' number of the graph G, if there is no connection between nodes then give it the number `-1`. # Also plot a histogram of the '22504' number. # # HINT: `nx.shortest_path_length` # + slideshow={"slide_type": "slide"} # G = nx.fast_gnp_random_graph(10000, 0.1, seed=1) # + slideshow={"slide_type": "-"} d = {} for node in authors_graph.nodes(): try: d[node] = nx.shortest_path_length(authors_graph, '22504', node) except: d[node] = -1 # print d # - plt.hist(d.values()) plt.show() # + [markdown] slideshow={"slide_type": "slide"} # #### Structures, Cliques in a Network # # A subset of nodes which induce a complete subgraph is a clique. # - G = nx.complete_graph(5) # In a complete graph all the nodes are connected to each other. G.add_edge(4, 5) nx.draw(G) list(nx.clique.find_cliques(G)) # + [markdown] slideshow={"slide_type": "slide"} # ### Triads in a Network # - G = nx.Graph() list_of_edges = [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (3, 4), (5, 6)] G.add_edges_from(list_of_edges) nx.draw(G, with_labels=True) list(nx.find_cliques(G)) # + slideshow={"slide_type": "slide"} # Facebook friend search, amazon product recommendation. G = nx.complete_graph(4) G.remove_edge(0, 1) nx.draw(G, with_labels=True) print list(nx.find_cliques(G)) # + [markdown] slideshow={"slide_type": "slide"} # #### Link Prediction usign Jaccard coefficient # - G = nx.erdos_renyi_graph(30, 0.2) list(nx.jaccard_coefficient(G)) # + [markdown] slideshow={"slide_type": "slide"} # ### Excersise - 5 # # Create a recommender for authors by listing the top 5 authors by jaccard coefficient. 
# + slideshow={"slide_type": "slide"} # %%time d = {} for u, v, p in nx.jaccard_coefficient(authors_graph): if u in d: d[u].append((v, p)) elif u not in d: d[u] = [(v, p)] if v in d: d[v].append((u, p)) elif v not in d: d[v] = [(u, p)] # - reco = {} for u, w in d.items(): reco[u] = sorted(w, key=lambda x: x[1], reverse=True)[0:5] reco # + [markdown] slideshow={"slide_type": "slide"} # ### Directed Graphs # # ![title](img/pagerank.png) # + slideshow={"slide_type": "slide"} G = nx.DiGraph() G.add_edge(1, 2) print G.edges() # G[1][2] # G.is_directed() # type(G) # - G.add_edges_from([(1, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)]) nx.draw(G, with_labels=True) # + slideshow={"slide_type": "slide"} G.in_degree() # - nx.pagerank(G) # + slideshow={"slide_type": "slide"} G.add_edge(5, 6) nx.draw(G, with_labels=True) # - nx.pagerank(G) # + slideshow={"slide_type": "slide"} G.add_edge(2, 8) nx.draw(G, with_labels=True) # - nx.pagerank(G) # + [markdown] slideshow={"slide_type": "slide"} # ### Excersise - 6 # # Arxiv HEP-TH (high energy physics theory) citation graph is from the e-print arXiv and covers all the citations within a dataset of 27,770 papers with 352,807 edges. If a paper i cites paper j, the graph contains a directed edge from i to j. If a paper cites, or is cited by, a paper outside the dataset, the graph does not contain any information about this. # # The data covers papers in the period from January 1993 to April 2003 (124 months). It begins within a few months of the inception of the arXiv, and thus represents essentially the complete history of its HEP-TH section. # # * Open the file cit-HepTh.txt and create a directed graph. 
# # ``` # import csv # citation = nx.DiGraph() # with open('cit-HepTh.txt', 'r') as f: # reader = csv.reader(f, delimiter='\t') # for row in reader: # citation.add_edge(row[0], row[1]) # ``` # + [markdown] slideshow={"slide_type": "slide"} # # * Calculate the page rank of nodes (papers) using nx.pagerank() # * Find the node with max (M) and min (m) page rank score # * Calculate the in degree and out degree of the node M and m. # * Compare the average of page rank score of nodes connected to M and m. (M->node and m->node) # * Open the file cit-HepTh-dates.txt and add date as a node attribute to the citation network. # # ``` # import csv # with open('cit-HepTh-dates.txt', 'r') as f: # reader = csv.reader(f, delimiter='\t') # for row in reader: # citation.add_node(row[0], date=row[1]) # ``` # + [markdown] slideshow={"slide_type": "slide"} # # * Iterate through the nodes and find all the papers published in 2001. (Not all the papers have the data attribute!!!) # * Find the density of the graph using nx.density(G). Is it a sparse or dense graph? # * Find the number of nodes in the largest connected component of the network. (Note: You need to use weakly_connected_components instead of connected_components). 
# + slideshow={"slide_type": "slide"} import csv citation = nx.DiGraph() with open('cit-HepTh.txt', 'r') as f: reader = csv.reader(f, delimiter='\t') for row in reader: citation.add_edge(row[0], row[1]) # - # %%time cite = nx.pagerank(citation) print max(cite.items(), key = lambda x:x[1]) print min(cite.items(), key = lambda x:x[1]) # + slideshow={"slide_type": "slide"} print citation.in_degree('9407087') print citation.out_degree('9407087') print citation.in_degree('9305019') print citation.out_degree('9305019') score, count = 0, 0 for node in citation['9407087']: score += cite[node] count += 1 print score/count score, count = 0, 0 for node in citation['9305019']: score += cite[node] count += 1 print score/count # + slideshow={"slide_type": "slide"} import csv with open('cit-HepTh-dates.txt', 'r') as f: reader = csv.reader(f, delimiter='\t') for row in reader: citation.add_node(row[0], date=row[1]) # - papers_2001 = [] for node, attr in citation.nodes(data=True): if 'date' in attr: if attr['date'][0:4] == '2001': papers_2001.append(node) print len(papers_2001) # nx.density(nx.erdos_renyi_graph(100, 0.4)) nx.density(citation) # + slideshow={"slide_type": "slide"} print [len(c) for c in sorted(nx.weakly_connected_components(citation), key=len, reverse=True)] # -
Slides.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="kt_bqdZgHt1b" colab_type="code" outputId="86a5c4b7-5e7b-4ec8-9c9e-767af9c078ee" colab={"base_uri": "https://localhost:8080/", "height": 124}
from google.colab import drive
drive.mount('/content/gdrive')

# + id="MZa5u_SOEbBl" colab_type="code" outputId="37c0acf5-4ccc-4fc3-8031-a6b646fcba7f" colab={"base_uri": "https://localhost:8080/", "height": 34}
"""Trains a ResNet on the CIFAR10 dataset.

ResNet v1
[a] Deep Residual Learning for Image Recognition
https://arxiv.org/pdf/1512.03385.pdf

ResNet v2
[b] Identity Mappings in Deep Residual Networks
https://arxiv.org/pdf/1603.05027.pdf
"""

import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.callbacks import Callback
import numpy as np
import pandas as pd
from datetime import datetime
import time
import os
from keras.constraints import Constraint
from keras import initializers, layers
from keras.layers import Lambda
import scipy.io as sio


class ShrinkageConstraint(Constraint):
    """Weight constraint keeping the shrinkage weight inside [0, 1].

    Applied to the single scalar weight of ``ShrinkageFactor`` so that
    ``1 - step_size * W`` remains a valid shrinkage coefficient.
    """

    def __init__(self, axis=0):
        self.axis = axis

    def __call__(self, w):
        # apply unitnorm
        # w = w / (K.epsilon() + K.sqrt(K.sum(K.square(w),
        #                                     axis=self.axis,
        #                                     keepdims=True)))
        # Zero out negative entries (non-negativity).
        w *= K.cast(K.greater_equal(w, 0.), K.floatx())
        # Zero out entries greater than 1.
        # NOTE(review): this sets w to 0 where w > 1 rather than clipping it
        # to 1 (K.clip(w, 0., 1.) would clip) — confirm intent before changing.
        w *= K.cast(K.less_equal(w, 1.), K.floatx())
        return w


class ShrinkageFactor(layers.Layer):
    """
    This is the sigma object in the algorithm 1 by Beygelzimer
    (Online Gradient Boosting): scales its input by (1 - step_size * W),
    where W is a scalar weight constrained to [0, 1].
    """

    def __init__(self, step_size, trainable=True, **kwargs):
        self.step_size = step_size
        self.trainable = trainable
        super(ShrinkageFactor, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.W = self.add_weight(name='highway',
                                 shape=(1, 1),
                                 initializer=initializers.Zeros(),
                                 constraint=ShrinkageConstraint(),
                                 regularizer=l2(0.01),
                                 trainable=self.trainable)
        self.count = K.variable(0, name="epoch")
        super(ShrinkageFactor, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x):
        # Fix: the original body contained an unreachable epoch-gated branch
        # after this return statement (it also compared a Keras variable with
        # a Python int, which would not behave symbolically).  Only the
        # reachable statement is kept, so behaviour is unchanged.
        return (1 - self.step_size * self.W) * x

    def compute_output_shape(self, input_shape):
        # Shrinkage is element-wise: output shape equals input shape.
        if isinstance(input_shape, list):
            return input_shape[0]
        return input_shape


class TimingCallback(Callback):
    """Keras callback recording the wall-clock duration of each epoch."""

    # Fix: use logs=None instead of the mutable default logs={} (Keras always
    # passes a logs dict, so behaviour is unchanged).
    def on_train_begin(self, logs=None):
        self.times = []

    def on_epoch_begin(self, batch, logs=None):
        self.epoch_time_start = time.time()

    def on_epoch_end(self, batch, logs=None):
        # write stuff to disc here...
        self.times.append(time.time() - self.epoch_time_start)


def lr_schedule(epoch):
    """Learning Rate Schedule

    Learning rate is scheduled to be reduced after 80, 120, 160, 180 epochs.
    Called automatically every epoch as part of callbacks during training.

    # Arguments
        epoch (int): The number of epochs

    # Returns
        lr (float32): learning rate
    """
    lr = 1e-3
    if epoch > 180:
        lr *= 0.5e-3
    elif epoch > 160:
        lr *= 1e-3
    elif epoch > 120:
        lr *= 1e-2
    elif epoch > 80:
        lr *= 1e-1
    print('Learning rate: ', lr)
    return lr


def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True,
                 stack=0,
                 res_block="placeholder"):
    """2D Convolution-Batch Normalization-Activation stack builder

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)
        stack (int): stack number for layer naming purposes
        res_block (string): name of the res_block for naming purposes

    # Returns
        x (tensor): tensor as input to the next layer
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4),
                  name=f"resnet_{stack}_{res_block}")

    x = inputs
    if conv_first:
        x = conv(x)
        if batch_normalization:
            x = BatchNormalization(name=f"bn_{stack}_{res_block}")(x)
        if activation is not None:
            x = Activation(activation)(x)
    else:
        if batch_normalization:
            # Fix: the layer name must be passed as a keyword argument; the
            # original passed it positionally, which BatchNormalization would
            # interpret as its first parameter (axis) and crash whenever this
            # conv_first=False branch is taken.
            x = BatchNormalization(name=f"bn_{stack}_{res_block}")(x)
        if activation is not None:
            x = Activation(activation)(x)
        x = conv(x)
    return x


def resnet_block(x, stack, res_block, num_filters, boost=True):
    """Build one (optionally boosted) ResNet v1 residual block.

    # Arguments
        x (tensor): input tensor
        stack (int): stack index (used for naming and downsampling decisions)
        res_block (int): block index within the stack
        num_filters (int): number of Conv2D filters for this block
        boost (bool): wrap the residual branch in a ShrinkageFactor layer

    # Returns
        x (tensor): output tensor of the block
    """
    strides = 1
    if stack > 0 and res_block == 0:  # first layer but not first stack
        strides = 2  # downsample
    y = resnet_layer(inputs=x,
                     num_filters=num_filters,
                     strides=strides,
                     stack=stack,
                     res_block=f"{res_block}a")
    y = resnet_layer(inputs=y,
                     num_filters=num_filters,
                     activation=None,
                     stack=stack,
                     res_block=f"{res_block}b")
    if stack > 0 and res_block == 0:  # first layer but not first stack
        # linear projection residual shortcut connection to match
        # changed dims
        x = resnet_layer(inputs=x,
                         num_filters=num_filters,
                         kernel_size=1,
                         strides=strides,
                         activation=None,
                         batch_normalization=False,
                         stack=stack,
                         res_block=f"{res_block}c")
    if boost:
        # Non-trainable shrinkage on the residual branch (online boosting).
        step_size = 1.0
        y = ShrinkageFactor(step_size, False,
                            name=f"shrinkage_{stack}_{res_block}")(y)
        # x = Lambda(lambda x: x * step_size, name=f"shrinkage_lambda_{stack}_{res_block}")(x)
    x = keras.layers.add([x, y], name=f"add_{stack}_{res_block}")
    x = Activation('relu')(x)
    return x

# + id="m_Sn5-vcAbuy" colab_type="code" colab={}
# Training parameters
batch_size = 32  # orig paper trained all networks with batch_size=128
epochs = 200
data_augmentation = True
num_classes = 10

# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True

# Model parameter
# ----------------------------------------------------------------------------
#           |      | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch
# Model     |  n   | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti
#           |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)
# ----------------------------------------------------------------------------
# ResNet20  | 3 (2)| 92.16     | 91.25     | -----     | -----     | 35 (---)
# ResNet32  | 5(NA)| 92.46     | 92.49     | NA        | NA        | 50 ( NA)
# ResNet44  | 7(NA)| 92.50     | 92.83     | NA        | NA        | 70 ( NA)
# ResNet56  | 9 (6)| 92.71     | 93.03     | 93.01     | NA        | 90 (100)
# ResNet110 |18(12)| 92.65     | 93.39+-.16| 93.15     | 93.63     | 165(180)
# ResNet164 |27(18)| -----     | 94.07     | -----     | 94.54     | ---(---)
# ResNet1001| (111)| -----     | 92.39     | -----     | 95.08+-.14| ---(---)
# ---------------------------------------------------------------------------
n = 3

# Model version
# Orig paper: version = 1 (ResNet v1), Improved ResNet: version = 2 (ResNet v2)
version = 1

# Computed depth from supplied model parameter n
depth = n * 6 + 2  # n=3 --> 20, n=5 --> 32, n=7 --> 44, n=9 --> 56

# Model name, depth and version
model_type = 'SVHN_ResNet%dv%d_noshare' % (depth, version)

# + id="Si99s5jkAiXs" colab_type="code" colab={}
# Load SVHN (dataset 2)
path = "/content/gdrive/My Drive/colab/svhn"
train_images = sio.loadmat(path + '/train_32x32.mat')
test_images = sio.loadmat(path + '/test_32x32.mat')

# + id="XN7-KxI8BSlQ" colab_type="code" colab={}
x_train = train_images["X"]
# Move the sample axis (axis 3 in the .mat layout) to the front.
x_train = np.transpose(x_train, (3, 0, 1, 2))
y_train = train_images["y"]
# Remap label 10 to 0 (SVHN stores the digit zero as class 10).
y_train[y_train == 10] = 0

x_test = test_images["X"]
x_test = np.transpose(x_test, (3, 0, 1, 2))
y_test = test_images["y"]
y_test[y_test == 10] = 0

# + id="wab8Co6qDMpD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0aea9f82-ac41-4b2d-a5bc-49967c680dc2"
x_test.shape

# + id="buBpYdrSAglY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="7f9276d4-0cde-4dff-fbc8-6af4fd551054"
# Load the CIFAR10 data.
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Input image dimensions.
input_shape = x_train.shape[1:]

# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# If subtract pixel mean is enabled
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean

print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) # + id="ZBcYE65FCL5j" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="cfaecd25-f97a-4ab2-ac29-088fec651891" 73257/32 # + id="qetyT-4WGRNt" colab_type="code" outputId="255bba54-65fb-4d26-d50a-ecc96a12df71" colab={"base_uri": "https://localhost:8080/", "height": 3143} # model = resnet_v1(input_shape=input_shape, depth=depth) # we shall hardcode the model...with num_res_blocks=3 # model = resnet_v1(input_shape=input_shape, depth=depth) # we shall hardcode the model...with num_res_blocks=3 num_filters = 16 inputs = Input(shape=input_shape) x = resnet_layer(inputs=inputs, res_block='preprocessing') block0_0 = resnet_block(x, 0, 0, num_filters) block0_1 = resnet_block(block0_0, 0, 1, num_filters) block0_2 = resnet_block(block0_1, 0, 2, num_filters) block1_0 = resnet_block(block0_2, 1, 0, num_filters*2) block1_1 = resnet_block(block1_0, 1, 1, num_filters*2) block1_2 = resnet_block(block1_1, 1, 2, num_filters*2) block2_0 = resnet_block(block1_2, 2, 0, num_filters*4) block2_1 = resnet_block(block2_0, 2, 1, num_filters*4) block2_2 = resnet_block(block2_1, 2, 2, num_filters*4) block_output = AveragePooling2D(pool_size=8, name="avg_pool_2_2")(block2_2) block_output_flatten = Flatten(name="flatten_2_2")(block_output) #y = Dense(128)(block_output_flatten) pred_layer_0 = Dense(num_classes, activation='softmax', name='pred_layer_0') pred_layer_1 = Dense(num_classes, activation='softmax', name='pred_layer_1') pred_layer_2 = Dense(num_classes, activation='softmax', name='pred_layer_2') outputs = pred_layer_2(block_output_flatten) model = Model(inputs=[inputs], outputs=[outputs]) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=lr_schedule(0)), metrics=['accuracy']) model.summary() # print(model_type) # Prepare model model saving directory. 
save_dir = os.path.join(os.getcwd(), 'saved_models') save_dir = "/content/gdrive/My Drive/colab/weights/" model_name = 'svhn_%s_model.{epoch:03d}.h5' % model_type if not os.path.isdir(save_dir): os.makedirs(save_dir) filepath = os.path.join(save_dir, model_name) # Prepare callbacks for model saving and for learning rate adjustment. checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_acc', verbose=1, save_best_only=True) lr_scheduler = LearningRateScheduler(lr_schedule) lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6) time_cb = TimingCallback() callbacks = [checkpoint, lr_reducer, lr_scheduler, time_cb] print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( # set input mean to 0 over the dataset featurewise_center=False, # set each sample mean to 0 samplewise_center=False, # divide inputs by std of dataset featurewise_std_normalization=False, # divide each input by its std samplewise_std_normalization=False, # apply ZCA whitening zca_whitening=False, # epsilon for ZCA whitening zca_epsilon=1e-06, # randomly rotate images in the range (deg 0 to 180) rotation_range=0, # randomly shift images horizontally width_shift_range=0.1, # randomly shift images vertically height_shift_range=0.1, # set range for random shear shear_range=0., # set range for random zoom zoom_range=0., # set range for random channel shifts channel_shift_range=0., # set mode for filling points outside the input boundaries fill_mode='nearest', # value used for fill_mode = "constant" cval=0., # randomly flip images horizontal_flip=True, # randomly flip images vertical_flip=False, # set rescaling factor (applied before any other transformation) rescale=None, # set function that will be applied on each input preprocessing_function=None, # image data format, either "channels_first" or "channels_last" data_format=None, # fraction of images reserved for validation (strictly 
between 0 and 1) validation_split=0.0) # Compute quantities required for featurewise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) # + id="tmQlq24uGWZV" colab_type="code" outputId="2e6976c6-e92d-4be5-9a92-12f6e8432e8a" colab={"base_uri": "https://localhost:8080/", "height": 17438} # Fit the model on the batches generated by datagen.flow(). hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), validation_data=(x_test, [y_test]), steps_per_epoch=2290, # num images/32 epochs=epochs, verbose=2, workers=10, callbacks=callbacks) hist_df = pd.DataFrame(hist.history) hist_df['times'] = time_cb.times[-hist_df.shape[0]:] hist_df.to_csv('/content/gdrive/My Drive/colab/weights/svhn_training_history_resnetv1_noshare{}.csv'.format(datetime.now().strftime('%Y-%m-%d_%H-%M-%S')), index=True) # Score trained model. scores = model.evaluate(x_test, [y_test], verbose=1) print('Test output:', scores) # + id="3_sqnXY5Oh2f" colab_type="code" colab={} model.save_weights('/content/gdrive/My Drive/colab/weights/svhn_noshare_boost_oct17.h5') # + id="91F_9LrrlAf-" colab_type="code" colab={} 1+2 # + [markdown] id="063Y3g6o3q16" colab_type="text" # Done
svhn_boost_noshare.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Description of the Project
# 1. Importing the libraries and loading the dataset
# 2. Exploring the data and visualization
# 3. Training and creating a logistic regression model
# 4. Evaluation of the model (precision, recall and F1 score), confusion matrix
# 5. Model improvements

# ### Introduction:
# Regression analysis is used to predict the dependent variable on the basis of one or more independent variables. It explains the impact of changes in the independent attributes on the dependent variable. Logistic regression, also called a logit model, is used to model dichotomous outcome variables. In the logit model, the log odds of the outcome are modeled as a linear combination of the predictor variables.

# ### Description of the data
# I collected this data from the URL https://stats.idre.ucla.edu/ of the Institute for Digital Research and Education statistical consulting group. This dataset has the following features:
# gre, gpa and rank (there are three predictor variables: gre, gpa and rank).
# The dependent variable is admit, which has two classes, 1 and 0. A student who was admitted is assigned 1; a student who was not admitted is assigned 0.

# ### Evaluation
# This project examines how independent variables such as GRE (Graduate Record Exam scores), GPA (grade point average) and the prestige of the undergraduate institution affect admission into graduate school. The target variable is binary, i.e. whether or not the student is admitted. After preprocessing the data, performing EDA and generating the x and y variables, I built a logistic regression model, which is evaluated with a confusion matrix and a classification report.
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

# import the data
data = pd.read_csv("https://stats.idre.ucla.edu/stat/data/binary.csv")
data.head()

data.tail(3)

data.isnull().sum()

data.dtypes

data.describe().T

# Histogram
plt.figure(figsize=(6,5))
plt.hist(data['gpa'], bins=30, color='blue')
plt.xlabel('GPA')

plt.figure(figsize=(6,5))
plt.hist(data['gre'], bins=30, color='green')
plt.xlabel('gre_score')

data['admit'].hist(color='red')

data['rank'].hist(color='blue')

# Let's see the relationship between gpa and gre
sns.jointplot(x='gpa', y='gre', data=data, color='g', kind='kde')

# Let's see the relationship between gpa and gre
sns.jointplot(x='gpa', y='gre', data=data, color='r')

# +
# use dummies method for rank columns
dummy_rank = pd.get_dummies(data['rank'], prefix='rank')
dummy_rank.head()
# -

# Let's check the multicollinearity among the dummy variables
# If we find multicollinearity then we leave out one dummy variable

# Let's merge two DataFrames data and dummy_rank
cols_need = ['admit', 'gre', 'gpa']
data1 = data[cols_need].join(dummy_rank)
data1.head()

# Let's drop the rank_1 column from the dummy variables in order to reduce the multicollinearity
data1 = data1.drop('rank_1', axis=1)
data1.head()

from sklearn.model_selection import train_test_split

x = data1.iloc[0:, 1:]  # all predictors: gre, gpa, rank_2..rank_4
# Select the target by name: clearer than the original iloc[0:, -6],
# which picked the same 'admit' column by position.
y = data1['admit']
x.head()

y.head()

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23)

# Fix: the third element should be x_test.shape (the original printed
# y_train.shape twice and never showed the test-feature shape).
x_train.shape, y_train.shape, x_test.shape, y_test.shape

from sklearn.linear_model import LogisticRegression

model = LogisticRegression()
model.fit(x_train, y_train)

pred = model.predict(x_test)

from sklearn.metrics import classification_report, confusion_matrix

print(classification_report(y_test, pred))

confusion_matrix(y_test, pred)

# +
# Let's only take two features gre and gpa
# -

data1.head()

x = data[['gre', 'gpa']]
y = data['admit']

x.head()

y.head()

x.shape, y.shape

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23)

x_train.shape, y_train.shape, x_test.shape, y_test.shape

from sklearn.linear_model import LogisticRegression

model1 = LogisticRegression()
model1.fit(x_train, y_train)

pred1 = model1.predict(x_test)
pred1

from sklearn.metrics import accuracy_score

accuracy_score(y_test, pred1)

print(classification_report(y_test, pred1))

confusion_matrix(y_test, pred1)

# #### Reference: https://stats.idre.ucla.edu/stata/dae/logistic-regression/
Logistic Regression on Admission Dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Visualizing Rotated MNIST samples

# +
# Common imports
import os
import random
import copy
import numpy as np

# Pillow
from PIL import Image
import PIL

# Matplotlib
from matplotlib.pyplot import imshow

# Pytorch
import torch
import torch.utils.data as data_utils
from torchvision import datasets, transforms

# +
# Load the full MNIST training split in one batch so that mnist_imgs /
# mnist_labels end up holding every sample.
data_obj = datasets.MNIST('../../data/rot_mnist',
                          train=True,
                          download=False,
                          transform=transforms.ToTensor())

train_loader = torch.utils.data.DataLoader(data_obj, batch_size=60000, shuffle=False)

for i, (x, y) in enumerate(train_loader):
    mnist_imgs = x
    mnist_labels = y
# -

to_pil = transforms.Compose([
    transforms.ToPILImage(),
])

# Rotation angles (degrees) to visualize.
angles = [0, 15, 30, 45, 60, 75, 90]

# Pick one random sample.
# Fix: random.randint is inclusive on BOTH ends, so the original
# randint(0, mnist_imgs.shape[0]) could return an out-of-range index and
# raise IndexError; randrange excludes the upper bound.
indice = random.randrange(mnist_imgs.shape[0])
mnist_img = mnist_imgs[indice]

# Rotate the chosen digit by each angle.
rotated_imgs = [transforms.functional.rotate(to_pil(mnist_img), angle)
                for angle in angles]

# Save each rotated image, named after its rotation angle (the manual
# counter loop is replaced by zip, which pairs angles with images directly).
for angle, img in zip(angles, rotated_imgs):
    img.save('../../results/rot_mnist/images/' + str(angle) + '.jpg')
docs/notebooks/helper_plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + #This table is necessary in order to convert state name to abbreviation quickly #https://github.com/nquandt98/project1cosc3570nquandt us_state_abbrev = { 'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID', 'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA', 'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS', 'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ', 'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK', 'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD', 'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA', 'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY', 'District of Columbia' : 'MD', } # + from datascience import * import plotly.plotly as py import plotly.graph_objs as go import plotly.figure_factory as ff import numpy import urllib import requests import pandas as pd df = pd.read_csv('https://raw.githubusercontent.com/nquandt98/project1cosc3570nquandt/master/cc_institution_details.csv', encoding='ANSI') #add state abbreviations to table stateindex = df.columns.get_loc("state")+1 i=0 result_array = numpy.array([]) for row in df.itertuples(): result_array = numpy.append(result_array, str(us_state_abbrev[row[stateindex]])) i+=1 print(i,end="\r") df['state_code'] = pd.Series(result_array) #create a smaller table to change up as needed t = 
df[['chronname','control','FIPS','retain_value','awards_per_value','aid_value','aid_percentile','exp_award_value','state','state_code','level']] t = t.sort_values(by=['retain_value'],ascending = False) #eliminate 0 rows t = t[t.exp_award_value != 0] t = t[t.aid_value != 0] t.loc[t.exp_award_value == 0, 'exp_award_value'] = numpy.nan t.dropna(inplace=True) #create ratio value t['aid_to_exp'] = t['aid_value']/t['exp_award_value'] #select only public and 4year t = t[t.control == 'Public'] t = t[t.level == '4-year'] # #copy table for cleaned graph t2 = t i='aid_to_exp' t2.loc[t2.aid_to_exp == 0, 'aid_to_exp'] = numpy.nan t2.dropna(inplace=True) t2['Log_' + i] = numpy.abs(numpy.log(t2[i])) q75, q25 = numpy.percentile(t2.Log_aid_to_exp.dropna(), [75 ,25]) iqr = q75 - q25 #use IQR to remove outliers statistically min = q25 - (iqr*1.5) max = q75 + (iqr*1.5) t2['Outlier'] = 0 i ='Log_aid_to_exp' t2.loc[t2[i] < min, 'Outlier'] = 1 t2.loc[t2[i] > max, 'Outlier'] = 1 t2 = t2[t2.Outlier == 0] #create a trace for the graphs barGraphOriginalTrace = go.Bar(y=t.aid_to_exp,x=t.retain_value, name= 'Original') barGraphCleanedTrace = go.Bar(y=t2.aid_to_exp,x=t2.retain_value, name = 'Outliers Removed') scatterGraphOriginalTrace = go.Scatter( x=t.retain_value,y=t.aid_to_exp, mode = 'markers', name = 'Original' ) scatterGraphCleanedTrace = go.Scatter( x=t2.retain_value,y=t2.aid_to_exp, mode = 'markers', name = 'Outliers Removed' ) #compile traces over eachother bardata = [barGraphOriginalTrace,barGraphCleanedTrace] scatterdata = [scatterGraphOriginalTrace,scatterGraphCleanedTrace] #create layouts layout = {'xaxis': {'title': 'Retention Percentage of Students 1st-2nd Year'}, 'yaxis': {'title': 'Ratio of Aid Given to Cost of Attending'}, 'barmode': 'group', 'title': 'Comparison'} layout2 = {'xaxis': {'title': 'Retention Percentage of Students 1st-2nd Year'}, 'yaxis': {'title': 'Ratio of Aid Given to Cost of Attending'}, 'title': 'Scatter Comparison'} barFigure = go.Figure(data = 
bardata,layout= layout) ScatterFigure = go.Figure(data = scatterdata, layout = layout2) colorscale1 = ['#7A4579', '#D56073', 'rgb(236,158,105)', (1, 1, 0.2), (0.98,0.98,0.98)] histogramFigure = ff.create_2d_density(t2.aid_to_exp, t2.retain_value, colorscale=colorscale1, hist_color='rgb(0, 68, 124)', point_size=3 ) #plot graphs to plotly py.iplot(histogramFigure, filename='histogram_subplots') py.iplot(ScatterFigure, filename = 'ScatterPlot') py.iplot(barFigure, filename = 'BarChart') # + #use cleaned data for futher research #create choropleth for states ratio values values = t2['aid_to_exp'].values print(values.max()) scl = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\ [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']] data = [ dict( type='choropleth', colorscale = scl, autocolorscale = False, locations = t2['state_code'], z = values.astype(float), locationmode = 'USA-states', text = "", marker = dict( line = dict ( color = 'rgb(255,255,255)', width = 2 ) ), colorbar = dict( title = "Ratio of Aid Given to Cost of Attending" ) ) ] layout = dict( title = 'Aid of Schools', geo = dict( scope='usa', projection=dict( type='albers usa' ), showlakes = True, lakecolor = 'rgb(255, 255, 255)', ), ) fig = dict(data=data, layout=layout) py.iplot(fig, filename='d3-cloropleth-map1') # + #use cleaned data for futher research #create choropleth for states retention values values2 = t2['retain_value'].values print(values2.max()) scl2 = [[0.0, 'rgb(242,240,247)'],[0.2, 'rgb(218,218,235)'],[0.4, 'rgb(188,189,220)'],\ [0.6, 'rgb(158,154,200)'],[0.8, 'rgb(117,107,177)'],[1.0, 'rgb(84,39,143)']] data2 = [ dict( type='choropleth', colorscale = scl2, autocolorscale = True, locations = t2['state_code'], z = values2.astype(float), locationmode = 'USA-states', text = "", marker = dict( line = dict ( color = 'rgb(255,255,255)', width = 2 ) ), colorbar = dict( title = "Percent of Retainment" ) ) ] layout2 = dict( title = 'Retention 
Rate', geo = dict( scope='usa', projection=dict( type='albers usa' ), showlakes = True, lakecolor = 'rgb(255, 255, 255)', ), ) fig2 = dict(data=data2, layout=layout2) py.iplot(fig2, filename='d3-cloropleth-map2') # + #create a sample table for the paper table1 = Table.read_table('https://raw.githubusercontent.com/nquandt98/project1cosc3570nquandt/master/cc_institution_details.csv', encoding='ANSI') table1 = table1.sample(10).select('chronname','state','control','student_count','aid_value','exp_award_value') table1 = table1.with_column('aid_to_exp', table1.column('aid_value')/table1.column('exp_award_value')) tabledf = table1.to_df() tablefile = go.Table( header=dict(values=['College','State','Type','Student Count','Financial Aid Given', 'Cost of Attending til Gradution', 'Aid/Cost'], fill = dict(color='#C2D4FF'), align = ['left'] * 5), cells=dict(values=[tabledf.chronname,tabledf.state,tabledf.control,tabledf.student_count,tabledf.aid_value,tabledf.exp_award_value,tabledf.aid_to_exp], fill = dict(color='#F5F8FF'), align = ['left'] * 5)) datatable = [tablefile] figtable = go.Figure(data=datatable) py.iplot(datatable, filename = 'example_sample') # + #create a scatter of california schools only #attempt to say california has higher rates and better ratios californiaschools = t[t.state == 'California'] caltrace = go.Scatter( x=californiaschools.retain_value,y=californiaschools.aid_to_exp, mode = 'markers', name = 'California Only', marker = dict( size = 10, color = 'rgba(152, 0, 0, .8)', line = dict( width = 2, color = 'rgb(0, 0, 0)' ) ) ) linetrace = go.Scatter( x = [70,100], y = [.15,.15], mode = 'lines', name = '.15 Line') calscatterdata = [caltrace,linetrace] callayout = {'xaxis': {'title': 'Retention Percentage of Students 1st-2nd Year'}, 'yaxis': {'title': 'Ratio of Aid Given to Cost of Attending'}, 'title': 'Scatter California'} calfig = go.Figure(data = calscatterdata, layout = callayout) py.iplot(calfig, filename = 'CalScatterPlot') # + #this is extra code 
that I had use to convert lat long to FIPS, #but because it is a json response parser the code takes to long to run #everytime thus I just replaced my entire csv with the new one #latindex = df.columns.get_loc("lat_y")+1 #lonindex = df.columns.get_loc("long_x")+1 #i=0 #result_array = numpy.array([]) #for row in df.itertuples(): #url = ('https://geo.fcc.gov/api/census/block/find?latitude=' + str("%.2f" % float(row[latindex])) + '&longitude=' + str("%.2f" % float(row[lonindex])) + '&format=json') #response = requests.get(url) #locdata = response.json() # result_array = numpy.append(result_array, str(locdata['County']['FIPS'])) # i+=1 # print(i,end="\r") #df['FIPS'] = pd.Series(result_array) #df.to_csv('df21.csv', encoding='ANSI', index=False)
project1/quandt-project1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 9.5.beta0 # language: sage # name: sagemath # --- # 1. My laptop has been fully erased due to a storage problem, part of my data and document has been missed. # 2. have checked `fan.py` and `cone.py` file, found the following places that could be modified potentially. # `fan.py` # # 1. function `is_face_of` (in class `cone`) # # 2. function `self._contains` (in class `RationalPolyhedralFan`), #1569, #1913, `self.__contains__` # # 3. cone.intersection (in class `cone`) (#610) # # `cone.py` # # 1. `cone._contains()` #1624 # # 2. `cone._ambient_space_point`, ..., relative._interior_point, # # 3. #2326 embed # # 4. `cone.intersections`, #2997 # # 5. `is_face_of` #3122 # If cone $A$ and cone $B$ are intersected, then their token $A'$, $B'$, whose vertexes comes from intersections between surface of ojbects $||x||_1 <= 1$ and generating rays of each cones, are also intersected. In turn, their boxes in rtree also interesected. # # Thus, if their boxes in rtree are not intersected with each other, then cone $A$ and cone $B$ will not intersected with each other. # # In addition, if the intersection of cone $A$ and cone $B$ is only one ray, then the above claim still works. # # For the most of above cases, `rtree` was used when element-to-set happens, such as face-to-cone, (`is_face_of`), cone-to-fan, (`RationalPolyhedralFan._contains`). We can always use preseted linear project vector generated from the set to give the highest ability of seperation. # # And we can build a new subclass of those class for a `rtree` flavor. # For the peer-to-peer operation, such as `cone.intersection()`, we can also use `rtree`. # From tokens to the boxes, the linear project vectors play an important role for transformation and should be chosed carefully. # # 1. 
Since all of those tokens will be uniformly transformed by those vectors, they should be decided in advance (they could be decided adaptively, but how to apply that is another problem).
logfiles/log43.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: .venv-mpl # language: python # name: .venv-mpl # --- # # Clustering CIML # # Clustering experiment on CIML. # # **Motivation:** During CIML supervised learning on multiple classification experiments, where the classes are cloud operators providing the VMs to run CI jobs, the classes predicted with the best metrics were those with the higher amount of samples in the dataset. # We want to evaluate if unsupervised learning can group those cloud providers with high support in separate clusters. # # Clustering algorithm: k-means. # <br>Method for deciding the number of clusters: elbow method and silhouette score. # from ciml import gather_results from ciml import tf_trainer from sklearn.cluster import KMeans from sklearn.mixture import GaussianMixture import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import pandas as pd from mpl_toolkits.mplot3d import Axes3D import matplotlib.cm as cmx import matplotlib.colors as pltcolors import matplotlib.pyplot as plt import plotly.express as px from plotly.subplots import make_subplots from sklearn import metrics from scipy.spatial.distance import cdist from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.cm as cm # ## Data loading and analysis # From the supervised learning experiments on multiple data classification on CIML data, the best results were obtained for the following experiment: # * Features from dstat data: User CPU `usr` and Average System Load `1m`. # * Data resolution: 1 minute # * Classes reduction: cloud providers with several regions were mapped to a single class. # * Model hyperparameters: # * NW topology: DNN with 3 hidden layers and 100 units per layer. # * Activation function: RELU. # * Output layer: Sigmoid. 
# * Initial learning rate: 0.05 # * Optimizer: Adagrad # # We will load the dataset used for this experiment and analyse the distribution of samples per cloud provider. #Define datapath #data_path = '/Users/kw/ciml_data/cimlodsceu2019seed' data_path = '/git/github.com/kwulffert/ciml_experiments/data' #dataset = 'usr_1m-10s-node_provider' dataset = 'usr_1m-1min-node_provider' #Dataset including classes labels = gather_results.load_dataset(dataset, 'labels', data_path=data_path)['labels'] training_data = gather_results.load_dataset(dataset, 'training', data_path=data_path) test_data = gather_results.load_dataset(dataset, 'test', data_path=data_path) config = gather_results.load_model_config(dataset, data_path=data_path) classes = training_data['classes'] examples = training_data['examples'] example_ids = training_data['example_ids'] # Create an int representation of class unique_classes = list(set(classes)) dict_classes = dict(zip(unique_classes, list(range(len(unique_classes))))) int_classes = [dict_classes[x] for x in classes] df_data = pd.DataFrame(examples, columns=labels, index=example_ids) df_data['classes'] = int_classes # The dataset contains 185 feautures and 2377 samples. Each sample is a CI job run. #Let's have a look at the data df_data.shape # We now list the cloud provider clases in the dataset and see how many samples the dataset contains per class. 
#Cloud providers in the dataset and their numerical mapping classes_count = pd.DataFrame.from_dict(dict_classes, orient='index').reset_index() classes_count = classes_count.rename(columns={'index':'cloud_prov',0:'id'}) classes_count #Add the total amount of samples in the dataset per cloud provider to have an overall view of the dataset total_count = pd.DataFrame(df_data['classes'].value_counts()).add_suffix('_count').reset_index() classes_count['count'] = classes_count.apply( lambda x: (total_count[total_count['index']==x['id']]['classes_count']).values[0], axis=1, result_type = 'expand') classes_count.sort_values(by='count', ascending=False) # ## Determine the optimal number of clusters # Next step is to determine the optimal number of clusters for training our k-means clustering model. # <br>We will use the elbow method and the silhouette score to find out their recommendation. #Numpy representation of the dataframe df_data. #This representation is needed for calculating the silhouette coefficients. cluster_examples = df_data.to_numpy() cluster_examples.shape # ### Elbow method # In cluster analysis, the elbow method is a heuristic used in determining the number of clusters in a data set. # <br>The method consists of plotting the explained variation as a function of the number of clusters, and picking the elbow of the curve as the number of clusters to use.[1](https://en.wikipedia.org/wiki/Elbow_method_(clustering)#:~:text=In%20cluster%20analysis%2C%20the%20elbow,number%20of%20clusters%20to%20use.) 
# + # k means determine k using elbow method distortions = [] K = range(1,10) X = cluster_examples for k in K: kmeanModel = KMeans(n_clusters=k).fit(X) kmeanModel.fit(X) distortions.append(sum(np.min(cdist(X, kmeanModel.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0]) # Plot the elbow plt.plot(K, distortions, 'bx-') plt.xlabel('k') plt.ylabel('Distortion') plt.title('The Elbow Method showing the optimal k') plt.show() # - # The elbow method suggest running k-means with 2 clusters. # ### Silhouette score # The elbow method can be ambiguous, as an alternative the average silhouette method can be used. # <br>The silhouette value is a measure of how similar an object is to its own cluster (cohesion) compared # <br>to other clusters (separation). The silhouette ranges from −1 to +1, where a high value indicates that # <br>the object is well matched to its own cluster and poorly matched to neighboring clusters. # <br>If most objects have a high value, then the clustering configuration is appropriate. # <br>If many points have a low or negative value, then the clustering configuration may have too many or too few clusters. [2](https://en.wikipedia.org/wiki/Silhouette_(clustering)#:~:text=Silhouette%20refers%20to%20a%20method,consistency%20within%20clusters%20of%20data.&text=The%20silhouette%20ranges%20from%20%E2%88%921,poorly%20matched%20to%20neighboring%20clusters.) # + X = cluster_examples range_n_clusters = (2,3,4,5,6,7,8) for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. 
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=555) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. # This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print("For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = \ sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.nipy_spectral(float(i) / n_clusters) ax1.fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 2nd Plot showing the actual clusters formed colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k') # Labeling the clusters centers 
= clusterer.cluster_centers_ # Draw white circles at cluster centers ax2.scatter(centers[:, 0], centers[:, 1], marker='o', c="white", alpha=1, s=200, edgecolor='k') for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50, edgecolor='k') ax2.set_title("The visualization of the clustered data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle(("Silhouette analysis for KMeans clustering on sample data " "with n_clusters = %d" % n_clusters), fontsize=14, fontweight='bold') plt.show() # - # For 2,3,5 and 6 clusters, the silhouette coefficient has higher values with best clustering separation for 2 clusters. # ## Clustering Experiments # We run now the experiment using k-means with two, three, five and six clusters and evaluate how the cloud providers are grouped in them. # <br>First we define the functions to execute the training and create an overview of the results. experiments = [2,3,5,6] data_clusters = df_data.copy() data_clusters.head() def k_training(c): clusterer = KMeans(n_clusters=c, random_state=555) cluster_labels = clusterer.fit_predict(X) k_labels = clusterer.labels_ data_clusters['clusters_'+str(c)] = k_labels #Create a dataframe with the original dataset and the resulting cluster label found during training of k-means. classes_totals = data_clusters['classes'].value_counts() # We define a function to produce an overview of the resulting clustering including: # * List of cloud providers in each cluster. # * Percentage of the overall samples of the cloud provider included in the cluster `pclass`. # * Percentage of the cluster covered by the cloud provider `pcluster`. 
def statistics(c):
    """Summarize how the original classes distribute over the k=c clusters.

    Returns a DataFrame with one row per (cluster, class) pair:
    - classes_count: number of samples of the class inside the cluster
    - p_class: percentage of the class's samples that fall in the cluster
    - p_cluster: percentage of the cluster made up by the class
    - cloud_prov: human-readable provider name for the numeric class id
    """
    col = 'clusters_' + str(c)
    clusters_totals = data_clusters[col].value_counts()
    stats = pd.DataFrame(data_clusters.groupby(by=[col, 'classes'])['classes'].count())
    stats = stats.add_suffix('_count').reset_index()
    # Share of each class that ended up in this cluster.
    stats['p_class'] = (stats.apply(
        lambda row: 100 * row['classes_count'] / classes_totals[row['classes']],
        axis=1, result_type='expand')).round(2)
    # Share of the cluster accounted for by this class.
    stats['p_cluster'] = (stats.apply(
        lambda row: 100 * row['classes_count'] / clusters_totals[row[col]],
        axis=1, result_type='expand')).round(2)
    # Map the numeric class id back to the cloud provider name.
    stats['cloud_prov'] = stats.apply(
        lambda row: (classes_count[classes_count['id'] == row['classes']]['cloud_prov']).values[0],
        axis=1, result_type='expand')
    return stats

# We define a function to highlight in the table returned by `stats` the class with biggest coverage within a cluster.

def highlight_biggestclass(row):
    """Row styler for pandas Styler.apply: orange when the class dominates the
    cluster (p_cluster > 50), cyan when the cluster captures most of the class
    (p_class > 50), white otherwise. Returns one CSS string per column (6)."""
    if row.p_cluster > 50:
        color = 'background-color: orange'
    elif row.p_class > 50:
        color = 'background-color: cyan'
    else:
        color = 'background-color: white'
    return [color] * 6

# # Experiments runs and results

# Comparing with the amount of samples of each cloud provider in the original dataset

classes_count.sort_values(by='count', ascending=False)

# ## Experiment with 2 clusters

k_training(2)
stats = statistics(2)
stats.style.apply(highlight_biggestclass, axis=1)

# Besides cloud operator `vexxhost`, which is distributed in the two clusters, the remaining cloud operators are separated in the two clusters.
# <br>However, this result is not significant for the aim of our experiments.

# ## Experiment with 3 clusters

k_training(3)
stats = statistics(3)
stats.style.apply(highlight_biggestclass, axis=1)

# Clustering of the cloud providers is divisive and not significant.

# ## Experiment with 4 clusters

k_training(4)
stats = statistics(4)
stats.style.apply(highlight_biggestclass, axis=1)

# Three of the cloud operators have predominance in separate clusters.
# <br>Cloud operator `rax` is the one with the highest support in the dataset and dominates cluster 2, even though with only 20% of the samples of its class.
# # Cloud operator `rax` has the highest support and had an unique cluster only for the experiment with 2 clusters, otherwise it was split into two clusters with the highest coverage of 79% of samples in a cluster for the experiment with 3 and 4 clusters. This might be due to the regions that were reduced to a single class. # # Cloud operator `ovh` had the best coverage of samples in a single cluster for the experiment with 4 clusters (99%). # # In general, the dstat data from the CI jobs has potential for further exploration using unsupervised learning. <br>Especially clustering of failed CI jobs could help engineers to better triage failures coming from the gate pipeline when considering the CI system in Openstack. Thsi approach could be used in other CI systems as well.
Clustering CIML.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <a href="https://www.kaggle.com/mohamedahmedx2/notebook-isic-omdena-v1-0?scriptVersionId=89871097" target="_blank"><img align="left" alt="Kaggle" title="Open in Kaggle" src="https://kaggle.com/static/images/open-in-kaggle.svg"></a> # + [markdown] _uuid="a497a817-c08d-47f0-9360-783017b9395c" _cell_guid="2d63c794-098a-49fc-af65-d9e4a8f6b56e" # # **WORK IN PROGRESS** # - # downloaded resized dataset [here](https://www.kaggle.com/cdeotte/jpeg-melanoma-256x256). # + [markdown] _uuid="425b5915-9a9c-4317-a0e4-b8d36e064064" _cell_guid="faf16ab1-c9c3-4d13-b8ca-8013ebfa5550" # # Importing libraries # + _uuid="735c2351-0b52-4a89-871c-a897ed865d7b" _cell_guid="17b97765-a33b-49a4-a6e0-cf61281e0d39" jupyter={"outputs_hidden": false} import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn import preprocessing,metrics from sklearn.utils import shuffle import tensorflow as tf from tensorflow import keras from tensorflow.keras.utils import to_categorical from tensorflow.keras import layers from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Dense, Flatten, BatchNormalization from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img, img_to_array, array_to_img import cv2 from tqdm import tqdm import os from PIL import Image import gc from keras.callbacks import EarlyStopping from imblearn.over_sampling import RandomOverSampler from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix import seaborn as sns import tensorflow_addons as tfa print(tf. 
__version__) # %config Completer.use_jedi = False # makes auto completion work in notebook # + [markdown] _uuid="203cab4c-baa2-4ec2-8dc1-2eb725f07586" _cell_guid="27731f03-f240-450e-87a7-3f5ada8672c0" # ## exploring the metadata csv file # + _uuid="4282dc04-e322-4920-863b-d7bc30cdfbe3" _cell_guid="0d831f62-afb9-40c3-8a80-cc3868c3e52d" jupyter={"outputs_hidden": false} #update with the dataset in use directory = "../input/jpeg-melanoma-256x256/" train = pd.read_csv(directory + 'train.csv') test = pd.read_csv(directory + 'test.csv') # + _uuid="d6a46863-fe56-4635-9858-3e98b284d4b5" _cell_guid="4123ad59-88cc-48e6-87f4-214b07e678d5" jupyter={"outputs_hidden": false} test.head(5) # + _uuid="bc11915e-e1ad-4c05-9e3c-c974a1523921" _cell_guid="003c5aef-bb8c-41fa-894c-3dd4e9f015fd" jupyter={"outputs_hidden": false} # Kaggle users reported some duplicate images in the dataset, which might impact the model, this code removes them dup = pd.read_csv("/kaggle/input/siim-list-of-duplicates/2020_Challenge_duplicates.csv") drop_idx_list = [] for dup_image in dup.ISIC_id_paired: for idx,image in enumerate(train.image_name): if image == dup_image: drop_idx_list.append(idx) print("no. 
of duplicates in training dataset:",len(drop_idx_list)) train.drop(drop_idx_list,inplace=True) print("updated dimensions of the training dataset:",train.shape) # + [markdown] _uuid="f63def60-571b-4553-be25-48d556d8b8f1" _cell_guid="cd5a4508-678f-483c-b00b-7d1c6596b55e" # # Explore the data # + _uuid="d3973eb6-04cb-47aa-b23f-626d691d9a32" _cell_guid="e1de56c2-0109-4cab-bf22-0114a5aba9a6" jupyter={"outputs_hidden": false} plt.rcParams['figure.figsize'] = (10,10) compare = train["target"].value_counts() print(compare) labels = ['benign','malignant'] sizes = [compare[0],compare[1]] explode = (0, 0.1) fig1, ax1 = plt.subplots() ax1.pie(sizes, explode=explode, labels=labels, autopct='%0.1f%%', shadow=False, startangle=-45) plt.show() # + [markdown] _uuid="ffac8f2e-6cad-4b41-862d-064ce4b29008" _cell_guid="a0e29bbc-f0f2-4796-b345-49f89165b31b" # ### Heavily skewed data, $\approx 2\% $ of the data is malignant, We need to take this into consideration # + _uuid="762eeb80-f052-42a8-b246-7dbd51a93d0c" _cell_guid="8a4fa0a9-1510-4114-8a3f-522103484c10" jupyter={"outputs_hidden": false} df_benign=train[train['target']==0] df_malignant=train[train['target']==1] # + _uuid="7bb13a66-ac10-43ef-a4b0-f69b84467b56" _cell_guid="d9eef7d3-d64c-4d53-b781-afa567387939" jupyter={"outputs_hidden": false} print('Benign Cases') benign=[] df_b=df_benign.head(30) df_b=df_b.reset_index() for i in range(30): img = cv2.imread(directory + "train/" + df_benign['image_name'].iloc[i]+'.jpg') img = cv2.resize(img, (224,224)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32)/255. 
benign.append(img) f, ax = plt.subplots(5,6, figsize=(15,10)) for i, img in enumerate(benign): ax[i//6, i%6].imshow(img) ax[i//6, i%6].axis('off') plt.show() # + _uuid="85c01efc-259b-43dc-8f20-b0dd328efab4" _cell_guid="65db2386-5e8e-424d-9d07-f7b687a6ecfe" jupyter={"outputs_hidden": false} print('Malignant Cases') malignant=[] df_m=df_malignant.head(30) df_m=df_m.reset_index() for i in range(30): img = cv2.imread(directory + "train/"+ df_m['image_name'].iloc[i]+'.jpg') img = cv2.resize(img, (224,224)) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = img.astype(np.float32)/255. malignant.append(img) f, ax = plt.subplots(5,6, figsize=(15,10)) for i, img in enumerate(malignant): ax[i//6, i%6].imshow(img) ax[i//6, i%6].axis('off') plt.show() # + [markdown] _uuid="d87b2dd5-9360-46ea-b55b-2b5768ac80b6" _cell_guid="da64e29f-3c82-4434-82d8-d4eec38ce49c" # ## Setting the dataset # + _uuid="4c47e48b-7884-42d2-8ed4-c4dcfdaf7668" _cell_guid="ad010f48-e8cc-4053-956a-9ba1e536a6b3" jupyter={"outputs_hidden": false} # define parameters for model training, edit before cell for image dimension IMG_DIM = 256 # for input reshape layer batch_size = 512 num_classes = 2 epochs = 30 validation_split = 0.15 # - # Note: Accuracy is not a helpful metric for this task. You can have 99.8%+ accuracy on this task by predicting False all the time. # Note: that the model is fit using a larger than default batch size, this is important to ensure that each batch has a decent chance of containing a few of the postivie (malignant) samples. If the batch size was too small, they would likely have no malignant case to learn from. 
# # [Tensorflow tutorial on imbalanced data](https://www.tensorflow.org/tutorials/structured_data/imbalanced_data#setup) # + _uuid="0d4b4517-db61-488d-9a44-f2714bba4bc9" _cell_guid="826ded24-8ea2-489b-a1df-b6038e1c1eb4" jupyter={"outputs_hidden": false} #loading images into tf.data.dataset file_paths = train["image_name"].values # need to add .jpg labels = train["target"].values #labels = to_categorical(labels,num_classes) x_train, x_val, y_train, y_val = train_test_split(file_paths, labels, test_size=validation_split, random_state=32, stratify = labels) ds_train = tf.data.Dataset.from_tensor_slices(( 'train/'+ x_train + '.jpg', y_train)) ds_val = tf.data.Dataset.from_tensor_slices(( 'train/'+ x_val + '.jpg', y_val)) #read image, reshape and normalize def read_image(image_file, label): image = tf.io.read_file(directory + image_file) image = tf.image.decode_image(image, dtype=tf.float32, channels=3) #image = tf.cast(image, tf.float32) / 255.0 # not needed for efficenetnet, read documentation image = tf.reshape(image, [IMG_DIM, IMG_DIM, 3]) return image , label #data augmentation def augment(image,label): datagen = tf.keras.preprocessing.image.ImageDataGenerator(width_shift_range=0.01, height_shift_range=0.01, shear_range=0.01, rotation_range=15, zoom_range=0.01) return image, label print("number of training images = {}".format(len(ds_train)), "number of val images = {}".format(len(ds_val))) AUTOTUNE = tf.data.experimental.AUTOTUNE ds_train = ds_train.map(read_image, num_parallel_calls = AUTOTUNE).map(augment).batch(batch_size) ds_train = ds_train.prefetch(AUTOTUNE) ds_val = ds_val.map(read_image, num_parallel_calls = AUTOTUNE).batch(batch_size) ds_val = ds_val.prefetch(AUTOTUNE) # + _uuid="458f918a-fa68-43fd-8b74-3758fe7c7060" _cell_guid="66eb8b2e-881c-42a4-97e2-99616002ba6f" jupyter={"outputs_hidden": false} base_model = tf.keras.applications.efficientnet.EfficientNetB3( input_shape = (IMG_DIM, IMG_DIM, 3), weights = 'imagenet', include_top = False ) 
base_model.trainable = False inputs = tf.keras.layers.Input(shape=(IMG_DIM, IMG_DIM, 3)) x = base_model(inputs, training = False) x = tf.keras.layers.GlobalAveragePooling2D()(x) x = tf.keras.layers.Dense(512, activation= 'relu')(x) x = tf.keras.layers.Dropout(0.25)(x) outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x) model = tf.keras.Model(inputs=inputs, outputs= outputs) model.summary() # + #tf.keras.backend.clear_session() # + _uuid="6d5ca649-c02f-4736-8031-43a91ddc4439" _cell_guid="a9a702af-06fc-4173-b362-43ac25b4c798" # Compile optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) METRICS = [keras.metrics.BinaryAccuracy(name='accuracy'), keras.metrics.Precision(name='precision'), keras.metrics.Recall(name='recall'), keras.metrics.AUC(name='auc')] lossfunction = tfa.losses.SigmoidFocalCrossEntropy() #lossfunction = tf.keras.losses.BinaryCrossentropy() model.compile( optimizer=optimizer, loss=lossfunction, metrics=METRICS) class_weights = {0: 0.5, 1: 20} es = EarlyStopping(monitor='val_auc', patience=2, verbose=1) model.summary() # - hist = model.fit(ds_train, batch_size = batch_size, validation_data = ds_val, epochs=epochs, verbose=1, class_weight = class_weights, callbacks=[es]) # + [markdown] _uuid="c8f9104c-705a-4f4e-9e2d-d43785cbd653" _cell_guid="0d4cac2f-5d46-43da-99eb-261fd0f9e863" # ___ # ___ # + _uuid="5155cfbf-3732-40fb-a6ba-39b0c6d6d931" _cell_guid="be6081c3-e781-48df-8a82-e397a83c211f" jupyter={"outputs_hidden": false} plt.plot(hist.history['accuracy'], color='r', label="train accuracy") plt.plot(hist.history['val_accuracy'], color='b', label="validation accuracy") plt.title("Test accuracy") plt.xlabel("Number of Epochs") plt.ylabel("Loss") plt.legend() plt.show() # + _uuid="037f0727-cccb-43e8-823c-bc76688c95c6" _cell_guid="d662f340-b8b6-4586-9397-3707fb78244c" jupyter={"outputs_hidden": false} plt.plot(hist.history['loss'], color='r', label="Train loss") plt.plot(hist.history['val_loss'], color='b', label="validation loss") 
plt.title("Test Loss") plt.xlabel("Number of Epochs") plt.ylabel("Loss") plt.legend() plt.show() # - plt.plot(hist.history['auc'], color='r', label="Train auc") plt.plot(hist.history['val_auc'], color='b', label="validation auc") plt.title("auc") plt.xlabel("Number of Epochs") plt.ylabel("Loss") plt.legend() plt.show() # + def plot_cm(labels, predictions, p=0.5): cm = confusion_matrix(labels, predictions > p) plt.figure(figsize=(10,10)) sns.heatmap(cm, annot=True, fmt="d") plt.title('Confusion matrix @{:.2f}'.format(p)) plt.ylabel('Actual label') plt.xlabel('Predicted label') print('(True postive): ', cm[0][0]) print(' (False Positives): ', cm[0][1]) print('(False Negatives): ', cm[1][0]) print(' (True Negative): ', cm[1][1]) print('total malignant: ', np.sum(cm[1])) val_prediction = model.predict(ds_val) plot_cm(y_val, val_prediction) # + _uuid="b671b1c1-fba8-42eb-8fb3-889eefef5fd8" _cell_guid="01738298-93ca-4021-9f0b-4861be53dc60" jupyter={"outputs_hidden": false} model_save_dir = ('./model_complete_effnetb3_50epochs') #.h5' rename saved model #model.save(model_save_dir + '.h5') # + _uuid="fa674cde-b30c-44f3-a2c8-64438d526e02" _cell_guid="eceb522d-29fa-4f3a-8d9f-7ac00afc8536" jupyter={"outputs_hidden": false} test # + _uuid="3033f45b-025e-436c-8c7c-6e64a018aa9e" _cell_guid="a6048116-89e9-499d-9241-cf11de6c89ee" jupyter={"outputs_hidden": false} saved_model = tf.keras.models.load_model(model_save_dir + '.h5') # + _uuid="14e2e362-0e3c-4721-885a-18cb6a9b5c9d" _cell_guid="bc696b96-f882-49cb-a1f3-baf9290d8143" jupyter={"outputs_hidden": false} file_paths_test = test["image_name"].values # need to add .jpg ds_test = tf.data.Dataset.from_tensor_slices(('test/'+ file_paths_test + '.jpg')) def read_image_test(image_file): image = tf.io.read_file(directory + image_file) image = tf.image.decode_image(image, dtype=tf.float32, channels=3) #image = tf.cast(image, tf.float32) / 255.0 image = tf.reshape(image, [IMG_DIM, IMG_DIM, 3]) return image ds_test = 
ds_test.map(read_image_test, num_parallel_calls = AUTOTUNE).batch(batch_size) IMG_DIM # + _uuid="607e89ef-aecb-4d0a-8ce2-3cff32fdd098" _cell_guid="c701f6b0-b085-4a86-9618-f775bb2cf732" jupyter={"outputs_hidden": false} len(file_paths_test) # + _uuid="30c59bd8-d449-4509-842a-1a63a98dc039" _cell_guid="0a3a5008-7784-4909-9c05-92096bbf42fd" jupyter={"outputs_hidden": false} prediction=model.predict(ds_test) # + _uuid="8cac1102-e732-465a-ab88-be7739c477e6" _cell_guid="4687b2f3-9412-4c3d-9bf1-41fcacb7999d" jupyter={"outputs_hidden": false} prediction.shape # + _uuid="ca803444-8302-4a56-aa77-adb963d9819e" _cell_guid="41d6cbb5-b1fa-4b2b-a55f-18b34bfa7952" jupyter={"outputs_hidden": false} prediction = pd.DataFrame(prediction) prediction = prediction.idxmax(axis=1) prediction.shape # + _uuid="8baa8762-885c-48f0-a1e8-cd3977e64e3e" _cell_guid="8528512d-8c0d-46f0-b875-9fba4ac87406" jupyter={"outputs_hidden": false} output_results_pd = pd.read_csv("../input/jpeg-melanoma-256x256/sample_submission.csv") output_results_pd['target'] = prediction.ravel().tolist() # + _uuid="82a56583-68b2-4782-8fb2-4d930aae6aa0" _cell_guid="4d42af1a-3765-49ef-b046-5e04ddfc2b8e" jupyter={"outputs_hidden": false} submission_file = output_results_pd.to_csv(model_save_dir +".csv", index = False) # + [markdown] _uuid="7e5902da-bbf0-42bc-8631-e68448d1881d" _cell_guid="b4881765-5496-4883-9c4b-564548ff656d" # # **Work in progress** # - # **conclusion:** # the model is suffering from extreamly under-represented class under fitting, which is appearnt as the accuracy of train and val is high however the precision and recall are quite low even when using focal loss based on the paper found [here](https://arxiv.org/abs/1708.02002) is implemented in the model # # **in progress:** # implemnt of class_weight for weighting the loss function (during training only). This can be useful to tell the model to "pay more attention" to samples from an under-represented class.
Melanoma Classification using Convolution Neural Network.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="o3h-MA4Gk7As"
# # Visualizing a Chidori manzai routine with BeautifulSoup, MeCab and WordCloud
# BeautifulSoup
# http://kondou.com/BS4/
# Mecab
# https://taku910.github.io/mecab/
# WordCloud
# https://github.com/SamuraiT/mecab-python3#installation
#
# <a href="https://colab.research.google.com/github/kaz12tech/ai_demos/blob/master/mecab_wordcloud.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="BbluyW3IJQ_z"
# Install MeCab plus the neologd dictionary.
# !pip install mecab-python3
# !pip install unidic-lite
# !git clone --depth 1 https://github.com/neologd/mecab-ipadic-neologd.git
# !sudo apt install mecab libmecab-dev mecab-ipadic-utf8 git make curl xz-utils file
# %cd /content/mecab-ipadic-neologd
# !./bin/install-mecab-ipadic-neologd -n
# answer "yes" when prompted

# + id="qpMrAhwruZi8"
import MeCab
from wordcloud import WordCloud  # imported once; the original imported it twice
from collections import Counter
from bs4 import BeautifulSoup
import requests
import json
import re

# + id="TWOntYLAa_vu"
# Install a Japanese font (WordCloud needs it to render Japanese text).
# !apt -y install fonts-ipafont-gothic


# + id="08eSwCCwY-3G"
def get_noun(text):
    """Tokenize ``text`` with MeCab and return general nouns ranked by frequency.

    Returns:
        list of (word, count) tuples, most frequent first.
    """
    # Morphological analysis with MeCab and the neologd dictionary.
    mecab = MeCab.Tagger('-Ochasen -d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')
    node = mecab.parseToNode(text)
    words = []
    while node is not None:
        # Extract the part of speech and its first sub-classification.
        pos_type = node.feature.split(',')[0]
        subtype = node.feature.split(',')[1]
        # Keep only "general" nouns (名詞 / 一般).
        if pos_type in ['名詞']:
            if subtype in ['一般']:
                # Drop the bare long-vowel mark.
                if node.surface != "ー":
                    words.append(node.surface)
        node = node.next
    # Count occurrences and sort by frequency.
    words_count = Counter(words)
    result = words_count.most_common()
    return result


# + id="pvgn_d8SZnF8"
def show_wordcloud(result):
    """Render a word cloud from the (word, count) pairs produced by get_noun()."""
    # Convert to the dict form expected by WordCloud.fit_words().
    dic_result = dict(result)
    # Generate the word-cloud image.
    wordcloud = WordCloud(
        background_color='black',
        font_path='/usr/share/fonts/truetype/fonts-japanese-gothic.ttf',
        width=900,
        height=600,
        colormap='tab10'
    ).fit_words(dic_result)
    # Display the image.
    import matplotlib.pyplot as plt
    from matplotlib import rcParams
    plt.figure(figsize=(15, 10))
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()


# + id="QuHOOGRNJ_FU"
# Quick MeCab sanity check.
mecab = MeCab.Tagger('-Ochasen -d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd')
data = mecab.parse('これからWebスクレイピングで漫才のテキストデータを取得しWordCloudでデータビジュアライゼーションします。')
print(data)

# + id="y_huX1qxZ9A5"
url = 'https://www.smule.com/song/%E5%8D%83%E9%B3%A5-%E6%BC%AB%E6%89%8D-%E3%82%AF%E3%82%BB%E3%81%AE%E3%81%99%E3%81%94%E3%81%84%E5%AF%BF%E5%8F%B8%E5%B1%8B-%E5%8D%83%E9%B3%A5-karaoke-lyrics/6268102_6268102/arrangement'
res = requests.get(url)
print(res)
# Beware: requesting too often makes the server return HTTP 418.

# + id="dxDhSk27bKY1"
# Extract the lyrics JSON embedded in the page's window.DataStore script tag.
soup = BeautifulSoup(res.content, "lxml", from_encoding='utf-8')

for script_tag in soup.find_all('script'):
    if 'window.DataStore' in str(script_tag):
        target_text = ''
        text = script_tag.get_text()
        # Split on newlines.
        lines = text.splitlines()
        for line in lines:
            if "Song:" in line:
                target_text = line
        # Strip the "Song:" prefix.
        target_text = target_text.replace("Song:", "")
        # Strip the trailing comma.
        target_text = target_text[::-1].replace(",", "", 1)[::-1]
        dialogue = json.loads(target_text)['lyrics']
print(dialogue)

# + id="HbeZJD6vodnL"
# Remove HTML tags.
dialogue = re.sub('<.+?>', '', dialogue)
# Normalize ドゥーゾー to ドゥーゾ.
dialogue = dialogue.replace("ドゥーゾー", "ドゥーゾ")
print(dialogue)

# + id="UuBn-4UCWMEV"
# %cd /content/
# Download "Rashomon" from Aozora Bunko.
# !curl -O "https://www.aozora.gr.jp/cards/000879/files/127_ruby_150.zip"
# Unzip the archive.
# !unzip 127_ruby_150.zip
# Keep only the body text in a separate file.
# !sed -n 18,54p rashomon.txt > rashomon_content.txt

# Read the text file. Use a context manager so the handle is closed;
# the original left the file object open.
with open('/content/rashomon_content.txt', encoding='shift_jis') as file:
    book_text = file.read()

# + id="XwHHIbfojcyd"
# Morphological analysis.
# The manzai routine
manzai_result = get_noun(dialogue)
# Rashomon
book_result = get_noun(book_text)

# + id="zmfAGqPWjrgi"
# Visualize both with WordCloud.
show_wordcloud(manzai_result)
show_wordcloud(book_result)
mecab_wordcloud.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Real-time analysis

import numpy as np
import xarray as xr
from datetime import datetime
from xhistogram.xarray import histogram
from rasterio import features
import rioxarray
import matplotlib.pyplot as plt
from affine import Affine
from ast import literal_eval
from shapely.geometry import Polygon
import _pickle as pickle


# ## Utils

# **plot_hist**

def plot_hist(x_min, count):
    """Bar-plot a SOC-stock change histogram.

    Args:
        x_min: left edges of the histogram bins.
        count: per-bin counts; plotted as a percentage of the total area.
    """
    width = x_min[1] - x_min[0]
    width -= width / 5.
    x_min += width / (5. * 2)
    per = count / count.sum() * 100
    plt.figure(figsize=(10, 5))
    plt.bar(x_min, per, width=width)
    # Dashed vertical reference line at zero change.
    plt.plot([0, 0], [0, per.max()], color='k', linestyle='--')
    plt.title('Soil Organic Carbon Stock')
    plt.xlabel('SOC stock t C/ha)')
    plt.ylabel('(%) of total area')


# **transform_from_latlon**

def transform_from_latlon(lat, lon):
    """Build an affine transform from 1-d latitude/longitude coordinate arrays."""
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale


# **rasterize**

def rasterize(shapes, coords, latitude='latitude', longitude='longitude', fill=np.nan, **kwargs):
    """Rasterize a list of (geometry, fill_value) tuples onto the given xray
    coordinates. This only works for 1d latitude and longitude arrays.
    """
    transform = transform_from_latlon(coords[latitude], coords[longitude])
    out_shape = (len(coords[latitude]), len(coords[longitude]))
    raster = features.rasterize(shapes, out_shape=out_shape, fill=fill,
                                transform=transform, dtype=float, **kwargs)
    spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
    return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))


# ## Read `xarray.Dataset` from `Zarr` in Amazon S3 bucket

# +
dataset_type = 'experimental-dataset'
group = 'stocks'

# Renamed the file handle: the original used `input`, shadowing the builtin.
with open(f'../data/{dataset_type}_{group}.pkl', 'rb') as fh:
    ds = pickle.load(fh)
ds
# -

# ## Zonal statistics

# **Polygon**

# +
polygon = {
    "type": "FeatureCollection",
    "features": [
        {
            "type": "Feature",
            "properties": {},
            "geometry": {
                "type": "Polygon",
                "coordinates": [
                    [
                        [-63.34716796874999, -34.234512362369856],
                        [-64.22607421875, -35.17380831799957],
                        [-63.896484375, -35.78217070326606],
                        [-63.34716796874999, -35.88905007936092],
                        [-62.86376953124999, -35.46066995149529],
                        [-62.51220703125, -35.08395557927643],
                        [-62.49023437499999, -34.57895241036947],
                        [-63.34716796874999, -34.234512362369856]
                    ]
                ]
            }
        }
    ]
}

geometry = Polygon(polygon.get('features')[0].get('geometry').get('coordinates')[0])
geometry
# -

# **Create the data mask by rasterizing the vector data**

# %%time
shapes = zip([geometry], range(1))
da_mask = rasterize(shapes, ds.coords, longitude='lon', latitude='lat').rename('mask')
ds['mask'] = da_mask

# ## Change

# **Input variables**

years = ['1982', '2017']
depth = '0-30'
nBinds = 80
bindsRange = [-50, 50]

# **Computation**

# +
# %%time
start_date = np.datetime64(datetime.strptime(f'{years[0]}-12-31', "%Y-%m-%d"))
end_date = np.datetime64(datetime.strptime(f'{years[1]}-12-31', "%Y-%m-%d"))

# NOTE(review): shapely bounds are (minx, miny, maxx, maxy); the unpack order
# below assigns miny to `ymax` and maxy to `ymin`, which only works because
# the latitude coordinate is descending — confirm against the dataset.
xmin, ymax, xmax, ymin = geometry.bounds
xds_index = ds.where(ds['mask'].isin(0.0)).sel(lon=slice(xmin, xmax), lat=slice(ymin, ymax))

# Get difference between two dates
diff = xds_index.loc[dict(time=end_date, depth=depth)] - xds_index.loc[dict(time=start_date, depth=depth)]

# Get counts and binds of the histogram
bins = np.linspace(bindsRange[0], bindsRange[1], nBinds + 1)
h = histogram(diff.stocks, bins=[bins], dim=['lat', 'lon'])
count = h.values

mean_diff = diff['stocks'].mean(skipna=True).values
# -

# **Output values**

print(f'Soil Organic Carbon Stock Change: {mean_diff/(int(years[1])-int(years[0]))} t C/ha year')

x_min = bins[:-1]
plot_hist(x_min, count)

# ## Time series

# **Computation**

# +
# %%time
years = [int(str(x).split('-')[0]) for x in ds.coords.get('time').values]

xmin, ymax, xmax, ymin = geometry.bounds
xds_index = ds.where(ds['mask'].isin(0.0)).sel(depth='0-30', lon=slice(xmin, xmax), lat=slice(ymin, ymax))

values = xds_index['stocks'].mean(['lon', 'lat']).values
# -

# **Output values**

plt.plot(years, values)


# ## Cloud function

# **`main.py` file**

# +
import numpy as np
import xarray as xr
from xhistogram.xarray import histogram
from datetime import datetime
from affine import Affine
from rasterio import features
from shapely.geometry import Polygon
import _pickle as pickle
import json


class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars/arrays to native Python types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NpEncoder, self).default(obj)


def transform_from_latlon(lat, lon):
    """Build an affine transform from 1-d latitude/longitude coordinate arrays."""
    lat = np.asarray(lat)
    lon = np.asarray(lon)
    trans = Affine.translation(lon[0], lat[0])
    scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
    return trans * scale


def rasterize(shapes, coords, latitude='latitude', longitude='longitude', fill=np.nan, **kwargs):
    """Rasterize a list of (geometry, fill_value) tuples onto the given xray
    coordinates. This only works for 1d latitude and longitude arrays.
    """
    transform = transform_from_latlon(coords[latitude], coords[longitude])
    out_shape = (len(coords[latitude]), len(coords[longitude]))
    raster = features.rasterize(shapes, out_shape=out_shape, fill=fill,
                                transform=transform, dtype=float, **kwargs)
    spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
    return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))


def compute_values(ds, geometry, years, depth, variable, dataset_type, group, nBinds, bindsRange):
    """Compute histogram counts, mean change and the mean time series for the
    masked region of ``ds``, between the two dates in ``years`` at ``depth``.

    Returns:
        (counts, bins, mean_diff, mean_years, mean_values)
    """
    if dataset_type == 'global-dataset' and group == 'historic':
        start_date = years[0]
        end_date = years[1]
        mean_years = ds.coords.get('time').values
    else:
        start_date = np.datetime64(datetime.strptime(f'{years[0]}-12-31', "%Y-%m-%d"))
        end_date = np.datetime64(datetime.strptime(f'{years[1]}-12-31', "%Y-%m-%d"))
        mean_years = [int(str(x).split('-')[0]) for x in ds.coords.get('time').values]

    xmin, ymax, xmax, ymin = geometry.bounds
    # BUG FIX: the original hard-coded depth='0-30' here, silently ignoring
    # the `depth` argument passed in the request.
    ds_index = ds.where(ds['mask'].isin(0.0)).sel(depth=depth, lon=slice(xmin, xmax), lat=slice(ymin, ymax))

    # Get difference between two dates
    diff = ds_index.loc[dict(time=end_date)] - ds_index.loc[dict(time=start_date)]

    # Get counts and binds of the histogram
    if dataset_type == 'experimental-dataset' and variable == 'concentration':
        # Experimental concentration values are stored x10; rescale.
        diff = diff[variable] / 10.
    else:
        diff = diff[variable]

    bins = np.linspace(bindsRange[0], bindsRange[1], nBinds + 1)
    h = histogram(diff, bins=[bins], dim=['lat', 'lon'])
    counts = h.values

    mean_diff = diff.mean(skipna=True).values
    mean_values = ds_index[variable].mean(['lon', 'lat']).values

    return counts, bins, mean_diff, mean_years, mean_values


def serializer(counts, bins, mean_diff, mean_years, mean_values):
    """Bundle compute_values() outputs into a JSON-serializable dict."""
    return {
        'counts': counts,
        'bins': bins,
        'mean_diff': mean_diff,
        'mean_years': mean_years,
        'mean_values': mean_values
    }


def analysis(request):
    """Cloud-function entry point: run zonal statistics for the request payload."""
    #request = request.get_json()

    # Read xarray.Dataset from pkl
    dataset_type = request['dataset_type']
    group = request['group']
    with open(f'../data/{dataset_type}_{group}.pkl', 'rb') as fh:
        ds = pickle.load(fh)

    # Create the data mask by rasterizing the vector data
    geometry = Polygon(request['geometry'].get('features')[0].get('geometry').get('coordinates')[0])
    shapes = zip([geometry], range(1))
    da_mask = rasterize(shapes, ds.coords, longitude='lon', latitude='lat').rename('mask')
    ds['mask'] = da_mask

    # Compute output values
    counts, bins, mean_diff, mean_years, mean_values = compute_values(
        ds, geometry, request['years'], request['depth'], request['variable'],
        request['dataset_type'], request['group'], request['nBinds'], request['bindsRange'])

    return json.dumps(serializer(counts, bins, mean_diff, mean_years, mean_values), cls=NpEncoder)
# -

payload = {
    "dataset_type": 'experimental-dataset',
    "group": 'stocks',
    "years": ['1982', '2017'],
    "depth": '0-30',
    "variable": 'stocks',
    "nBinds": 80,
    "bindsRange": [-50, 50],
    "geometry": {
        "type": "FeatureCollection",
        "features": [
            {
                "type": "Feature",
                "properties": {},
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [
                            [-63.34716796874999, -34.234512362369856],
                            [-64.22607421875, -35.17380831799957],
                            [-63.896484375, -35.78217070326606],
                            [-63.34716796874999, -35.88905007936092],
                            [-62.86376953124999, -35.46066995149529],
                            [-62.51220703125, -35.08395557927643],
                            [-62.49023437499999, -34.57895241036947],
                            [-63.34716796874999, -34.234512362369856]
                        ]
                    ]
                }
            }
        ]
    }
}

analysis(payload)
processing/Real-time_analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Notes:
# This notebook is to predict demand of Victoria state

# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

from tsa_utils import *
from statsmodels.tsa.stattools import pacf
from sklearn.ensemble import RandomForestRegressor

import warnings
warnings.filterwarnings("ignore")

# show float in two decimal form
plt.style.use('ggplot')
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# -

# ## 1) Load dataset

# Load external data set and some simple preprocessing
solar = pd.read_csv("../../data/external/solar.csv")
solar = solar.loc[solar['Year'].isin([2018, 2019, 2020, 2021])].drop(columns={'Product code', 'Bureau of Meteorology station number'})
solar = solar.rename(columns={'Year': 'year', 'Month': 'month', 'Day': 'day', 'Daily global solar exposure (MJ/m*m)': 'solar_exposure'})
solar.head(3)

df = pd.read_csv("../../data/all.csv").reset_index(drop=True)
df.head(3)

df = df[df.time <= '2021-08-11 23:30:00']
df.tail(3)

# ## 3) Feature Engineering

drop_columns = ['demand_nsw', 'demand_sa', 'demand_tas', 'spot_price_nsw', 'spot_price_sa', 'spot_price_tas', 'spot_price_vic']
vic = df.drop(columns=drop_columns)
vic.columns = ['time', 'inter_gen_nsw', 'inter_gen_sa', 'inter_gen_tas', 'inter_gen_vic', 'demand_vic', 'period']
vic.head(3)

solar.isnull().values.any()

# +
# Feature engineering on datetime
vic['time'] = vic.time.astype('datetime64[ns]')
vic['month'] = vic.time.dt.month
vic['day'] = vic.time.dt.day
vic['day_of_year'] = vic.time.dt.dayofyear
vic['year'] = vic.time.dt.year
vic['weekday'] = vic['time'].apply(lambda x: x.weekday())
vic['week'] = vic.time.dt.week
vic['hour'] = vic.time.dt.hour
# Southern-hemisphere seasons: 1=summer, 2=autumn, 3=winter, 4=spring.
vic.loc[vic['month'].isin([12, 1, 2]), 'season'] = 1
vic.loc[vic['month'].isin([3, 4, 5]), 'season'] = 2
vic.loc[vic['month'].isin([6, 7, 8]), 'season'] = 3
vic.loc[vic['month'].isin([9, 10, 11]), 'season'] = 4
vic.tail(3)
# -

vic['solar_exposure'] = pd.merge(vic, solar, on=['year', 'month', 'day']).solar_exposure

# Add fourier terms (explicit axis=1; bare positional axis is deprecated)
fourier_terms = add_fourier_terms(vic.time, year_k=3, week_k=3, day_k=3)
vic = pd.concat([vic, fourier_terms], axis=1).drop(columns=['datetime'])
vic.head(3)

# Plot autocorrelation
nlags = 144
plot_tsc(vic.demand_vic, lags=nlags)

# +
# Add nlag features: keep the lags (>= 48) with the highest absolute
# partial autocorrelation.
dict_pacf = dict()
list_pacf = pacf(df['demand_vic'], nlags=nlags)
for nlag in range(nlags):
    if nlag >= 48:
        dict_pacf[nlag] = list_pacf[nlag]
dict_pacf = {k: v for k, v in sorted(dict_pacf.items(), key=lambda item: abs(item[1]), reverse=True)}

# 5 highest pacf nlag (the original comment said 10 but only 5 are used)
max_pacf_nlags = list(dict_pacf.keys())[:5]
for nlag in max_pacf_nlags:
    vic['n_lag' + str(nlag)] = df.reset_index()['demand_vic'].shift(nlag)
# -

vic_train = vic[vic["time"] <= "2020-12-31 23:30:00"]
vic_cv = vic[(vic['time'] >= "2021-01-01 00:00:00") & (vic['time'] <= "2021-06-30 23:30:00")].reset_index(drop=True)
vic_test = vic[(vic['time'] >= "2021-07-01 00:00:00") & (vic['time'] <= "2021-08-11 23:30:00")].reset_index(drop=True)

X_train = vic_train.drop(columns=['demand_vic', 'time'])[nlags:]
y_train = vic_train.demand_vic[nlags:]
X_cv = vic_cv.drop(columns=['demand_vic', 'time'])
y_cv = vic_cv.demand_vic
X_test = vic_test.drop(columns=['demand_vic', 'time'])
y_test = vic_test.demand_vic

X_train.head(3)

X_train.columns

# ## 4) First look at Random Forest Regressor

rfr_clf = RandomForestRegressor(n_estimators=100)
rfr_clf = rfr_clf.fit(X_train, y_train)

# +
print("Random Forest Regressor accuracy: ")
rfr_result = rfr_clf.predict(X_test)
rfr_residuals = y_test - rfr_result
print('Mean Absolute Percent Error:', round(np.mean(abs(rfr_residuals / y_test)), 4))
print('Root Mean Squared Error:', np.sqrt(np.mean(rfr_residuals**2)))

plt.figure(figsize=(20, 4))
plt.plot(y_test[:200], label='true value')
plt.plot(rfr_result[:200], label='predict')
plt.legend()
plt.show()
# -

plt.figure(figsize=(20, 4))
plt.plot(rfr_residuals)
plt.show()

# Get numerical feature importances
importances = list(rfr_clf.feature_importances_)
# List of tuples with variable and importance
feature_importances = [(feature, round(importance, 2)) for feature, importance in zip(X_train.columns, importances)]
# Sort the feature importances by most important first
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
# Print out the feature and importances (plain loop; the original abused a
# list comprehension for its side effects)
for pair in feature_importances:
    print('Variable: {:20} Importance: {}'.format(*pair))

# ## 6) Predict CV and Test period demand

# ### 6.1) Predict CV period demand

# +
X_train = vic_train.drop(columns=['demand_vic', 'time'])[nlags:]
y_train = vic_train.demand_vic[nlags:]
X_cv = vic_cv.drop(columns=['demand_vic', 'time'])
y_cv = vic_cv.demand_vic

rfr_clf = RandomForestRegressor(n_estimators=100)
rfr_clf = rfr_clf.fit(X_train, y_train)

# +
print("Random Forest Regressor accuracy: ")
rfr_result = rfr_clf.predict(X_cv)
rfr_residuals = y_cv - rfr_result
# BUG FIX: the MAPE denominator was y_test; CV residuals must be scaled by y_cv.
print('Mean Absolute Percent Error:', round(np.mean(abs(rfr_residuals / y_cv)), 4))
print('Root Mean Squared Error:', np.sqrt(np.mean(rfr_residuals**2)))

plt.figure(figsize=(20, 4))
plt.plot(y_cv, label='true value')
plt.plot(rfr_result, label='predict')
plt.legend()
plt.show()
# -

vic_demand_cv_rfr = pd.DataFrame({'time': vic_cv.time, 'demand_vic': vic_cv.demand_vic})
vic_demand_cv_rfr['predicted_demand_vic'] = rfr_result
vic_demand_cv_rfr.tail(3)

vic_demand_cv_rfr.to_csv('predictions/vic_demand_cv_rfr.csv', index=False, header=True)

# ### 6.2) Predict Test period demand

# +
idx_test_start = 61296  # index of df(full) where test start

X_train = vic.drop(columns=['demand_vic', 'time'])[nlags:idx_test_start]
y_train = vic.demand_vic[nlags:idx_test_start]
X_test = vic_test.drop(columns=['demand_vic', 'time'])
y_test = vic_test.demand_vic

rfr_clf = RandomForestRegressor(n_estimators=100, random_state=1)
rfr_clf = rfr_clf.fit(X_train, y_train)

# +
print("Random Forest Regressor accuracy: ")
rfr_result = rfr_clf.predict(X_test)
rfr_residuals = y_test - rfr_result
print('Mean Absolute Percent Error:', round(np.mean(abs(rfr_residuals / y_test)), 4))
print('Root Mean Squared Error:', np.sqrt(np.mean(rfr_residuals**2)))

plt.figure(figsize=(20, 4))
plt.plot(y_test, label='true value')
plt.plot(rfr_result, label='predict')
plt.legend()
plt.show()
# -

vic_demand_test_rfr = pd.DataFrame({'time': vic_test.time, 'demand_vic': vic_test.demand_vic})
vic_demand_test_rfr['predicted_demand_vic'] = rfr_result
vic_demand_test_rfr.tail(3)

vic_demand_test_rfr.to_csv('predictions/vic_demand_test_rfr.csv', index=False, header=True)
modelling/random_forest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.3 64-bit (''ML'': conda)' # language: python # name: python38364bitmlconda83cdf8b0c5bc499992ebd663b9630b13 # --- # + import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt pd.set_option('display.max_columns', None) # - df = pd.read_excel('aspiring_minds_employability_outcomes_2015.xlsx') # # Aspiring Mind Employment Outcome 2015 (AMEO) # # ![Aspiring Minds](images\unnamed.png) # # ## Introduction # # Aspiring Minds’ Employability Outcomes 2015 (AMEO 2015) is a unique dataset which contains engineering graduates’ employment outcomes (salaries, job titles and job locations) along with standardized assessment scores in three fundamental areas – cognitive skills, technical skills and personality. A relevant question is what determines the salary and the jobs these engineers are offered right after graduation. Various factors such as college grades, candidate skills, proximity of the college to industrial hubs, the specialization one has, market conditions for specific industries determine this. In our understanding, this is the first time anyone has release emoployment outcome data together with standardized assessment scores publicly. # # ## IKDD CoDs Challenge 2016 # # This data was first released as part of the IKDD CoDS 2016 Data Challenge. The competition attracted 11 entries with complete reports, out of which 5 were selected as the winners. 137 predictions were made by various people. # # ![Aspiring Minds](images\unnamed.jpg) # ## High-level Data Analysis # ### 1. Shape # + tags=[] print("Rows: \t", df.shape[0], "\nColumns: ", df.shape[1]) # - # ### 2. 
Attributes df.columns # #### Attributes Description # # | _Variables_ | _Type_ | _Description_ | # | :-------------------- | :----------------------- | :------------------------------------------------------------------------- | # | ID | UID | A unique ID to identify a candidate | # | Salary | Continuous | Annual CTC offered to the candidate (in INR) | # | DOJ | Date | Date of joining the company | # | DOL | Date | Date of leaving the company | # | Designation | Categorical | Designation offered in the job | # | JobCity | Categorical | Location of the job (city) | # | Gender | Categorical | Candidate’s gender | # | DOB | Date | Date of birth of candidate | # | 10percentage | Continuous | Overall marks obtained in grade 10 <br/>examinations | # | 10board | Continuous | The school board whose curriculum <br/>the candidate followed in grade 10 | # | 12graduation | Date | Year of graduation - senior year high school | # | 12percentage | Continuous | Overall marks obtained in grade 12 <br/>examinations | # | 12board | Date | The school board whose curriculum the <br/> candidate followed in grade 12 | # | CollegeID | NA/ID | Unique ID identifying the college which<br/> the candidate attended | # | CollegeTier | Categorical | Tier of college | # | Degree | Categorical | Degree obtained/pursued by the candidate | # | Specialization | Categorical | Specialization pursued by the candidate | # | CollegeGPA | Continuous | Aggregate GPA at graduation | # | CollegeCityID | NA/ID | A unique ID to identify the city in which<br/> the college is located in | # | CollegeCityTier | Categorical | The tier of the city in which the college is located | # | CollegeState | Categorical | Name of States | # | GraduationYear | Date | Year of graduation (Bachelor’s degree) | # | English | Continuous | Scores in AMCAT English section | # | Logical | Continuous | Scores in AMCAT Logical section | # | Quant | Continuous | Scores in AMCAT Quantitative section | # | Domain | Continuous/ 
Standardized | Scores in AMCAT’s domain module | # | ComputerProgramming | Continuous | Score in AMCAT’s Computer programming section | # | ElectronicsAndSemicon | Continuous | Score in AMCAT’s Electronics & Semiconductor<br/> Engineering section | # | ComputerScience | Continuous | Score in AMCAT’s Computer Science section | # | MechanicalEngg | Continuous | Score in AMCAT’s Mechanical Engineering section | # | ElectricalEngg | Continuous | Score in AMCAT’s Electrical Engineering section | # | TelecomEngg | Continuous | Score in AMCAT’s Telecommunication Engineering<br/> section | # | CivilEngg | Continuous | Score in AMCAT’s Civil Engineering section | # | conscientiousness | Continuous/ Standardized | Scores in one of the sections of AMCAT’s <br/>personality test | # | agreeableness | Continuous/ Standardized | Scores in one of the sections of AMCAT’s <br/>personality test | # | extraversion | Continuous/ Standardized | Scores in one of the sections of AMCAT’s <br/>personality test | # | neuroticism | Continuous/ Standardized | Scores in one of the sections of AMCAT’s <br/>personality test | # | openess_to_experience | Continuous/ Standardized | Scores in one of the sections of AMCAT’s <br/>personality test | # ### 3. Instances df.head() df.tail() df.isnull().sum() # ### 4. Attribute Characteristics # + tags=[] df.info() # - df.nunique() df['Designation'].value_counts() df['Degree'].value_counts() df['Specialization'].value_counts() # ### 5. 
# Statistical Analysis

df.describe()

# Pearson correlation heatmap over the numeric columns.
# NOTE(review): on pandas >= 2.0, df.corr() raises on non-numeric columns;
# may need numeric_only=True — confirm the pandas version in use.
colors = plt.cm.PRGn
correlation = df.corr(method='pearson')
plt.figure(figsize=(30, 27))
plt.title("Correlation Matrix: Aspiring Minds DataSet", y=1.02, size=18)
sns.heatmap(
    correlation,
    linewidths=0.12,
    vmax=1.0,
    square=True,
    cmap=colors,
    linecolor='white',
    annot=True
)
plt.show()

plt.figure(figsize=(16, 12))
plt.title("Histogram: Salary", y=1.02, size=16)
df['Salary'].hist(bins=50, density=1, facecolor='g', alpha=0.75)
plt.show()

plt.figure(figsize=(16, 12))
# BUG FIX: this histogram plots 10percentage but was titled "Histogram: Salary".
plt.title("Histogram: 10percentage", y=1.02, size=16)
df['10percentage'].hist(bins=20, density=1, facecolor='g', alpha=0.75)
plt.show()
Assignment 3/.ipynb_checkpoints/assignment_3-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SubhamChoudhury/The-Sparks-Foundation-Data-Science-Internship-Project/blob/main/Task_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="Xz8qfG-7lH0j"
# # TSF Task 1: Simple Linear Regression
# ### By <NAME>

# + [markdown] id="ZU27rWM7lmq5"
# ### Simple Linear Regression:
# #### In this regression task we predict the percentage of marks that a student is expected to score based upon the number of hours they study. This is a simple linear regression task as it involves just two variables.

# + [markdown] id="6cMgB6t3mnOd"
# IMPORTING LIBRARIES

# + id="ZjPM0DpgcGwZ"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# + id="IpeJjpnSfM3Y"
# Loading the dataset
data = pd.read_csv("https://raw.githubusercontent.com/AdiPersonalWorks/Random/master/student_scores%20-%20student_scores.csv")
print("Data imported successfully")

# + id="SyEHO0_lfcsc"
data

# + [markdown] id="uXLIKew0oWIP"
# VISUALIZING DATA

# + id="6vkNr4Uff10V"
data.describe()

# + id="z6x_QaSgn3td"
data.info()

# + [markdown] id="Ik18eHqtoaBN"
# **Plotting 2D visualization graph for the data to check out the relationship in the data**

# + id="xTenrZhff4hl"
data.plot(x="Hours", y="Scores", style='o')
plt.title('Hours vs Percentage')
plt.xlabel('No. Of Hours Studied')
plt.ylabel('Percentage Scored')
plt.show()

# + [markdown] id="7SUV5_jZor7A"
# From the graph, we can note that there is a positive linear relation between the number of hours studied and percentage scored.

# + [markdown] id="99JXuxOHpYgA"
# ## Linear Regression Model
# Data Preparation and training

# + id="iLT0DHIPf8Sf"
X = data['Hours'].values.reshape(-1, 1)
y = data['Scores'].values

# + id="_SVuHIbWhRph"
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=24)

# + id="DtM_BwrWhPon"
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X_train, y_train)
print("Training successfully completed")

# + id="G-HBUtq1heoB"
# BUG FIX: y_pred was used here (and in the cells below) without ever being
# computed, which raised a NameError. Predict on the test set first.
y_pred = lr.predict(X_test)

plt.scatter(X, y, color='blue')
plt.plot(X_test, y_pred, color='red')
plt.show()

# + [markdown] id="S3iidBpbp5-z"
# **Error and Accuracy Check**

# + id="VAfmscdAi2m2"
print('Test Score')
print(lr.score(X_test, y_test))
print('Training Score')
print(lr.score(X_train, y_train))

# + id="eMWbdU36i8aN"
new_data = pd.DataFrame({'Original': y_test, "Predicted": y_pred})
new_data

# + id="01kLN8-VkK72"
hours_1 = [[9.25]]
own_pred_1 = lr.predict(hours_1)
print("No of Hours = {}".format(hours_1[0][0]))
print("Predicted Score = {}".format(own_pred_1[0]))

# + id="eOMmkKiPkuhS"
from sklearn import metrics
print('Mean Squared error:', metrics.mean_squared_error(y_test, y_pred))
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
# BUG FIX: RMSE is the square root of the MEAN SQUARED error; the original
# took the square root of the mean absolute error.
print('Root Mean Squared error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))

# + [markdown] id="SwdEJN-BqDe9"
# TASK-1 completed

# + [markdown] id="qDm7kg8olDa2"
#
Task_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Task 3: Perform 'Exploratory Data Analysis' on dataset 'SampleSuperstore'

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv(r"SampleSuperstore.csv")
df

# ## DATA CLEANING
# BUG FIX: DataFrame.drop returns a new frame; the original call discarded
# the result, so "Country" was never actually removed.
df = df.drop(["Country"], axis=1)
df.isnull().sum()
df["Segment"].unique()

# BUG FIX: the original cell `df[plot(kind="bar")` was a syntax error.
# Judging by the surrounding cells (Segment.unique() just above), the
# intent was a bar chart of segment counts.
plt.figure(figsize=(20, 20))
df["Segment"].value_counts().plot(kind="bar")

# Profit summed per (Quantity, Category).
plt.figure(figsize=(25, 25))
pd.crosstab(df["Quantity"], df["Category"], df["Profit"],
            aggfunc="sum").plot(kind="bar", stacked=True)

# Quantity summed per (Segment, Category).
plt.figure(figsize=(25, 25))
pd.crosstab(df["Segment"], df["Category"], df["Quantity"],
            aggfunc="sum").plot(kind="bar", stacked=True)

# Keyword arguments: positional x/y are deprecated (and later removed)
# in seaborn.
sns.scatterplot(x="Sales", y="Profit", data=df)

plt.figure(figsize=(10, 10))
df["Category"].value_counts().plot.pie(autopct='%.1f%%')

# BUG FIX: the legend labels must line up with the wedge order of
# value_counts(); the original passed the raw (duplicated, unordered)
# Sub-Category column.
labels = df["Sub-Category"].value_counts().index
colors = ['aqua', 'pink', 'blue', 'fuchsia', 'gray', 'green', 'lime',
          'maroon', 'navy', 'olive', 'orange', 'purple', 'red', 'silver',
          'teal', 'white', 'yellow']
explode = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.2, 0.3, 0.4, 0.5)
fig, ax1 = plt.subplots(figsize=(12, 12))
ax1.pie(df["Sub-Category"].value_counts(), colors=colors, explode=explode,
        startangle=90, autopct='%.1f%%',)
plt.title("Subcategory division")
ax1.legend(labels, loc="upper right")
plt.tight_layout()
plt.show()
GripSol3_Superstore.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os os.chdir('..') from pathlib import Path import json # # Paths # + coco_train_path = Path('data')/'benign_data'/'coco_train.json' coco_test_path = Path('data')/'benign_data'/'coco_test.json' coco_eval_path = Path('data')/'benign_data'/'coco_eval.json' data_dir = Path('data')/'benign_data' eval_coco_path = data_dir/'coco_eval.json' eval_img_dir = data_dir/'eval_imgs' eval_dir = Path('output')/'eval' rcnn_annos_path = eval_dir/'rcnn'/'eval_annos_rcnn.json' retina_annos_path = eval_dir/'retina'/'eval_annos_retina.json' yolo_annos_path = eval_dir/'eval_annos_yolo_1.json' # - # --- # + with coco_test_path.open('r') as f: coco_test = json.load(f) with coco_train_path.open('r') as f: coco_train = json.load(f) with coco_eval_path.open('r') as f: coco_eval = json.load(f) # - coco_train['categories'] # + # for coco in (coco_test, coco_train): # for anno in coco['annotations']: # anno['iscrowd'] = 0 # - for anno in coco_eval['annotations']: anno['iscrowd'] = 0 # + # with coco_test_path.open('w') as f: # json.dump(coco_test, f) # with coco_train_path.open('w') as f: # json.dump(coco_train, f) with coco_eval_path.open('w') as f: json.dump(coco_eval, f) # - # # Viz import os os.chdir('..') import detectron2_1 from detectron2.config import get_cfg from pathlib import Path from detectron2.engine import DefaultPredictor import cv2 from detectron2.data import MetadataCatalog from detectron2.utils.visualizer import Visualizer from PIL import Image # + img_dir = Path('data') img_path = img_dir/'samples'/'WechatIMG18.png' save_path = img_dir/'samples'/'WechatIMG18_pred.png' model_dir = Path('output') rcnn_dir = model_dir/'rcnn_2' rcnn_cfg_path = rcnn_dir/'config.yaml' rcnn_weights_path = rcnn_dir/'model_final.pth' # - im = cv2.imread(str(img_path)) # + tags=[] 
cfg = get_cfg() cfg.merge_from_file(rcnn_cfg_path) cfg.MODEL.WEIGHTS = str(rcnn_weights_path) cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.03 # + tags=[] predictor = DefaultPredictor(cfg) # - outputs = predictor(im) benign_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]) MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes = ["box", "logo"] v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0])) out = v.draw_instance_predictions(outputs["instances"].to("cpu")) Image.fromarray(out.get_image()) Image.fromarray(out.get_image()).save(save_path) # # Evaluation import matplotlib.pyplot as plt from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval import numpy as np import skimage.io as io import pylab # + tags=[] cocoGt = COCO(eval_coco_path) # + tags=[] cocoDt = cocoGt.loadRes(str(yolo_annos_path)) # + tags=[] cocoEval = COCOeval(cocoGt, cocoDt, 'bbox') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() # -
notebooks/test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Init
from PIL import Image
import os
import numpy as np
from random import randint, randrange
import glob
from math import floor

# folder for single stone images
direc = r""
os.chdir(direc)

# load single stone images
s = '.png'
stoneImL = []
for filename in glob.glob("*"):  # do not use glob if file name ordering is required
    im = Image.open(filename)
    stoneImL.append(im)

# folder for base stones
direc = r""
os.chdir(direc)

# load base stone images
baseImL = []
for filename in glob.glob("*"):  # do not use glob if file name ordering is required
    im = Image.open(filename)
    baseImL.append(im)


def getBox(img, x, y):
    """Return a random top-left paste corner such that (x, y) ends up being
    one of the four corners of the pasted image."""
    box = randint(1, 4)
    w, h = img.size
    if box == 4:
        return x, y
    elif box == 1:
        return x - w, y - h
    elif box == 2:
        return x, y - h
    else:
        return x - w, y


# # Generate images with black background
lenStone = len(stoneImL)
cWidth = 100   # size of one grid cell of the panel target image
num = 8        # grid cells per side
rAngle = 60    # minimum rotation angle
imageStart = 0
# BUG FIX: the original `imageEnd = 20,000` is the tuple (20, 0) in Python,
# which makes range(imageStart, imageEnd) raise TypeError. The intended
# value is the integer 20000.
imageEnd = 20000

# generate random images, black background
direc = r""
os.chdir(direc)
for k in range(imageStart, imageEnd):
    imgB = Image.new('RGBA', (cWidth * num, cWidth * num), 'black')
    for i in range(1, num + 1):
        for j in range(1, num + 1):
            # select random stone
            pos = randrange(0, lenStone)
            imS = stoneImL[pos]
            # select random size / orientation
            size = randint(1, 5)
            rot = randint(0, 5)
            # displacement noise in image positions
            wNoise = randint(-int(cWidth / 4), int(cWidth / 4))
            hNoise = randint(-int(cWidth / 4), int(cWidth / 4))
            if size == 5:  # size 5 means no image
                continue
            w, h = imS.size
            pstIm = imS.resize((int(w / (2 * size)), int(h / (2 * size))),
                               Image.ANTIALIAS)
            pstIm = pstIm.rotate(rot * rAngle, expand=1)
            x = int(cWidth * i - cWidth / 2 + wNoise)
            y = int(cWidth * j - cWidth / 2 + hNoise)
            # (x, y) is a vertex of one of four candidate paste rectangles;
            # getBox picks one of them at random.
            x1, y1 = getBox(pstIm, x, y)
            imgB.paste(pstIm, (x1, y1), pstIm.split()[3])
    loc = str(k) + s
    # BUG FIX: the original saved the undefined name `img` (NameError);
    # the canvas being built is imgB.
    imgB.save(loc, "PNG")

# # Generate images with stone background
cWidth = 100
baseW, _ = baseImL[0].size
num = floor(baseW / cWidth)
rAngle = 60  # min rotation angle
imageStart = 0
# BUG FIX: was `20,000` (a tuple) — see above.
imageEnd = 20000
lenBase = len(baseImL)
lenStone = len(stoneImL)

# generate random images, base stone background
direc = r""
os.chdir(direc)
for k in range(imageStart, imageEnd):
    pos = randrange(0, lenBase)
    imgB = baseImL[pos].copy()
    for i in range(1, num + 1):
        for j in range(1, num + 1):
            # select random stone
            pos = randrange(0, lenStone)
            imS = stoneImL[pos]
            # select random size / orientation
            size = randint(1, 5)
            rot = randint(0, 5)
            # displacement noise in image positions
            wNoise = randint(-int(cWidth / 4), int(cWidth / 4))
            hNoise = randint(-int(cWidth / 4), int(cWidth / 4))
            if size == 5:  # size 5 means no image
                continue
            w, h = imS.size
            pstIm = imS.resize((int(w / (2 * size)), int(h / (2 * size))),
                               Image.ANTIALIAS)
            # expand=1 does grow the canvas with every rotation, but the
            # increase is not too large for a single rotation
            pstIm = pstIm.rotate(rot * rAngle, expand=1)
            x = int(cWidth * i - cWidth / 2 + wNoise)
            y = int(cWidth * j - cWidth / 2 + hNoise)
            x1, y1 = getBox(pstIm, x, y)
            imgB.paste(pstIm, (x1, y1), pstIm.split()[3])
    loc = str(k) + s
    imgB.save(loc, "PNG")


# # Concatenate contour and target image
def get_concat_h(im1, im2):
    """Paste im1 and im2 side by side on a new RGB canvas."""
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst


# join image and contour
loc2 = r"......\imNew\\"        # target image folder
loc1 = r'.......\imNewEdge\\'   # contour images folder
locO = r"........\imCombNew\\"  # output folder
s = '.png'
for k in range(0, 500):
    inL1 = loc1 + str(k) + s
    inL2 = loc2 + str(k) + s
    inF1 = Image.open(inL1)
    inF2 = Image.open(inL2)
    outF = get_concat_h(inF1, inF2)
    outL = locO + str(k) + s
    outF.save(outL, "PNG")
create_dataset.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.0 # language: julia # name: julia-0.5 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Overview of the Package Ecosystem # # Julia's package ecosystem is organized in terms of Github organizations. While this is informal, many of the main packages (but not all!) can be found in the various organizations. # # http://julialang.org/community/ # # A useful source on the the changing package ecosystem (might be) found here: # # http://www.pkgupdate.com/ # + [markdown] slideshow={"slide_type": "slide"} # ## A Quick Look At Some Organizations # # Let's take a quick look at some organizations which provide important functionality to Julia. I will go through some of the most well-developed and "ready for use orgs". Of course, there are more that I will be leaving off the list. # # # + [markdown] slideshow={"slide_type": "slide"} # ### JuliaLang # # - JuliaLang is the Base organization # - It holds the Julia language itself # - Other core pacakges exist in JuliaLang # - PkgDev for package development # - IJulia # - Compat for version compatibility # - There is a general trend of "slimming Base" to lower the Travis load on JuliaLang # + [markdown] slideshow={"slide_type": "slide"} # ### JuliaStats # # - Hosts Dataframes.jl, the data frame implementation of Julia # - Distributions.jl holds probability distributions and methods for generating random numbers according to specific distributions # - The standard regression and hypothesis testing libraries are held here # - Klara.jl is a native MCMC engine # - One of the main R linear model library developers, <NAME>, is a heavy contributor # # Note, Dataframes used to be slow. A very large change is coming in the next week. 
To understand it in detail, read: http://www.johnmyleswhite.com/notebook/2015/11/28/why-julias-dataframes-are-still-slow/ # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaOpt # # - Julia for Mathematical Programming (JuMP) is one of the premire Julia libraries. It implements a DSL for interfacing with many commercial and non-commercial mathematical optimization (linear, mixed-integer, conic, semidefinite, nonlinear) algorithms. Most of JuliaOpt can be used through JuMP # - Optim.jl are a set of native Julia optimization algorithms # - An interesting fact is that the creator of NLopt is a heavy contributor to Julia and JuliaOpt # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaParallel # # Bindings to many popular parallel libraries / APIs are found in JuliaParallel: # # - DistributedArrays.jl: A distributed array implmentation # - PETSc.jl # - MPI.jl # - ScaLAPACK.jl # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaGPU # # Bindings for common GPU libraries: # # - ArrayFire.jl # - CUDArt.jl # - CUSPARSE.jl # - CUDNN.jl # - CUFFT.jl # - CUBLAS.jl # # JuliaGPU is also developing a framework for easy GPU usage: # # - CUDAnative.jl # - GPUArrays.jl # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaDiff # # JuliaDiff holds libraries for differentiation in Julia # # - ForwardDiff.jl: A robust implementation of forward-mode autodifferentiation # - ReverseDiffSource.jl: A newer library for reverse-mode autodifferentiation (backwards propogation) # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaGraphs # # JuliaGraphs is built around LightGraphs.jl, a fast and performant implementation of graph algorithms in Julia # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaMath # # JuliaMath holds basic mathematical libraries. # # - IterativeSolvers.jl: Iterative methods for `Ax=b`, Krylov subspace methods, etc. 
# - Roots.jl: Root-finding algorithms # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaDiffEq # # JuliaDiffEq holds the packages for solving differential equations. # # - DifferentialEquations.jl: The core package for solving ODEs, SDEs, PDEs, DAEs, (hopefully DDEs soon...). Also wraps ODE.jl, Sundials.jl, and ODEInterface.jl into one convenient API. # - Sundials.jl: Wrappers for the Sundials ODE/DAE solvers # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaPlots # # JuliaPlots is the organization for plotting and visualization. # # - Plots.jl is a metapackage for plotting. It is the main plotting library in Julia. # - GGPlots.jl is a Grammar of Graphics (GoG) API for Plots.jl # - StatPlots.jl provides plotting recipes for statistics in Plots.jl # - PlotRecipes.jl is a random assortment of plotting recipes # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaInterop # # Interoperability of Julia with other languages. # # - MATLAB.jl # - RCall.jl # - Mathematica.jl # - JavaCall.jl # - CxxWrap.jl # - ObjectiveC.jl # + [markdown] slideshow={"slide_type": "slide"} # ## JuliaPy # # Julia interop with Python # # - PyPlot.jl: A wrapper for the Python matplotlib library # - SymPy.jl # - PyCall.jl # - pyjulia # - Pandas.jl # + [markdown] slideshow={"slide_type": "slide"} # ## Misc # # - JLD.jl: An HDF5-based saving format for Julia # - Bio.jl: A huge library for bioinformatics in Julia
Notebooks/.ipynb_checkpoints/PackageEcosystem-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # For a linear neuron we have a quadratic bowl. For a hidden layer neuron we have multiple bowls.
# add multi surface from vincent pascal
#
# <img src="ngsgd1.png">
# <img src="ngsgd2.png">
#
# Source: Andrew Ng lecture notes
# Gradient descent uses all the training samples.
# SGD/Online: use one training sample.
# MiniBatch: use a batch, typically 100, 10 or some number the user picks.
# Most common method:
# 1) the process of averaging the partial derivatives removes the effect of bad data samples.
# 2) can parallelize using a GPU by computing gradients in parallel. Each matrix multiply/training case
# can be parallelized both at training-case level and row/column level.
# Hinton: minibatches need to be balanced across classes. Example: if you have 10 classes, you want 100 each from each
# of the 10 classes. One way to approximate this is to grab randomly from the training set. If the weights are not random
# you get oscillation in the weights, which is not efficient/can get stuck.
#
# One problem is the direction of the gradient or epsilon we are moving towards as we update the weights:
# 1) are we converging or just bouncing around b/c the learning rate is too big?
# <img src ="sgd1.png" >
#
# 2) are we not converging b/c the learning rate is too small and/or we are going in the wrong direction?
# <img src ="sgd2.png" >
# Images from Hinton/Coursera lecture slides
#
# 3) are we stuck in a local minimum? Annealing: wait for a stable point, i.e. no change in the error rate, then add a perturbation
# or change the learning rate to make it leave the local minimum.
#
# 4) there are multiple answers. Which one is best? May not be able to replicate a solution.
# <img src="multipleanswers.png">
#
# 5) stuck on a plateau; the gradient becomes very small. This may look like a
# local minimum but is not. Look at the weights of the hidden units: are they all positive or all negative?
# All pegged in one area?
#
# common in multilayer perceptrons w/ hidden units (pascal graph)

# + [markdown] deletable=true editable=true
# BUG FIX: this cell was a *code* cell containing bare prose, which raised a
# SyntaxError whenever the notebook was run; converted to markdown.
#
# Data preprocessing for SGD:
# 1) input mean at 0
# 2) unit variance

# + [markdown] deletable=true editable=true
# Nonlinear Conjugate Gradient Descent
# Momentum Optimizer
# AdaGradOptimizer
# AdaDeltaOptimizer
# AdamOptimizer
# FtrlOptimizer
# RMSPropOptimizer
#
# Image optimizer
# CTR prediction optimizer
#
# <img src="mom1.png">

# + deletable=true editable=true
hinton/sgd.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sequence Generation # # In this exercise, you will design an RNN to generate baby names! You will design an RNN to learn to predict the next letter of a name given the preceding letters. This is a character-level RNN rather than a word-level RNN. # # This idea comes from this excellent blog post: http://karpathy.github.io/2015/05/21/rnn-effectiveness/ # + # %matplotlib inline import numpy as np from keras.preprocessing import sequence from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Embedding from keras.layers import LSTM, SimpleRNN, GRU # - # ## Training Data # # The training data we will use comes from this corpus: # http://www.cs.cmu.edu/afs/cs/project/ai-repository/ai/areas/nlp/corpora/names/ # # Take a look at the training data in `data/names.txt`, which includes both boy and girl names. Below we load the file and convert it to all lower-case for simplicity. # # Note that we also add a special "end" character (in this case a period) to allow the model to learn to predict the end of a name. # + with open('../data/names.txt') as f: names = f.readlines() names = [name.lower().strip() + '.' for name in names] print('Loaded %d names' % len(names)) # - names[:10] # We need to count all of the characters in our "vocabulary" and build a dictionary that translates between the character and its assigned index (and vice versa). 
# + chars = set() for name in names: chars.update(name) vocab_size = len(chars) print('Vocabulary size:', vocab_size) char_inds = dict((c, i) for i, c in enumerate(chars)) inds_char = dict((i, c) for i, c in enumerate(chars)) # - char_inds # #### Exercise 1 - translate chars to indexes # # Most of the work of preparing the data is taken care of, but it is important to know the steps because they will be needed anytime you want to train an RNN. Use the dictionary created above to translate each example in `names` to its number format in `int_names`. # + # Translate names to their number format in int_names # - # The `create_matrix_from_sequences` will take the examples and create training data by cutting up names into input sequence of length `maxlen` and training labels, which are the following character. Make sure you understand this procedure because it is what will actually go into the network! # + def create_matrix_from_sequences(int_names, maxlen, step=1): name_parts = [] next_chars = [] for name in int_names: for i in range(0, len(name) - maxlen, step): name_parts.append(name[i: i + maxlen]) next_chars.append(name[i + maxlen]) return name_parts, next_chars maxlen = 3 name_parts, next_chars = create_matrix_from_sequences(int_names, maxlen) print('Created %d name segments' % len(name_parts)) # - X_train = sequence.pad_sequences(name_parts, maxlen=maxlen) y_train = np_utils.to_categorical(next_chars, vocab_size) X_train.shape X_train[:5] # #### Exercise 2 - design a model # # Design your model below. Like before, you will need to set up the embedding layer, the recurrent layer, a dense connection and a softmax to predict the next character. # # Fit the model by running at least 10 epochs. Later you will generate names with the model. Getting around 30% accuracy will usually result in decent generations. What is the accuracy you would expect for random guessing? 
model.fit(X_train, y_train, batch_size=32, epochs=10, verbose=1) # ## Sampling from the model # # We can sample the model by feeding in a few letters and using the model's prediction for the next letter. Then we feed the model's prediction back in to get the next letter, etc. # # The `sample` function is a helper to allow you to adjust the diversity of the samples. You can read more [here](https://en.wikipedia.org/wiki/Softmax_function#Reinforcement_learning). # # Read the `gen_name` function to understand how the model is sampled. # + def sample(p, diversity=1.0): p1 = np.asarray(p).astype('float64') p1 = np.log(p1) / diversity e_p1 = np.exp(p1) s = np.sum(e_p1) p1 = e_p1 / s return np.argmax(np.random.multinomial(1, p1, 1)) def gen_name(seed, length=1, diversity=1.0, maxlen=3): """ seed - the start of the name to sample length - the number of letters to sample; if None then samples are generated until the model generates a '.' character diversity - a knob to increase or decrease the randomness of the samples; higher = more random, lower = closer to the model's prediction maxlen - the size of the model's input """ # Prepare input array x = np.zeros((1, maxlen), dtype=int) # Generate samples out = seed while length is None or len(out) < len(seed) + length: # Add the last chars so far for the next input for i, c in enumerate(out[-maxlen:]): x[0, i] = char_inds[c] # Get softmax for next character preds = model.predict(x, verbose=0)[0] # Sample the network output with diversity c = sample(preds, diversity) # Choose to end if the model generated an end token if c == char_inds['.']: if length is None: return out else: continue # Build up output out += inds_char[c] return out # - # #### Exercise 3 - sample the model # # Use the `gen_name` function above to sample some names from your model. # # 1. Try generating a few characters by setting the `length` argument. # 2. Try different diversities. Start with 1.0 and vary it up and down. # 3. 
Try using `length=None`, allowing the model to choose when to end a name. # 4. What happens when `length=None` and the diversity is high? How do samples change in this case staring from beginning to end? Why do you think this is? # 5. With `length=None` and a "good" diversity, can you tell if the model has learned a repertoire of "endings"? What are some of them? # 6. Find some good names. What are you favorites? :D # #### Exercise 4 - retrain # # Now that you have seen some samples, go back up and redefine your model to "erase" it. Don't train it again yet. You can sample again to compare the quality of the samples before the model is trained. # # Experiment with the hidden layer size, the maxlen, the number of epochs, etc. Do you observe any differences in the sample behavior? # # Not all changes will make an observable impact, but do experiments to see what you can discover.
day_4/Lab_23_DL Sequence Generation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # # Reuters-21578 collection # # Reuters-21578 is arguably the most commonly used collection for text classification during the last two decades. # It has been used in some of the most influential papers on the field. # # This dataset contains structured information about newswire articles that can be assigned to several classes, # therefore making this a multi-label problem. It has a highly skewed distribution of documents over categories, # where a large proportion of documents belong to few topics. # # The collection originally consists of 21,578 documents, including documents without topics and typographical errors. # For this reason, a subset and split of the collection is traditionally used. This split also focus only on the # categories that have at least one document in the training set and the test set. The dataset has 90 categories with a # training set of 7769 documents and a test set of 3019 documents. # # # Data Collection Stats # # Understanding your collection is always the first step of any data science problem. Lets have a quick look at the Reuters collection and its documents. 
# + from nltk.corpus import reuters # List of document ids documents = reuters.fileids() print("Documents: {}".format(len(documents))) # Train documents train_docs_id = list(filter(lambda doc: doc.startswith("train"),documents)) print("Total train documents: {}".format(len(train_docs_id))) # Test documents test_docs_id = list(filter(lambda doc: doc.startswith("test"),documents)) print("Total test documents: {}".format(len(test_docs_id))) # - # Example of a document (with multiple labels) doc = 'training/9865' print(reuters.raw(doc)) print() print(reuters.categories(doc)) from operator import itemgetter from pprint import pprint # List of categories categories = reuters.categories() print("Number of categories: {}".format(len(categories))) print() print(categories) # + # Documents per category category_distribution = [(category, len(reuters.fileids(category))) for category in categories] category_distribution = sorted(category_distribution,key = itemgetter(1),reverse = True) print("Most common categories") pprint(category_distribution[:5]) print() print("Least common categories") pprint(category_distribution[-5:]) print() # - from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.preprocessing import MultiLabelBinarizer from sklearn.svm import LinearSVC from sklearn.multiclass import OneVsRestClassifier stop_words = stopwords.words("english") train_docs = [reuters.raw(doc_id) for doc_id in train_docs_id] test_docs = [reuters.raw(doc_id) for doc_id in test_docs_id] # # Tokenisation # + vectorizer = TfidfVectorizer(stop_words=stop_words) # Learn and transform train documents vectorised_train_documents = vectorizer.fit_transform(train_docs) vectorised_test_documents = vectorizer.transform(test_docs) # - # # Transform multilabel labels mlb = MultiLabelBinarizer() train_labels = mlb.fit_transform(reuters.categories(doc_id) for doc_id in train_docs_id) test_labels = mlb.transform(reuters.categories(doc_id) for doc_id in 
test_docs_id) # # Classifier # + classifier = OneVsRestClassifier(LinearSVC(random_state=42)) classifier.fit(vectorised_train_documents,train_labels) predictions = classifier.predict(vectorised_test_documents) print("Numbers of labels assigned: {}".format(sum([sum(prediction) for prediction in predictions]))) # - # # How well have we done? from sklearn.metrics import f1_score, precision_score, recall_score # # Micro-average scores precision = precision_score(test_labels, predictions, average = 'micro') recall = recall_score(test_labels, predictions, average = 'micro') f1 = f1_score(test_labels, predictions, average = 'micro') print("Micro-average quality numbers") print("Precision: {}, Recall: {}, F1-measure: {}".format(precision,recall,f1)) precision = precision_score(test_labels, predictions, average='macro') recall = recall_score(test_labels, predictions, average='macro') f1 = f1_score(test_labels, predictions, average='macro') print("Macro-average quality numbers") print("Precision: {:.4f}, Recall: {:.4f}, F1-measure: {:.4f}".format(precision, recall, f1)) # ## What is this weird warning message about "Precision being ill-defined?" # # Some metrics can be in a position of indeterminate value when for instance, the classifier decides to not classify any articles in a specific category. This would imply a 0/0 precision. Scikit learn warns us about this, and reports back this quality as 0.0.
notebooks/05 text_classification_Reuters.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Problem 8.27: lower-tail z-test on the sample mean, with a box plot and a
# normal probability plot as visual checks of the data.

from scipy import stats
import numpy as np
import math
from bokeh.io import show
from bokeh.plotting import figure
from big_data_tools.bokeh_tools.bar import bar
from big_data_tools.bokeh_tools.histogram import hist
from big_data_tools.bokeh_tools.box_plot import box_plot
from big_data_tools.bokeh_tools.prob_plot import prob_plot
from bokeh.layouts import gridplot
from bokeh.models import NumeralTickFormatter

# Observed sample (n = 49).
data = [
    1.38, 0.44, 1.09, 0.75, 0.66, 1.28, 0.51, 0.39, 0.70, 0.46,
    0.54, 0.83, 0.58, 0.64, 1.3, 0.57, 0.43, 0.62, 1.0, 1.05,
    0.82, 1.1, 0.65, 0.99, 0.56, 0.56, 0.64, 0.45, 0.82, 1.06,
    0.41, 0.58, 0.66, 0.54, 0.83, 0.59, 0.51, 1.04, 0.85, 0.45,
    0.52, 0.58, 1.11, 0.34, 1.25, 0.38, 1.44, 1.28, 0.51,
]

# Visual checks: symmetry (box plot) and normality (Q-Q against 'norm').
fig_box = box_plot(cats=['a'], data=[data])
fig_qq = prob_plot(data, 'norm')
show(gridplot([fig_box, fig_qq], ncols=2))

# Summary statistics of the sample.
stats.describe(data)

# Critical z value at alpha = 0.05 (lower tail).
needed_z = stats.norm.ppf(.05)

# Standard deviation (population formula, ddof=0) and the standard error
# of the mean for n = 49.
std = math.sqrt(np.var(data))
std_sample = std / math.sqrt(49)

# Test statistic against the hypothesised mean of 1.
z_score = (np.mean(data) - 1) / std_sample
print(z_score, std)

# One-sided confidence bound for the mean.
upper = np.mean(data) + needed_z * -1 * std_sample
print(upper)
problem_8_27_stats.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Before running this notebook, run `pip install pylandtemp` in your terminal
# (or `!pip install pylandtemp` in a cell).

# # 1. Import python dependencies
import numpy as np
import matplotlib.pyplot as plt
import rasterio.plot
import rasterio

# # 2. Location
# A Landsat-8 Level 1TP scene (orthorectified, map-projected, radiometrically
# calibrated) over part of the central valley and the Sierra Nevada, CA.

# # 3. Bands needed for split-window land surface temperature
# - Red: Band 4
# - Near-Infrared (NIR): Band 5
# - Thermal infrared 1: Band 10
# - Thermal infrared 2: Band 11

url = 'http://landsat-pds.s3.amazonaws.com/c1/L8/042/034/LC08_L1TP_042034_20170616_20170629_01_T1/'
#url = 'https://storage.googleapis.com/gcp-public-data-landsat/LC08/01/042/034/LC08_L1TP_042034_20170616_20170629_01_T1/'

# All four bands share one filename template; only the band number varies.
_band_name = 'LC08_L1TP_042034_20170616_20170629_01_T1_B{}.TIF'
redband = _band_name.format(4)
nirband = _band_name.format(5)
tempband10 = _band_name.format(10)
tempband11 = _band_name.format(11)


def _read_band(name):
    """Open a band from the scene URL with rasterio and return it as float32."""
    with rasterio.open(url + name) as src:
        return src.read(1).astype('f4')


redImage = _read_band(redband)
nirImage = _read_band(nirband)
tempImage10 = _read_band(tempband10)
tempImage11 = _read_band(tempband11)

# # 8. Compute land surface temperature
from pylandtemp import split_window

# ### Split window
# Available methods: 'jiminez-munoz' (Jiminez-Munoz et al, 2008),
# 'kerr' (Kerr Y et al, 2004), 'mc-millin' (McMillin, 1975),
# 'price' (Price J. C., 1984), 'sobrino-1993' (Sobrino et al, 1993).
method = 'price'
lst_image_split_window = split_window(
    tempImage10,
    tempImage11,
    redImage,
    nirImage,
    lst_method=method,
    emissivity_method='avdan',
    unit='celcius',  # [sic] this spelling is what the pylandtemp API accepts
)

# ##### Visualize the Land Surface Temperature obtained
plt.imshow(lst_image_split_window)
plt.colorbar()
plt.title('{}\n LST in Celcius {}'.format(f'{method} method', lst_image_split_window.shape))
plt.xlabel('Column #')
plt.ylabel('Row #')
plt.show()
tutorials/Tutorial_4- compute_land_surface_temperature_split_window.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Defining performance metrics
#
# confusion matrix
# - recall : of the samples that are actually true, the fraction the model also predicts as true
# - precision : of the samples the model predicts as true, the fraction that are actually true
# - fscore : a single metric that summarizes the performance of the model
#   - 2pr/(p+r)  p : precision, r : recall
#
# # Designing a baseline model
# - you will do well to first try a model and algorithm already known to perform the best for that task
# - you can even reuse a model trained on a different dataset for your own problem without having to train it from scratch
#   - called transfer learning
#
# # Data preprocessing
# - when given large amounts of training data, networks extract and learn features from raw data
# - but they still need some preprocessing to improve performance or to work within specific limitations of the neural network
#   - such as converting images to grayscale, image resizing, normalization, and data augmentation
#
# #### grayscale rules
# - if you can identify the object with your eyes in grayscale
#   - a DNN will understand it too : lower compute cost
# #### image resize
# - for input_shape : and computer vision
# #### data normalization
# - input features should have a similar data distribution
#   - so we apply normalization
# - the [0, 1] range boosts learning performance and makes the network converge faster
# - HOW to make a neural network fast:
#   - small values : values should be in the [0, 1] range
#   - homogeneous : all pixels should have values in the same range
# <img src="https://blog.kakaocdn.net/dn/bXs8JV/btq54NWJ2P7/L2GQwF18VADgyKO62kgSD1/img.png">
# #### why normalization is important
# - non-normalized features make the cost function a squished, elongated bowl
# <img src="https://blog.kakaocdn.net/dn/c9Y0YD/btq56dnI70a/HX9cOKytmm0b5hUENWRU80/img.png">
#
# # Evaluating the model and interpreting its performance
# ### Diagnosing overfitting and underfitting
# -
after running you experiment # - want to observe its performance, determine if bottelnecks are impacting its performance # - main cause of poor performance in ML is either overfitting or underfitting the training dataset # #### to diagnose underfitting and overfitting # - two values ot focus on while training poorly on the validation set # #### if train_error is lower than val error # - overfitting # - tuning hyperparameters to avoid overfitting # # #### if val error is lower than train_error # - under fitting # - consider adding more hidden layers or train longer or try different neural network architecture # # Code for practice from sklearn.datasets import make_blobs from keras.utils import to_categorical from keras.models import Sequential from keras.layers import Dense from matplotlib import pyplot X, y = make_blobs(n_samples=1000, centers=3, n_features=2, cluster_std=2, random_state=2) y = to_categorical(y) n_train = 800 train_X, test_X = X[:n_train, :], X[n_train:, :] train_y, test_y = y[:n_train], y[n_train:] print(train_X.shape, test_X.shape) # + model = Sequential() model.add(Dense(25, input_dim=2, activation='relu')) model.add(Dense(10, input_dim=2, activation='relu')) model.add(Dense(3, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() # - history = model.fit(train_X, train_y, validation_data=(test_X, test_y), epochs=1000, verbose=1) pyplot.plot(history.history['acc'], label='train') pyplot.plot(history.history['val_acc'], label='test') pyplot.legend() pyplot.show() # # Collecting more data or tuning hyperparameters # - tip # - hyperparameters : we don't set, automatic learn and tune, weight bias # - parameters : we set !, like epochs learning rate... 
# ## Network architecture
# - generally it is good to add hidden neurons until the validation error no longer improves
# - the trade-off is that it is computationally expensive to train deeper networks
# - having too few units may lead to underfitting, while having more units is usually not harmful
#   - with appropriate regularization (like dropout and others)
# ## Activation type
# - activation functions introduce nonlinearity to our neurons
# - without activation our neurons would only pass on a linear combination
# - the best way to build a baseline architecture is to look at the popular architectures available to solve
#   - the specific problem and start from there
# ## Learning and Optimization
# - learning rate and decay schedule
# - the learning rate is the single most important hyperparameter and should always be tuned
#
#
# - if the learning rate is much larger than the ideal lr value
#   - the optimizer will not only overshoot the ideal weights, but get farther and farther from the minimum error
#   - this phenomenon is called divergence
#
# ## A systematic approach to find the optimal learning rate
# - the best way to debug the learning rate is to watch the validation and training loss in the verbose output
# - if val loss decreases after each step, great
#
#
# - if training stops and val_loss is still decreasing, then the learning rate was so small that it didn't converge yet
#   - train again with the same learning rate and more iterations
#
#
# - if val loss starts to increase and oscillate up and down
#   - the learning rate is too high and you need to decrease its value
# ## Learning rate decay and adaptive learning
# - another way to tune the learning rate is to set an initial learning rate,
#   - then use a method that changes the learning rate during training
#
#
# - this approach performs better than a static value,
#   - and drastically reduces the time required to get optimal results
#
# - decay rate : our network implements a decay rate on its learning rate
# - decay rate : reducing the learning rate
#   linearly or exponentially
#   - linear decay : e.g. halve, halve, halve
#   - exponential decay : e.g. multiply by 0.1 at every decay step
# # mini-batch size
# - has a big effect on the resource requirements and speed of the training process
# - to understand mini-batches, back up to the three GD types
#   - batch gradient descent : backpropagation after 1 full epoch
#   - stochastic gradient descent : backpropagation after every single sample
#   - mini-batch gradient descent : divide the data into mini-batches to compute the gradient
#
# # optimization algorithms 1
# - SGD optimization
#   - SGD oscillates in the vertical direction
#   - these oscillations make it harder to use larger learning rates
#   - to reduce these oscillations, momentum was invented
#   - it lets GD navigate along relevant directions and softens the oscillation in irrelevant directions
#
#   - momentum simply adds a weighted average of past gradients (a velocity term) to backpropagation
# - Adam
#   - adaptive moment estimation
#   - keeps an exponentially decaying average of past gradients
#   - whereas momentum can be seen as a ball rolling down a slope
# keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None,
#                       decay=0.0)
# - Adam is usually a great optimizer for everything
#
# # when train_error improves while val_error does not
# - means the network is starting to overfit the training data and
# - failing to generalize to the validation data.
DeepLearningForVisionSystem/Part4.Structing_DL_projects_and_hyperparameter_tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/fonslucens/test_deeplearning/blob/master/Notescale_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="l_IFEwLqlJDd" note_seq = ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4', 'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4', 'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4'] # + id="EPNufLQRn1IH" colab={"base_uri": "https://localhost:8080/"} outputId="c7c4efa0-3468-4ddc-9014-d567e6ac7dea" note_seq[0:4], note_seq[1:5], note_seq[2:6] # + id="k_0CBe0wlgXo" code2idx = {'c4':0, 'd4':1, 'e4':2, 'f4':3, 'g4':4, 'a4':5, 'b4':6, 'c8':7, 'd8':8, 'e8':9, 'f8':10, 'g8':11, 'a8':12, 'b8':13} # + colab={"base_uri": "https://localhost:8080/"} id="UUey5eEMmTUk" outputId="9f20a61f-dbaa-4efc-908c-e07f87b54275" len(note_seq), range(len(note_seq)-4) # [4,8,12,....] 
# + colab={"base_uri": "https://localhost:8080/"} id="VmI-iyeEUyUP" outputId="09f89d9c-c90d-46f5-e7f3-0a04b12412d1" code2idx['g8'] # + colab={"base_uri": "https://localhost:8080/"} id="_hShpUi7lgrG" outputId="ac48b846-6906-4d88-900c-53db42393b9e" dataset = list() for i in range(len(note_seq)-4) : subset = note_seq[i:i+4] items = list() # print(subset) for item in subset: # print(code2idx[item]) items.append(code2idx[item]) # print(items) dataset.append(items) print(dataset) # + id="5pQcjTDnV1V5" import numpy as np datasets = np.array(dataset) # + id="IU5-nPwQoxKQ" colab={"base_uri": "https://localhost:8080/"} outputId="b1413893-4f18-4643-b52a-87a9869d5190" x_train = datasets[:,0:3] x_train.shape #x_train # + colab={"base_uri": "https://localhost:8080/"} id="fhFXJSEbWAM4" outputId="635cc42e-ca0c-4af9-e56f-76addb609027" y_train = datasets[:,3] y_train.shape #y_train # + colab={"base_uri": "https://localhost:8080/"} id="xvmllAHDWV6V" outputId="cd74822b-d568-4758-a43b-601062936bb5" len(code2idx) # + colab={"base_uri": "https://localhost:8080/"} id="fnNSpUyCXPeV" outputId="b64e49aa-e35b-4075-c8d9-55f2a199cf77" x_train = x_train /13 #len(code2idx) x_train[3] # + [markdown] id="gaYOHb3GXnNZ" # # make model # + id="_BCkBt3gXni3" import tensorflow as tf # + colab={"base_uri": "https://localhost:8080/"} id="WLWpPMj-eJff" outputId="f72056a8-4949-4702-fee3-78df5e3d543d" x_train.shape, x_train[2] # --> tensorfloq type(tensor) (50, 3, 1), --> metrix # + colab={"base_uri": "https://localhost:8080/"} id="c8wveeTBeSbL" outputId="1afc8160-2523-434b-95ad-061272d3bd78" X_train = np.reshape(x_train, (50, 3, 1)) #tensor X_train.shape, X_train[2] # + colab={"base_uri": "https://localhost:8080/"} id="jZOCiXhEX_eT" outputId="e4ee00cc-8c43-47d8-9d83-e5ba6a7203ae" model = tf.keras.models.Sequential() model.add(tf.keras.Input(shape=(3,1))) # input layer model.add(tf.keras.layers.LSTM(128)) #hidden layer model.add(tf.keras.layers.Dense(13, activation='softmax')) # output layer 
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) #gadget # + colab={"base_uri": "https://localhost:8080/"} id="E3AWfEihhpCE" outputId="4cdbdcd0-3308-425c-bb03-f1dc493b4a28" hist = model.fit(X_train, y_train, epochs=500, batch_size=10) # 50 / 5 = 10 # + colab={"base_uri": "https://localhost:8080/"} id="i5F2zN0WlgP3" outputId="1cc4ee4b-9813-4631-d3ad-50c30f8deeed" model.evaluate(X_train, y_train) # + colab={"base_uri": "https://localhost:8080/"} id="0CQob1YMsXZm" outputId="a17465ca-cb4e-426b-af04-44eadbfae8c3" X_train[4:5] # + colab={"base_uri": "https://localhost:8080/"} id="m6rCe_Ksq9rO" outputId="d74de115-b27e-45b1-d675-1fa7b5a40fb5" model.predict(X_train[4:5]) # + id="HgvqvHFwra79" first = 0.61538462 second = 0.07692308 third = 0.53846154 # + id="Pyto5Wkcshdn" pred = model.predict([[[first], [second], [third]]]) # + colab={"base_uri": "https://localhost:8080/"} id="_n5P2eX1ttSR" outputId="863c61de-300d-4e80-84dd-70103ce622f0" model.predict(X_train[0:1]) # + colab={"base_uri": "https://localhost:8080/"} id="phGVZANWsrpO" outputId="d06749d0-21b8-464f-a6e4-0dbea4cbb376" np.argmax(pred) # + id="MVuCvuPosulF" code2idx = {'c4':0, 'd4':1, 'e4':2, 'f4':3, 'g4':4, 'a4':5, 'b4':6, 'c8':7, 'd8':8, 'e8':9, 'f8':10, 'g8':11, 'a8':12, 'b8':13} # + id="LpVtcSwAs5ru" note_seq = ['g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'd8', 'e8', 'f8', 'g8', 'g8', 'g4', 'g8', 'e8', 'e8', 'e8', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4', 'd8', 'd8', 'd8', 'd8', 'd8', 'e8', 'f4', 'e8', 'e8', 'e8', 'e8', 'e8', 'f8', 'g4', 'g8', 'e8', 'e4', 'f8', 'd8', 'd4', 'c8', 'e8', 'g8', 'g8', 'e8', 'e8', 'e4']
Notescale_LSTM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 2021년 5월 5일 수요일 # ### Programmers - 2020 카카오 인턴십 - 키패드 누르기 (Python) # ### 문제 : https://programmers.co.kr/learn/courses/30/lessons/67256 # ### 블로그 : https://somjang.tistory.com/entry/Programmers-2020-%EC%B9%B4%EC%B9%B4%EC%98%A4-%EC%9D%B8%ED%84%B4%EC%8B%AD-%ED%82%A4%ED%8C%A8%EB%93%9C-%EB%88%84%EB%A5%B4%EA%B8%B0-Python # ### Solution # + def get_distance(keypad, finger_position, next_number): next_number_position = keypad[next_number] distance = abs(finger_position[0] - next_number_position[0]) + abs(finger_position[1] - next_number_position[1]) return distance def solution(numbers, hand): result = '' keypad = { 1: [0, 0], 2: [0, 1], 3: [0, 2], 4: [1, 0], 5: [1, 1], 6: [1, 2], 7: [2, 0], 8: [2, 1], 9: [2, 2], 0: [3, 1] } left_finger_numbers = [1, 4, 7] right_finger_numbers = [3, 6, 9] center_finger_numbers = [2, 5, 8, 0] left_finger_position = [3, 0] right_finger_position = [3, 2] for number in numbers: if number in left_finger_numbers: result += 'L' left_finger_position = keypad[number] elif number in right_finger_numbers: result += 'R' right_finger_position = keypad[number] else: left_finger_distance = get_distance(keypad, left_finger_position, number) right_finger_distance = get_distance(keypad, right_finger_position, number) if left_finger_distance > right_finger_distance: result += 'R' right_finger_position = keypad[number] elif left_finger_distance < right_finger_distance: result += 'L' left_finger_position = keypad[number] elif left_finger_distance == right_finger_distance: result += hand[0].upper() if hand == 'right': right_finger_position = keypad[number] elif hand == 'left': left_finger_position = keypad[number] return result
DAY 301 ~ 400/DAY356_[Programmers] 2020 카카오 인턴십 - 키패드 누르기 (Python).ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Fine-tune a modified ResNet-34 on the Kaggle dogs-vs-cats data with
# fastai (v0.7 API), then capture intermediate activations with a forward
# hook and save them as numpy feature files.

# Put these at the top of every notebook, to get automatic reloading and inline plotting
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline

# +
import sys

sys.path.append("/home/ec2-user/anaconda3/external/")

# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
import tqdm
# -

PATH = "../Data/kaggle_dog_cat/"   # dataset root with train/valid subfolders
sz=224                             # input image size
bs=128                             # batch size
arch=resnet34                      # backbone architecture

# GPU availability checks (displayed, not asserted).
torch.cuda.is_available()

torch.backends.cudnn.enabled

# Pretrained backbone.
m = arch(True)

m

# Replace the final pooling/classifier layers: keep the convolutional
# trunk, add a 3x3 conv to 1024 channels, global average pooling, and a
# 2-way log-softmax head (dog vs cat).
m = nn.Sequential(*children(m)[:-2],
                  nn.Conv2d(512, 1024, 3),
                  nn.AdaptiveAvgPool2d(1),
                  Flatten(),
                  nn.Linear(1024,2),
                  nn.LogSoftmax())

# Standard side-on augmentation with slight zoom.
tfms = tfms_from_model(arch, sz, aug_tfms=transforms_side_on, max_zoom=1.1)

data = ImageClassifierData.from_paths(PATH, tfms=tfms, bs=bs)

learn = ConvLearner.from_model_data(m, data)

learn

# Train only the last 5 layers; earlier layers stay frozen.
learn.freeze_to(-5)

m[-5].trainable

# One epoch at lr 0.01, checkpointing the best weights as 'resnet_1'.
learn.fit(0.01, 1,best_save_name='resnet_1')

learn.load('resnet_1')

class SaveFeatures():
    """Forward hook that stashes a module's output on each forward pass."""
    features=None
    def __init__(self, m): self.hook = m.register_forward_hook(self.hook_fn)
    def hook_fn(self, module, input, output): self.features = output
    def remove(self): self.hook.remove()

x,y = next(iter(data.val_dl))

# Hook the 3rd-from-last module (the global-average-pool output).
# NOTE(review): this hook is never removed; the loop below registers (and
# removes) its own hooks per batch.
sfs = SaveFeatures(m[-3])

# +
# Run batches through the model and collect per-image flattened feature
# vectors plus their labels, stopping after ~5000 examples.
features = []
outputs = []
#for x,y in iter(data.val_dl):
for x,y in iter(data.trn_dl):
    sfs = SaveFeatures(m[-3])
    if len(outputs)>5000: break
    py = m(Variable(x.cuda()))
    sfs.remove()
    features_res = to_np(sfs.features)
    #import pdb;pdb.set_trace()
    y = to_np(y)
    for i in range(len(y)):
        features.append(np.ravel(features_res[i]))
        outputs.append(y[i])
# -

features = np.array(features)

outputs = np.array(outputs)

features.shape

# Persist the extracted features/labels.
# NOTE(review): as written, the loop above iterated trn_dl, so the val_*
# files below are saved from *training* activations; rerun the loop over
# val_dl before saving the val_* files.
np.save('../Data/kaggle_dog_cat/val_features.npy',features)

np.save('../Data/kaggle_dog_cat/val_outputs.npy',outputs)

np.save('../Data/kaggle_dog_cat/trn_features.npy',features)

np.save('../Data/kaggle_dog_cat/trn_outputs.npy',outputs)
Model/dog_vs_cat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The Iris Classification with Keras # In this project, we build and train a model to categorize the Iris flower base the sample data [1]. The algorithm we chose to implement for this project is multilayer perceptron. The Iris data set is available from the University of California at Irvine (UCI) Machine Learning Repository. The dataset consists of information on 150 Iris flowers [2]. We focus on learning three Iris species which are Setosa, Versicolour, and Virginica. The dataset is characterized by five attributes: # 1. Sepal Length in centimeters # 2. Sepal Width in centimeters # 3. Petal Length in centimeters # 4. Petal Width in centimeters # 5. Targeting Class (Setosa, Vericolour, Virginica) # |![setosa](/images/iris-setosa-1.jpeg)|![versicolor](/images/Iris_versicolor_3.jpg)|![virginica](/images/iris_virginica.jpeg)| # |:---|:---:|---:| # |Iris Setosa|Iris Versicolor|Iris Virginica| # ## Learning Algorithm # ### Multilayer Artificial Neural Network # We designed a Neural Network with very simple structure. The network contain 4 layers which are input, hidden, and output layers. The input layers consists of four nodes. The input for the network is a vector of Iris features which are Sepal Length, Sepal Width, Petal Length, and Petal Width [3]. There are two hidden layers with 10 nodes on each of the layers. We chose to use linear function, Relu, as the activation functions. The predicted probability of the Iris class will send to the output layer which represents Iris flower classes. The figure below shows the model Neuro Network [4]. # ![NeuralNet.png](/images/NeuralNet.png) # There are various types of Neuro Networks. In this project, we chose to build the fully connected Neuro Networks, also known as Dense networks. 
# The neurons from the input layer are connected to each neuron in the first hidden layer. The same applies to the second hidden and final output layers [5]. There is a single bias unit that is connected to each neuron in the hidden layers. The neurons in the hidden layers compute the weighted sum of the inputs to form the scalar net activation. We write the equation as in the figure below:
# \begin{equation*}
# \mathbf{y} = \sum_{i=1}^n x_i w_i + \mathbf{bias}
# \end{equation*}
# The subscript i indexes neurons in the input layer. The w denotes the input-to-hidden-layer weights at the hidden layer neurons. Such weights are also named synapses and the values of the connections the synaptic weights. The output y can be thought of as a function of input feature vector x. When there are k output neurons, we can think of the network as computing k discriminant functions and classify the input according to which discriminant function is the largest.
# In this project, we express the Neural Network as the mathematical function:
# \begin{equation*}
# \mathbf{output} = \sum_{j=1}^n w_{jm}\left(\sum_{i=1}^n w_{in}\left(\sum_{q=1}^n w_{qo}\, x + bias_q\right) + bias_n\right) + bias_m
# \end{equation*}
# Here $w_{jm}$ denotes the weights in layer m and $w_{in}$ denotes the weights in layer n [6]. The $bias_q$, $bias_n$, and $bias_m$ denote the bias vectors in layers q, n, and m accordingly. The output is a vector of dimension three which represents the probability of the three classes.
# ### Training Error
# The training error for the model that we build and train for this project is the least-mean-square error: the sum over the output neurons of the squared difference between the desired value z and the actual output t. The equation is denoted below:
# \begin{equation*}
# \mathbf{E\left(w\right)} = \frac{1}{2} \sum_{k=1}^n \left(z_k-t_k\right)^2
# \end{equation*}
# where z and t are the desired value and actual network output.
# The purpose of applying the least-mean-square error and gradient descent is to train the model's weights. The weights are initialized with value zeros, and then changed in a direction that will reduce the error:
# \begin{align}
# \nabla \mathbf{w} = -n \frac{\partial\mathbf{E}}{\partial\mathbf{w}}
# \end{align}
# where n is the learning rate which indicates the relative size of the change in weights [7]. The weight values are updated as follows:
# \begin{align}
# \mathbf{W(k+1)}=\mathbf{W(k)} - n \frac{\partial\mathbf{E}}{\partial\mathbf{w}}
# \end{align}
# Because the error function is given analytically, it is differentiable. The partial derivative with respect to w is:
# \begin{align}
# \frac{\partial\mathbf{E}}{\partial\mathbf{w}}=-\mathbf{(z-w^T x)\,x}
# \end{align}
# Substituting the result into the weight update equation, the resulting function is as below:
# \begin{align}
# \mathbf{W(k+1)}=\mathbf{W(k)} - n\,\mathbf{(z-w^T x)\,x}
# \end{align}
# where n is the learning rate, in the range 0<n<2 for the learning process to converge. In practice, we iterate the learning process to a specified threshold. The final computation in the hidden layers is the activation function. We chose the rectified linear unit (ReLU) as the activation function for our network.
# ## Training Dataset Processing
# In this project, we download the training dataset from "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv" and store it onto the local hard drive.
# We use tf.data.experimental.make_csv_dataset to parse the training dataset [8]. The output is a dataset consisting of tuples of Iris features and corresponding labels. The size of each element is the number of rows in the batch.

# +
# Total 120 rows of Iris features.
# The dataset is consumed in batches of 32 rows for training.
train_data_location = "C:/Users/JunnanLu123/iris_training.csv" train_data_filePt = (train_file_fp, 32, column_names = column_names, label_name = label_name, num_epochs = 1) # + # The columns of the data set. column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'] # The first four fields are flower features # representing flower measurements. feature_names = column_names[:-1] # The last column is the flower label which we # hope the model could predict in high accuracy. label_name = column_names[-1] # + # Each class is associated with the string name such as setosa. # The class name are mapped to then list representation such as # 0, Iris setosa # 1, Iris versicolor # 2, virginica class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica'] # + # The input data is a file of CSV format. Therefore, we call # tf.data.experimental.make_csv_dataset to make the training # dataset. The make_csv_dataset returns a tf.data.Dataset which # consists of feature and label pairs. train_dataset = tf.data.experimental.make_csv_dataset(train_file_fp, 32, column_names = column_names, label_name = label_name, num_epochs = 1) # + # The train_dataset object is iterable. The return value is a tuple # of features and label pair. The label here denotes the flower # classes. self.features, self.label = next(iter(train_dataset)) # - # Each row of the sample data is corresponding to the feature array which is grouped together in a batch. We set the default batch size to 32. # + # We call tf.stack to create a combined tensor at # the specified dimension. And we pack the features # into a single array. def pack(self, features, label): self.features = tf.stack(list(features.values()), axis=1) return features, label # + # Each features such as (feature, label) pair mapped into training # dataset by using tf.data.Dataset.map. dataset = dataset.map(pack) # - # We plot the figure to visualize the flower features. 
# The batch size is 32 rows of feature and label pairs.

# +
# Visualize the flower data from the dataset batch
# (petal length vs petal width, coloured by class label).
# NOTE(review): `self.` has no referent at notebook top level -- this cell
# looks pasted from a class; presumably the plain `features`/`label` from
# the cell above were intended.  Left as-is.
plt.scatter(self.features['petal_length'],
            self.features['petal_width'],
            c=self.label,
            cmap='viridis')

plt.xlabel('petal_length')
plt.ylabel('petal_width')
plt.show()
# -

# The figure on the left displays the distribution of features for sepal
# length and sepal width. The figure on the right displays the features
# for petal length and petal width.

# |![sepal](/images/Feature1.png)|![petal](/images/Feature2.png)|
# |:---|:---:|
# |Sepal Length and Width|Petal Length and Width|

# ## Model Building and Training With Keras

# ### Setup Tensorflow and Keras
# We implement the model layers with the tf.keras API. The API handles the
# complexity of connections among model layers; the layer is the
# fundamental data structure for processing the Iris data. Our network
# topology has densely connected layers.

# +
# Setup tensorflow and Keras modules
import tensorflow as tf
from tensorflow.keras import layers
# -

# ### Create Model with Keras
# Each layer has exactly one input tensor and one output tensor, so we use
# the tf.keras.Sequential() model, which is a linear stack of layers (a
# non-linear topology with residual connections or layer sharing would not
# fit this model).

# +
# The layers accept 2-D input tensors of shape (None, 4): four flower
# measurements per row and an unspecified batch dimension.  Both hidden
# layers have 10 units; the final layer returns a tensor of dimension 3,
# one logit per class (Setosa, Versicolor, Virginica).
model = tf.keras.Sequential()

# Rectified Linear Unit (ReLU) is the activation function for the
# hidden layers.
model.add(tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(3))
# -

# ### The Loss and Gradient Function
# Gradient descent updates the layer weights to minimize the error, which
# we compute with tf.keras.losses.SparseCategoricalCrossentropy().  The
# loss function returns the average loss across the training data.

# +
# Computes the sparse categorical crossentropy loss.
# from_logits=True: the model head outputs raw logits, not probabilities.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# -

# We use tf.GradientTape to record automatic differentiation. The
# trainable variables (weights and biases) are watched automatically.

# +
# GradientTape automatically watches trainable variables; its resources
# are released when GradientTape.gradient() is called.  The gradients are
# then used to optimize the model.
with tf.GradientTape() as Tape:
    prediction = model(features, training=True)
    loss = loss_object(label, prediction)
grads = Tape.gradient(loss, model.trainable_variables)
# -

# ### Create The Optimizer
# The next step is to minimize the loss function defined above. We use
# tf.keras.optimizers.SGD() to update the weights and biases of the
# network via gradient descent.

# +
# We create the optimizer and set the learning rate to 0.01.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
# -

# ### Train The Model
# The goal is for the model to learn from the training data and then
# predict unseen data with high accuracy.  Overfitting occurs when the
# model scores highly on training data but performs poorly on unseen
# data.  The training process consists of 100 epochs; we iterate through
# each epoch.
# And for each of the epochs, we iterate through the batched training
# dataset, let the model make predictions, and apply the loss and
# optimization methods above to update the model's weights and biases.

# +
# Model training process using batches of 32 samples.
# Each epoch is one pass through the dataset.
# Keep tracking the values for plotting.
loss_value_list=[]
model_accuracy_list=[]

for epochs in range(100):
    # Fresh per-epoch metric accumulators.
    loss_value = tf.keras.metrics.Mean()
    accuracy = tf.keras.metrics.SparseCategoricalAccuracy()

    # Train the model on each (features, label) batch of the dataset.
    for features, label in dataset:
        # GradientTape automatically watches trainable variables; its
        # resources are released when GradientTape.gradient() is called.
        with tf.GradientTape() as Tape:
            prediction = model(features, training=True)
            loss = loss_object(label, prediction)
        grads = Tape.gradient(loss, model.trainable_variables)

        # The optimizer applies the stochastic gradient descent update.
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Track the loss and accuracy for visualization.
        # NOTE(review): this runs a second forward pass per batch just to
        # score accuracy.
        loss_value.update_state(loss)
        accuracy.update_state(label, model(features, training=True))

    # Append this epoch's loss value and accuracy result to the lists.
    loss_value_list.append(loss_value.result())
    model_accuracy_list.append(accuracy.result())
# -

# ### Visualize the Loss Function

# +
# Setup the plot by using the matplotlib module.
import matplotlib.pyplot as plt

# +
# Visualize the loss over time.
# TensorBoard can also help to visualize training metrics;
# here we use matplotlib to create a basic metrics chart.
fig1, axes = plt.subplots(2, sharex=True, figsize=(12,8))
fig1.suptitle("Train Metrics")

axes[0].set_ylabel("LOSS", fontsize=14)
axes[0].plot(loss_value_list)

axes[1].set_ylabel("ACCURACY", fontsize=14)
axes[1].set_xlabel("EPOCHS", fontsize=14)
axes[1].plot(model_accuracy_list)
plt.show()
# -

# The figures below present training loss and accuracy over time. The
# model shows no surprising behaviour; the loss and accuracy values
# satisfy our expectations.

# |![LOSS](/images/LossFig.png)|![ACCURACY](/images/Accuracy.png)|
# |:---|---:|
# |Training loss|Training Accuracy|

# ## Model Testing
# With the model trained, we measure its metrics on the testing data.
# Similar to training, we download the test dataset and parse it with
# tf.data.experimental.make_csv_dataset; the difference is that during
# testing the model's `training` parameter is set to False.

# ### Parsing Testing Data
# We download the test data from
# https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv.
# Processing the testing data is similar to the training data.

# +
# Model evaluation using test data (a single epoch).
# The dataset is downloaded and stored on the local machine.
# NOTE(review): `self.makeDataCSV` / `self.pack2` reference a class
# context that does not exist at notebook top level -- these cells look
# pasted from a class.  Left as-is.
dataset = self.makeDataCSV("C:/Users/JunnanLu123/iris_test.csv")
dataset = dataset.map(self.pack2)
# -

# Unlike training, we iterate through each row of the testing data in one
# epoch, compute the predictions, and compare the model output to the
# actual value.

# +
# Calculates how often predictions equal labels.
accuracy = tf.keras.metrics.Accuracy()
test_accuracy_list=[]

# +
# We iterate over every sample from the dataset and compare the model's
# prediction against the actual label to measure accuracy across the
# entire test dataset.
for features, label in dataset: prediction = model(features, training=False) prediction = tf.nn.softmax(prediction, axis=1,name='softmax') prediction = tf.argmax(prediction, axis=1, output_type=tf.dtypes.int32) print (prediction) accuracy.update_state(label, prediction) print (accuracy.result()) test_accuracy_list.append(accuracy.result()) print ("TEST Accuracy {:1.2%}".format(accuracy.result())) # - # Our model archives test accuracy at 93.33% after 100 epchos of model training loops. # ## Using Trained Model to Predict # Now, we are going to use our trained model to compute some unlabeled data. The dataset is downloaded from "https://gist.github.com/curran/a08a1080b88344b0c8a7#file-iris-csv". We parse and extract the feature data from the dataset similar to methods mentioned earlier. The only difference is that we choose first 10 rows of data to make a batch. # + # Making data from the CSV file with specified batch size # The tf.data.experimental.make_csv_dataset method return # parsed dataset def makeDataFromCSV(self, name, batchSize): # Specified column names to make a suitable format column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species'] feature_names = column_names[:-1] label_name = column_names[-1] dataset = tf.data.experimental.make_csv_dataset(name, batch_size=batchSize, column_names = column_names, label_name = label_name, num_epochs = 1) return dataset # + # Extracting features from the dataset. In here, # we take 10 samples from the features data # letting our model to predict predict_data = makeDataFromCSV("C:/Users/Byzantin/source/repos/Data1/Data1/iris1.csv",10) predict_data = predict_data.map(self.pack2) features, labels = unpackData(predict_data) # + # Helper function for making the feature and # label batch def unpackData(self, dataset): features, labels = next(iter(dataset)) return features, labels # + # List of three class names # Labeled data mapped with list index. 
class_name = ['Iris Setosa', 'Iris Versicolor', 'Iris Virginica']

# +
# Using the trained model to predict unlabeled data.
# prediction stores model result.
prediction = model(features, training=False)

# Iterating through the model result.
for index, logits in enumerate(prediction):
    # Record the class index with maximum probability.
    class_index = tf.argmax(logits).numpy()
    # Calculate the class softmax probability.
    # BUG FIX: the original code multiplied the softmax probability by 10.
    # The "{:2.2%}" format specifier already scales the value by 100, so the
    # extra factor reported probabilities of up to 1000%. The raw softmax
    # output is already a probability in [0, 1] and needs no extra scaling.
    probability = tf.nn.softmax(logits)[class_index]
    # Display the index, class name, and predicted class probability
    name = class_name[class_index]
    print ("index {}, class: {}, probability {:2.2%}".format(index,name,probability))
# -

# The figure below denotes the model predict results for 10 rows of input data.

# ![RESULTS](/images/predict.png)

# The figure below presents the loss surface in a 3D plot.

# ![LOSS](/images/Loss.png)

# ## Summary

# We build and train a machine learning model to predict the Iris flowers. The model, that we build, has four layers with 2 hidden layers. The final output layer consists of three outputs denoting three classes of Iris flower. Our model is implemented with Python, Tensorflow, and the Keras machine learning framework. The learning algorithm for our model to predict is a forward multilayer neural network. We refine the learning algorithm by updating learning metrics such as network weights and bias with the Stochastic Gradient Descent algorithm. During the testing phase, the model reaches an accuracy of 93.33%. We, then, are able to use our trained model to predict flowers on unlabeled data. In the future, we will need to test our model on noisy data. Also, we intend to implement the back propagation algorithm to train our model and compare the model performance with just feed forward training.

# ## References

# [1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Tensorflower Gardener Flower. https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough

# [2] <NAME>, <NAME>, <NAME>.
*Pattern Classification*. John Wiley & Sons Inc, United States, 2006.
Iris_data_Machine_Learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # # Overview of some project # # ## Modelling # 1. [data (pre)processing](data.ipynb): raw data is prepared for further use (e.g. getting rid of outliers, filling gaps) # 2. [model setup](setup.ipynb): model setups are summarized and compared # 3. [model skill](skill.ipynb): model skill is assessed for multiple models #
talk_5_notebooks_vs_scripts/notebooks_project_work/nb_overview.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # # Greybox Fuzzing # # In the [previous chapter](MutationFuzzer.ipynb), we have introduced _mutation-based fuzzing_, a technique that generates fuzz inputs by applying small mutations to given inputs. In this chapter, we show how to _guide_ these mutations towards specific goals such as coverage. The algorithms in this book stem from the popular [American Fuzzy Lop](http://lcamtuf.coredump.cx/afl/) (AFL) fuzzer, in particular from its [AFLFast](https://github.com/mboehme/aflfast) and [AFLGo](https://github.com/aflgo/aflgo) flavors. We will explore the greybox fuzzing algorithm behind AFL and how we can exploit it to solve various problems for automated vulnerability detection. # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "subslide"} # **Prerequisites** # # * Reading the introduction on [mutation-based fuzzing](MutationFuzzer.ipynb) is recommended. # + slideshow={"slide_type": "skip"} import bookutils # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Ingredients for Greybox Fuzzing # # We start with discussing the most important parts we need for mutational testing and goal guidance. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ### Background # # AFL is a *mutation-based fuzzer*. Meaning, AFL generates new inputs by slightly modifying a seed input (i.e., mutation), or by joining the first half of one input with the second half of another (i.e., splicing). 
# + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # AFL is also a *greybox fuzzer* (not blackbox nor whitebox). Meaning, AFL leverages coverage-feedback to learn how to reach deeper into the program. It is not entirely blackbox because AFL leverages at least *some* program analysis. It is not entirely whitebox either because AFL does not build on heavyweight program analysis or constraint solving. Instead, AFL uses lightweight program instrumentation to glean some information about the (branch) coverage of a generated input. # If a generated input increases coverage, it is added to the seed corpus for further fuzzing. # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # To instrument a program, AFL injects a piece of code right after every conditional jump instruction. When executed, this so-called trampoline assigns the exercised branch a unique identifier and increments a counter that is associated with this branch. For efficiency, only a coarse branch hit count is maintained. In other words, for each input the fuzzer knows which branches and roughly how often they are exercised. # The instrumentation is usually done at compile-time, i.e., when the program source code is compiled to an executable binary. However, it is possible to run AFL on uninstrumented binaries using tools such as a virtual machine (e.g., [QEMU](https://github.com/mirrorer/afl/blob/master/qemu_mode)) or a dynamic instrumentation tool (e.g., [Intel PinTool](https://github.com/vanhauser-thc/afl-pin)). For Python programs, we can collect coverage information without any instrumentation (see chapter on [collecting coverage](Coverage.ipynb#Coverage-of-Basic-Fuzzing)). # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ### Mutator and Seed # # We introduce specific classes for mutating a seed. 
# + slideshow={"slide_type": "skip"} import random from Coverage import Coverage, population_coverage # + [markdown] slideshow={"slide_type": "fragment"} # First, we'll introduce the `Mutator` class. Given a seed input `inp`, the mutator returns a slightly modified version of `inp`. In the [chapter on greybox grammar fuzzing](GreyboxGrammarFuzzer.ipynb), we extend this class to consider the input grammar for smart greybox fuzzing. # + slideshow={"slide_type": "fragment"} class Mutator(object): def __init__(self): self.mutators = [ self.delete_random_character, self.insert_random_character, self.flip_random_character ] # + [markdown] slideshow={"slide_type": "subslide"} # For insertion, we add a random character in a random position. # + slideshow={"slide_type": "fragment"} class Mutator(Mutator): def insert_random_character(self,s): """Returns s with a random character inserted""" pos = random.randint(0, len(s)) random_character = chr(random.randrange(32, 127)) return s[:pos] + random_character + s[pos:] # + [markdown] slideshow={"slide_type": "fragment"} # For deletion, if the string is non-empty choose a random position and delete the character. Otherwise, use the insertion-operation. # + slideshow={"slide_type": "subslide"} class Mutator(Mutator): def delete_random_character(self,s): """Returns s with a random character deleted""" if s == "": return self.insert_random_character(s) pos = random.randint(0, len(s) - 1) return s[:pos] + s[pos + 1:] # + [markdown] slideshow={"slide_type": "fragment"} # For substitution, if the string is non-empty choose a random position and flip a random bit in the character. Otherwise, use the insertion-operation. 
# + slideshow={"slide_type": "subslide"} class Mutator(Mutator): def flip_random_character(self,s): """Returns s with a random bit flipped in a random position""" if s == "": return self.insert_random_character(s) pos = random.randint(0, len(s) - 1) c = s[pos] bit = 1 << random.randint(0, 6) new_c = chr(ord(c) ^ bit) return s[:pos] + new_c + s[pos + 1:] # + [markdown] slideshow={"slide_type": "fragment"} # The main method is `mutate` which chooses a random mutation operator from the list of operators. # + slideshow={"slide_type": "subslide"} class Mutator(Mutator): def mutate(self, inp): """Return s with a random mutation applied""" mutator = random.choice(self.mutators) return mutator(inp) # + [markdown] slideshow={"slide_type": "fragment"} # Let's try the mutator. You can actually interact with such a "cell" and try other inputs by loading this chapter as Jupyter notebook. After opening, run all cells in the notebook using "Kernel -> Restart & Run All". # + slideshow={"slide_type": "fragment"} Mutator().mutate("good") # + [markdown] slideshow={"slide_type": "subslide"} # ### Power Schedules # # Now we introduce a new concept; the *power schedule*. A power schedule distributes the precious fuzzing time among the seeds in the population. Our objective is to maximize the time spent fuzzing those (most progressive) seeds which lead to higher coverage increase in shorter time. # # We call the likelihood with which a seed is chosen from the population as the seed's *energy*. Throughout a fuzzing campaign, we would like to prioritize seeds that are more promising. Simply said, we do not want to waste energy fuzzing non-progressive seeds. We call the procedure that decides a seed's energy as the fuzzer's *power schedule*. For instance, AFL's schedule assigns more energy to seeds that are shorter, that execute faster, and yield coverage increases more often. # # First, there is some information that we need to attach to each seed in addition to the seed's data. 
Hence, we define the following `Seed` class. # + slideshow={"slide_type": "subslide"} class Seed(object): def __init__(self, data): """Set seed data""" self.data = data def __str__(self): """Returns data as string representation of the seed""" return self.data __repr__ = __str__ # + [markdown] slideshow={"slide_type": "subslide"} # The power schedule that is implemented below assigns each seed the same energy. Once a seed is in the population, it will be fuzzed as often as any other seed in the population. # # In Python, we can squeeze long for-loops into much smaller statements. # * `lambda x: ...` returns a function that takes `x` as input. Lambda allows for quick definitions unnamed functions. # * `map(f, l)` returns a list where the function `f` is applied to each element in list `l`. # * `np.random.choice(l,p)` returns element `l[i]` with probability in `p[i]`. # + slideshow={"slide_type": "skip"} import numpy as np # + slideshow={"slide_type": "subslide"} class PowerSchedule(object): def assignEnergy(self, population): """Assigns each seed the same energy""" for seed in population: seed.energy = 1 def normalizedEnergy(self, population): """Normalize energy""" energy = list(map(lambda seed: seed.energy, population)) sum_energy = sum(energy) # Add up all values in energy norm_energy = list(map(lambda nrg: nrg/sum_energy, energy)) return norm_energy def choose(self, population): """Choose weighted by normalized energy.""" import numpy as np self.assignEnergy(population) norm_energy = self.normalizedEnergy(population) seed = np.random.choice(population, p=norm_energy) return seed # + [markdown] slideshow={"slide_type": "subslide"} # Let's see whether this power schedule chooses seeds uniformly at random. We ask the schedule 10k times to choose a seed from the population of three seeds (A, B, C) and keep track of the number of times we have seen each seed. We should see each seed about 3.3k times. 
# + slideshow={"slide_type": "subslide"} population = [Seed("A"), Seed("B"), Seed("C")] schedule = PowerSchedule() hits = { "A" : 0, "B" : 0, "C" : 0 } for i in range(10000): seed = schedule.choose(population) hits[seed.data] += 1 hits # + [markdown] slideshow={"slide_type": "fragment"} # Looks good. Every seed has been chosen about a third of the time. # + [markdown] slideshow={"slide_type": "subslide"} # ### Runner and Sample Program # We'll start with a small sample program of six lines. In order to collect coverage information during execution, we import the `FunctionCoverageRunner` class from the chapter on [mutation-based fuzzing](MutationFuzzer.ipynb#Guiding-by-Coverage). # # The `FunctionCoverageRunner` constructor takes a Python `function` to execute. The function `run` takes an input, passes it on to the Python `function`, and collects the coverage information for this execution. The function `coverage()` returns a list of tuples `(function name, line number)` for each statement that has been covered in the Python `function`. # + slideshow={"slide_type": "skip"} from MutationFuzzer import FunctionCoverageRunner # + [markdown] slideshow={"slide_type": "fragment"} # The `crashme()` function raises an exception for the input "bad!". Let's see which statements are covered for the input "good". # + slideshow={"slide_type": "subslide"} def crashme (s): if len(s) > 0 and s[0] == 'b': if len(s) > 1 and s[1] == 'a': if len(s) > 2 and s[2] == 'd': if len(s) > 3 and s[3] == '!': raise Exception() # + slideshow={"slide_type": "fragment"} crashme_runner = FunctionCoverageRunner(crashme) crashme_runner.run("good") list(crashme_runner.coverage()) # + [markdown] slideshow={"slide_type": "fragment"} # In `crashme`, the input "good" only covers the if-statement in line 2. The branch condition `len(s) > 0 and s[0] == 'b'` evaluates to False. 
# + [markdown] slideshow={"slide_type": "slide"} # ## Blackbox, Greybox, and Boosted Greybox Fuzzing # # ### Blackbox Mutation-based Fuzzer # Let's integrate both the mutator and power schedule into a fuzzer. We'll start with a blackbox fuzzer -- which does *not* leverage any coverage information. # # Our `MutationFuzzer` class inherits from the [Fuzzer](Fuzzer.ipynb#Fuzzer-Classes) class. For now, we only need to know the functions `fuzz` which returns a generated input and `runs` which executes `fuzz` a specified number of times. For our `MutationFuzzer` class, we override the function `fuzz`. # + slideshow={"slide_type": "skip"} from Fuzzer import Fuzzer # + [markdown] slideshow={"slide_type": "subslide"} # The `MutationFuzzer` is constructed with a set of initial seeds, a mutator, and a power schedule. Throughout the fuzzing campaign, it maintains a seed corpus called `population`. The function `fuzz` returns either an unfuzzed seed from the initial seeds, or the result of fuzzing a seed in the population. The function `create_candidate` handles the latter. It randomly chooses an input from the population and applies a number of mutations. 
# + slideshow={"slide_type": "subslide"} class MutationFuzzer(Fuzzer): def __init__(self, seeds, mutator, schedule): self.seeds = seeds self.mutator = mutator self.schedule = schedule self.inputs = [] self.reset() def reset(self): """Reset the initial population and seed index""" self.population = list(map(lambda x: Seed(x), self.seeds)) self.seed_index = 0 def create_candidate(self): """Returns an input generated by fuzzing a seed in the population""" seed = self.schedule.choose(self.population) # Stacking: Apply multiple mutations to generate the candidate candidate = seed.data trials = min(len(candidate), 1 << random.randint(1,5)) for i in range(trials): candidate = self.mutator.mutate(candidate) return candidate def fuzz(self): """Returns first each seed once and then generates new inputs""" if self.seed_index < len(self.seeds): # Still seeding self.inp = self.seeds[self.seed_index] self.seed_index += 1 else: # Mutating self.inp = self.create_candidate() self.inputs.append(self.inp) return self.inp # + [markdown] slideshow={"slide_type": "subslide"} # Okay, let's take the mutation fuzzer for a spin. Given a single seed, we ask it to generate three inputs. # + slideshow={"slide_type": "fragment"} seed_input = "good" mutation_fuzzer = MutationFuzzer([seed_input], Mutator(), PowerSchedule()) print(mutation_fuzzer.fuzz()) print(mutation_fuzzer.fuzz()) print(mutation_fuzzer.fuzz()) # + [markdown] slideshow={"slide_type": "subslide"} # Let's see how many statements the mutation-based blackbox fuzzer covers in a campaign with n=30k inputs. # # The fuzzer function `runs(crashme_runner, trials=n)` generates `n` inputs and executes them on the `crashme` function via the `crashme_runner`. As stated earlier, the `crashme_runner` also collects coverage information. 
# + slideshow={"slide_type": "skip"} import time n = 30000 # + slideshow={"slide_type": "fragment"} blackbox_fuzzer = MutationFuzzer([seed_input], Mutator(), PowerSchedule()) start = time.time() blackbox_fuzzer.runs(FunctionCoverageRunner(crashme), trials=n) end = time.time() "It took the blackbox mutation-based fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n) # + [markdown] slideshow={"slide_type": "subslide"} # In order to measure coverage, we import the [population_coverage](Coverage.ipynb#Coverage-of-Basic-Fuzzing) function. It takes a set of inputs and a Python function, executes the inputs on that function and collects coverage information. Specifically, it returns a tuple `(all_coverage, cumulative_coverage)` where `all_coverage` is the set of statements covered by all inputs, and `cumulative_coverage` is the number of statements covered as the number of executed inputs increases. We are just interested in the latter to plot coverage over time. # + slideshow={"slide_type": "skip"} from Coverage import population_coverage # + [markdown] slideshow={"slide_type": "fragment"} # We extract the generated inputs from the blackbox fuzzer and measure coverage as the number of inputs increases. # + slideshow={"slide_type": "fragment"} _, blackbox_coverage = population_coverage(blackbox_fuzzer.inputs, crashme) bb_max_coverage = max(blackbox_coverage) "The blackbox mutation-based fuzzer achieved a maximum coverage of %d statements." % bb_max_coverage # + [markdown] slideshow={"slide_type": "subslide"} # The following generated inputs increased the coverage for our `crashme` [example](#Runner-and-Sample-Program). # + slideshow={"slide_type": "fragment"} [seed_input] + \ [blackbox_fuzzer.inputs[idx] for idx in range(len(blackbox_coverage)) if blackbox_coverage[idx] > blackbox_coverage[idx - 1] ] # + [markdown] slideshow={"slide_type": "subslide"} # ***Summary***. This is how a blackbox mutation-based fuzzer works. 
We have integrated the *mutator* to generate inputs by fuzzing a provided set of initial seeds and the *power schedule* to decide which seed to choose next. # + [markdown] slideshow={"slide_type": "subslide"} # ### Greybox Mutation-based Fuzzer # # In contrast to a blackbox fuzzer, a greybox fuzzer like [AFL](http://lcamtuf.coredump.cx/afl/) _does_ leverage coverage information. Specifically, a greybox fuzzer adds to the seed population generated inputs which increase code coverage. # # The method `run()` is inherited from the [Fuzzer](Fuzzer.ipynb#Fuzzer-Classes) class. It is called to generate and execute exactly one input. We override this function to add an input to the `population` that increases coverage. The greybox fuzzer attribute `coverages_seen` maintains the set of statements, that have previously been covered. # + slideshow={"slide_type": "subslide"} class GreyboxFuzzer(MutationFuzzer): def reset(self): """Reset the initial population, seed index, coverage information""" super().reset() self.coverages_seen = set() self.population = [] # population is filled during greybox fuzzing def run(self, runner): """Run function(inp) while tracking coverage. If we reach new coverage, add inp to population and its coverage to population_coverage """ result, outcome = super().run(runner) new_coverage = frozenset(runner.coverage()) if new_coverage not in self.coverages_seen: # We have new coverage seed = Seed(self.inp) seed.coverage = runner.coverage() self.coverages_seen.add(new_coverage) self.population.append(seed) return (result, outcome) # + [markdown] slideshow={"slide_type": "subslide"} # Let's take our greybox fuzzer for a spin. # + slideshow={"slide_type": "fragment"} seed_input = "good" greybox_fuzzer = GreyboxFuzzer([seed_input], Mutator(), PowerSchedule()) start = time.time() greybox_fuzzer.runs(FunctionCoverageRunner(crashme), trials=n) end = time.time() "It took the greybox mutation-based fuzzer %0.2f seconds to generate and execute %d inputs." 
% (end - start, n) # + [markdown] slideshow={"slide_type": "fragment"} # Does the greybox fuzzer cover more statements after generating the same number of test inputs? # + slideshow={"slide_type": "subslide"} _, greybox_coverage = population_coverage(greybox_fuzzer.inputs, crashme) gb_max_coverage = max(greybox_coverage) "Our greybox mutation-based fuzzer covers %d more statements" % (gb_max_coverage - bb_max_coverage) # + [markdown] slideshow={"slide_type": "fragment"} # Our seed population for our [example](#Runner-and-Sample-Program) now contains the following seeds. # + slideshow={"slide_type": "fragment"} greybox_fuzzer.population # + [markdown] slideshow={"slide_type": "fragment"} # Coverage-feedback is indeed helpful. The new seeds are like bread crumbs or milestones that guide the fuzzer to progress more quickly into deeper code regions. Following is a simple plot showing the coverage achieved over time for both fuzzers on our simple [example](#Runner-and-Sample-Program). # + slideshow={"slide_type": "fragment"} # %matplotlib inline # + slideshow={"slide_type": "skip"} import matplotlib.pyplot as plt # + slideshow={"slide_type": "subslide"} line_bb, = plt.plot(blackbox_coverage, label="Blackbox") line_gb, = plt.plot(greybox_coverage, label="Greybox") plt.legend(handles=[line_bb, line_gb]) plt.title('Coverage over time') plt.xlabel('# of inputs') plt.ylabel('lines covered'); # + [markdown] slideshow={"slide_type": "subslide"} # ***Summary***. We have seen how a greybox fuzzer "discovers" interesting seeds that can lead to more progress. From the input `good`, our greybox fuzzer has slowly learned how to generate the input `bad!` which raises the exception. Now, how can we do that even faster? # # ***Try it***. How much coverage would be achieved over time using a blackbox *generation-based* fuzzer? Try plotting the coverage for all three fuzzers. You can define the blackbox generation-based fuzzer as follows. 
# ```Python # from Fuzzer import RandomFuzzer # blackbox_gen_fuzzer = RandomFuzzer(min_length=4, max_length=4, char_start=32, char_range=96) # ``` # You can execute your own code by opening this chapter as Jupyter notebook. # # ***Read***. This is the high-level view how AFL works, one of the most successful vulnerability detection tools. If you are interested in the technical details, have a look at: https://github.com/mirrorer/afl/blob/master/docs/technical_details.txt # + [markdown] slideshow={"slide_type": "subslide"} # ### Boosted Greybox Fuzzer # Our boosted greybox fuzzer assigns more energy to seeds that promise to achieve more coverage. We change the power schedule such that seeds that exercise "unusual" paths have more energy. With *unusual paths*, we mean paths that are not exercised very often by generated inputs. # # In order to identify which path is exercised by an input, we leverage the function `getPathID` from the section on [trace coverage](WhenIsEnough.ipynb#Trace-Coverage). # + slideshow={"slide_type": "skip"} import pickle # serializes an object by producing a byte array from all the information in the object import hashlib # produces a 128-bit hash value from a byte array # + [markdown] slideshow={"slide_type": "fragment"} # The function `getPathID` returns a unique hash for a coverage set. # + slideshow={"slide_type": "fragment"} def getPathID(coverage): """Returns a unique hash for the covered statements""" pickled = pickle.dumps(coverage) return hashlib.md5(pickled).hexdigest() # + [markdown] slideshow={"slide_type": "subslide"} # There are several ways to assign energy based on how unusual the exercised path is. In this case, we implement an exponential power schedule which computes the energy $e(s)$ for a seed $s$ as follows # $$e(s) = \frac{1}{f(p(s))^a}$$ # where # * $p(s)$ returns the ID of the path exercised by $s$, # * $f(p)$ returns the number of times the path $p$ is exercised by generated inputs, and # * $a$ is a given exponent. 
# + slideshow={"slide_type": "subslide"} class AFLFastSchedule(PowerSchedule): def __init__(self, exponent): self.exponent = exponent def assignEnergy(self, population): """Assign exponential energy inversely proportional to path frequency""" for seed in population: seed.energy = 1 / (self.path_frequency[getPathID(seed.coverage)] ** self.exponent) # + [markdown] slideshow={"slide_type": "fragment"} # In the greybox fuzzer, lets keep track of the number of times $f(p)$ each path $p$ is exercised, and update the power schedule. # + slideshow={"slide_type": "subslide"} class CountingGreyboxFuzzer(GreyboxFuzzer): def reset(self): """Reset path frequency""" super().reset() self.schedule.path_frequency = {} def run(self, runner): """Inform scheduler about path frequency""" result, outcome = super().run(runner) path_id = getPathID(runner.coverage()) if not path_id in self.schedule.path_frequency: self.schedule.path_frequency[path_id] = 1 else: self.schedule.path_frequency[path_id] += 1 return(result, outcome) # + [markdown] slideshow={"slide_type": "subslide"} # Okay, lets run our boosted greybox fuzzer $n=10k$ times on our simple [example](#Runner-and-Sample-Program). We set the exponentent of our exponential power schedule to $a=5$. # + slideshow={"slide_type": "fragment"} n = 10000 seed_input = "good" fast_schedule = AFLFastSchedule(5) fast_fuzzer = CountingGreyboxFuzzer([seed_input], Mutator(), fast_schedule) start = time.time() fast_fuzzer.runs(FunctionCoverageRunner(crashme), trials=n) end = time.time() "It took the fuzzer w/ exponential schedule %0.2f seconds to generate and execute %d inputs." 
% (end - start, n) # + slideshow={"slide_type": "subslide"} x_axis = np.arange(len(fast_schedule.path_frequency)) y_axis = list(fast_schedule.path_frequency.values()) plt.bar(x_axis, y_axis) plt.xticks(x_axis) plt.ylim(0, n) #plt.yscale("log") #plt.yticks([10,100,1000,10000]) plt; # + slideshow={"slide_type": "subslide"} print(" path id 'p' : path frequency 'f(p)'") fast_schedule.path_frequency # + [markdown] slideshow={"slide_type": "fragment"} # How does it compare to our greybox fuzzer with the classical power schedule? # + slideshow={"slide_type": "subslide"} seed_input = "good" orig_schedule = PowerSchedule() orig_fuzzer = CountingGreyboxFuzzer([seed_input], Mutator(), orig_schedule) start = time.time() orig_fuzzer.runs(FunctionCoverageRunner(crashme), trials=n) end = time.time() "It took the fuzzer w/ original schedule %0.2f seconds to generate and execute %d inputs." % (end - start, n) # + slideshow={"slide_type": "subslide"} x_axis = np.arange(len(orig_schedule.path_frequency)) y_axis = list(orig_schedule.path_frequency.values()) plt.bar(x_axis, y_axis) plt.xticks(x_axis) plt.ylim(0, n) #plt.yscale("log") #plt.yticks([10,100,1000,10000]) plt; # + slideshow={"slide_type": "subslide"} print(" path id 'p' : path frequency 'f(p)'") orig_schedule.path_frequency # + [markdown] slideshow={"slide_type": "subslide"} # The exponential power schedule shaves some of the executions of the "high-frequency path" off and adds them to the lower-frequency paths. The path executed least often is either not at all exercised using the traditional power schedule or it is exercised much less often. # # Let's have a look at the energy that is assigned to the discovered seeds. 
# + slideshow={"slide_type": "subslide"} orig_energy = orig_schedule.normalizedEnergy(orig_fuzzer.population) for (seed, norm_energy) in zip(orig_fuzzer.population, orig_energy): print("'%s', %0.5f, %s" % (getPathID(seed.coverage), norm_energy, repr(seed.data))) # + slideshow={"slide_type": "subslide"} fast_energy = fast_schedule.normalizedEnergy(fast_fuzzer.population) for (seed, norm_energy) in zip(fast_fuzzer.population, fast_energy): print("'%s', %0.5f, %s" % (getPathID(seed.coverage), norm_energy, repr(seed.data))) # + [markdown] slideshow={"slide_type": "subslide"} # Exactly. Our new exponential power schedule assigns most energy to the seed exercising the lowest-frequency path. # # Let's compare them in terms of coverage achieved over time for our simple [example](#Runner-and-Sample-Program). # + slideshow={"slide_type": "fragment"} _, orig_coverage = population_coverage(orig_fuzzer.inputs, crashme) _, fast_coverage = population_coverage(fast_fuzzer.inputs, crashme) line_orig, = plt.plot(orig_coverage, label="Original Greybox Fuzzer") line_fast, = plt.plot(fast_coverage, label="Boosted Greybox Fuzzer") plt.legend(handles=[line_orig, line_fast]) plt.title('Coverage over time') plt.xlabel('# of inputs') plt.ylabel('lines covered'); # + [markdown] slideshow={"slide_type": "subslide"} # As expected, the boosted greybox fuzzer (with the exponential power schedule) achieves coverage much faster. # # ***Summary***. By fuzzing seeds more often that exercise low-frequency paths, we can explore program paths in a much more efficient manner. # # ***Try it***. You can try other exponents for the fast power schedule, or change the power schedule entirely. Note that a large exponent can lead to overflows and imprecisions in the floating point arithmetic producing unexpected results. You can execute your own code by opening this chapter as Jupyter notebook. # # ***Read***. 
You can find out more about fuzzer boosting in the paper "[Coverage-based Greybox Fuzzing as Markov Chain](https://mboehme.github.io/paper/CCS16.pdf)" \cite{boehme2018greybox} and check out the implementation into AFL at [http://github.com/mboehme/aflfast]. # + [markdown] slideshow={"slide_type": "subslide"} # ### Complex Example: HTMLParser # Let's compare the three fuzzers on a more realistic example, the Python [HTML parser](https://docs.python.org/3/library/html.parser.html). We run all three fuzzers $n=5k$ times on the HTMLParser, starting with the "empty" seed. # + slideshow={"slide_type": "skip"} from html.parser import HTMLParser import traceback # + slideshow={"slide_type": "fragment"} # create wrapper function def my_parser(inp): parser = HTMLParser() # resets the HTMLParser object for every fuzz input parser.feed(inp) n = 5000 seed_input = " " # empty seed blackbox_fuzzer = MutationFuzzer([seed_input], Mutator(), PowerSchedule()) greybox_fuzzer = GreyboxFuzzer([seed_input], Mutator(), PowerSchedule()) boosted_fuzzer = CountingGreyboxFuzzer([seed_input], Mutator(), AFLFastSchedule(5)) # + slideshow={"slide_type": "subslide"} start = time.time() blackbox_fuzzer.runs(FunctionCoverageRunner(my_parser), trials=n) greybox_fuzzer.runs(FunctionCoverageRunner(my_parser), trials=n) boosted_fuzzer.runs(FunctionCoverageRunner(my_parser), trials=n) end = time.time() "It took all three fuzzers %0.2f seconds to generate and execute %d inputs." % (end - start, n) # + [markdown] slideshow={"slide_type": "fragment"} # How do the fuzzers compare in terms of coverage over time? 
# + slideshow={"slide_type": "subslide"} _, black_coverage = population_coverage(blackbox_fuzzer.inputs, my_parser) _, grey_coverage = population_coverage(greybox_fuzzer.inputs, my_parser) _, boost_coverage = population_coverage(boosted_fuzzer.inputs, my_parser) line_black, = plt.plot(black_coverage, label="Blackbox Fuzzer") line_grey, = plt.plot(grey_coverage, label="Greybox Fuzzer") line_boost, = plt.plot(boost_coverage, label="Boosted Greybox Fuzzer") plt.legend(handles=[line_boost, line_grey, line_black]) plt.title('Coverage over time') plt.xlabel('# of inputs') plt.ylabel('lines covered'); # + [markdown] slideshow={"slide_type": "fragment"} # Both greybox fuzzers clearly outperform the blackbox fuzzer. The reason is that the greybox fuzzer "discovers" interesting inputs along the way. Let's have a look at the last 10 inputs generated by the greybox versus blackbox fuzzer. # + slideshow={"slide_type": "subslide"} blackbox_fuzzer.inputs[-10:] # + slideshow={"slide_type": "fragment"} greybox_fuzzer.inputs[-10:] # + [markdown] slideshow={"slide_type": "subslide"} # The greybox fuzzer executes much more complicated inputs, many of which include special characters such as opening and closing brackets and chevrons (i.e., `<, >, [, ]`). Yet, many important keywords, such as `<html>` are still missing. # # To inform the fuzzer about these important keywords, we will need [grammars](Grammars.ipynb); in the section on [smart greybox fuzzing](LangFuzzer.ipynb), we combine them with the techniques above. # # ***Try it***. You can re-run these experiments to understand the variance of fuzzing experiments. Sometimes, the fuzzer that we claim to be superior does not seem to outperform the inferior fuzzer. In order to do this, you just need to open this chapter as Jupyter notebook. # + [markdown] slideshow={"slide_type": "slide"} # ## Directed Greybox Fuzzing # Sometimes, you just want the fuzzer to reach some dangerous location in the source code. 
This could be a location where you expect a buffer overflow. Or you want to test a recent change in your code base. How do we direct the fuzzer towards these locations? # # In this chapter, we introduce directed greybox fuzzing as an optimization problem. # + [markdown] slideshow={"slide_type": "subslide"} # ### Solving the Maze # To provide a meaningful example where you can easily change the code complexity and target location, we generate the maze source code from the maze provided as string. This example is loosely based on an old [blog post](https://feliam.wordpress.com/2010/10/07/the-symbolic-maze/) on symbolic execution by <NAME> (Quick shout-out!). # # You simply specify the maze as a string. Like so. # + slideshow={"slide_type": "subslide"} maze_string = """ +-+-----+ |X| | | | --+ | | | | | | +-- | | | |#| +-----+-+ """ # + [markdown] slideshow={"slide_type": "fragment"} # The code is generated using the function `generate_maze_code`. We'll hide the implementation and instead explain what it does. If you are interested in the coding, go [here](ControlFlow.ipynb#Example:-Maze). # + slideshow={"slide_type": "skip"} from ControlFlow import generate_maze_code # + slideshow={"slide_type": "fragment"} maze_code = generate_maze_code(maze_string) exec(maze_code) # + [markdown] slideshow={"slide_type": "subslide"} # The objective is to get the "X" to the "#" by providing inputs `D` for down, `U` for up, `L` for left, and `R` for right. # + slideshow={"slide_type": "fragment"} print(maze("DDDDRRRRUULLUURRRRDDDD")) # Appending one more 'D', you have reached the target. # + [markdown] slideshow={"slide_type": "subslide"} # Each character in `maze_string` represents a tile. For each tile, a tile-function is generated. # * If the current tile is "benign" (` `), the tile-function corresponding to the next input character (D, U, L, R) is called. Unexpected input characters are ignored. If no more input characters are left, it returns "VALID" and the current maze state. 
# * If the current tile is a "trap" (`+`,`|`,`-`), it returns "INVALID" and the current maze state. # * If the current tile is the "target" (`#`), it returns "SOLVED" and the current maze state. # # ***Try it***. You can test other sequences of input characters, or even change the maze entirely. In order to execute your own code, you just need to open this chapter as Jupyter notebook. # # To get an idea of the generated code, lets look at the static [call graph](https://en.wikipedia.org/wiki/Call_graph). A call graph shows the order in which functions can be executed. # # *Troubleshooting*: if nothing is shown after executing `callgraph()`, check `pyan` or `pyan3` is correctly installed. If `pyan3` is installed, it is necessary to modify `construct_callgraph()` in [ControlFlow.ipynb](ControlFlow.ipynb#Call-Graph-Helpers) accordingly, as follows: # # ```python # ... # - os.system(f'pyan {file_name} --uses --defines --colored --grouped --annotated --dot > {cg_file}') # + os.system(f'pyan3 {file_name} --uses --defines --colored --grouped --annotated --dot > {cg_file}') # ... 
# ```
#

# + slideshow={"slide_type": "skip"}
from ControlFlow import callgraph

# + slideshow={"slide_type": "subslide"}
callgraph(maze_code)

# + [markdown] slideshow={"slide_type": "subslide"}
# ### A First Attempt
#
# We introduce a `DictMutator` class which mutates strings by inserting a keyword from a given dictionary:

# + slideshow={"slide_type": "fragment"}
class DictMutator(Mutator):
    """Mutator that inserts keywords from a given dictionary into a string."""

    def __init__(self, dictionary):
        """Constructor.

        `dictionary` - a list of keyword strings to insert into seeds.
        """
        super().__init__()
        self.dictionary = dictionary
        self.mutators.append(self.insert_from_dictionary)

    def insert_from_dictionary(self, s):
        """Returns s with a keyword from the dictionary inserted"""
        pos = random.randint(0, len(s))
        random_keyword = random.choice(self.dictionary)
        return s[:pos] + random_keyword + s[pos:]

# + [markdown] slideshow={"slide_type": "subslide"}
# To fuzz the maze, we extend the `DictMutator` class to append dictionary keywords to the end of the seed and to remove a character from the end of the seed.

# + slideshow={"slide_type": "subslide"}
class MazeMutator(DictMutator):
    """DictMutator that can also append a keyword or delete the last character."""

    def __init__(self, dictionary):
        """Constructor.

        `dictionary` - a list of keyword strings (e.g. the maze moves).
        """
        super().__init__(dictionary)
        self.mutators.append(self.delete_last_character)
        self.mutators.append(self.append_from_dictionary)

    def append_from_dictionary(self, s):
        """Returns s with a keyword from the dictionary appended"""
        random_keyword = random.choice(self.dictionary)
        return s + random_keyword

    def delete_last_character(self, s):
        """Returns s without the last character (s unchanged if empty)"""
        # BUG FIX: previously fell off the end and implicitly returned
        # None for an empty string, which breaks any caller expecting
        # the mutator to return a string mutant.
        if len(s) > 0:
            return s[:-1]
        return s

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's try a standard greybox fuzzer with the classic power schedule and our extended maze mutator (n=10k).
# + slideshow={"slide_type": "fragment"}
n = 10000
seed_input = " "  # empty seed

maze_mutator = MazeMutator(["L", "R", "U", "D"])
maze_schedule = PowerSchedule()
maze_fuzzer = GreyboxFuzzer([seed_input], maze_mutator, maze_schedule)

start = time.time()
maze_fuzzer.runs(FunctionCoverageRunner(maze), trials=n)
end = time.time()

"It took the fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)

# + [markdown] slideshow={"slide_type": "subslide"}
# We will need to print statistics for several fuzzers. Why don't we define a function for that?

# + slideshow={"slide_type": "subslide"}
def print_stats(fuzzer):
    """Run `maze` on every seed in the fuzzer's population and print how
    many seeds solved the maze, were valid, or were invalid."""
    total = len(fuzzer.population)
    solved = valid = invalid = 0
    for seed in fuzzer.population:
        outcome = maze(str(seed.data))
        # Check "INVALID" before "VALID": "VALID" is a substring of "INVALID".
        if "INVALID" in outcome:
            invalid += 1
        elif "VALID" in outcome:
            valid += 1
        elif "SOLVED" in outcome:
            solved += 1
            if solved == 1:
                print("First solution: %s" % repr(seed))
        else:
            # Unexpected maze outcome - should not happen.
            print("??")

    print("""Out of %d seeds,
* %4d solved the maze,
* %4d were valid but did not solve the maze, and
* %4d were invalid""" % (total, solved, valid, invalid))

# + [markdown] slideshow={"slide_type": "subslide"}
# How well does our good, old greybox fuzzer do?

# + slideshow={"slide_type": "fragment"}
print_stats(maze_fuzzer)

# + [markdown] slideshow={"slide_type": "subslide"}
# It probably didn't solve the maze a single time. How can we make the fuzzer aware how "far" a seed is from reaching the target? If we know that, we can just assign more energy to that seed.
#
# ***Try it***. Print the statistics for the boosted fuzzer using the `AFLFastSchedule` and the `CountingGreyboxFuzzer`. It will likely perform much better than the unboosted greybox fuzzer: The lowest-probability path happens to be also the path which reaches the target. You can execute your own code by opening this chapter as Jupyter notebook.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Computing Function-Level Distance
#
# Using the static call graph for the maze code and the target function, we can compute the distance of each function $f$ to the target $t$ as the length of the shortest path between $f$ and $t$.
#
# Fortunately, the generated maze code includes a function called `target_tile` which returns the name of the target-function.

# + slideshow={"slide_type": "fragment"}
target = target_tile()
target

# + [markdown] slideshow={"slide_type": "subslide"}
# Now, we need to find the corresponding function in the call graph. The function `get_callgraph` returns the call graph for the maze code as [networkx](https://networkx.github.io/) graph. Networkx provides some useful functions for graph analysis.

# + slideshow={"slide_type": "skip"}
import networkx as nx
from ControlFlow import get_callgraph

# + slideshow={"slide_type": "fragment"}
cg = get_callgraph(maze_code)
# Find the call-graph node whose (mangled) name contains the target tile.
for node in cg.nodes():
    if target in node:
        target_node = node
        break
target_node

# + [markdown] slideshow={"slide_type": "subslide"}
# We can now generate the function-level distance. The dictionary `distance` contains for each function the distance to the target-function. If there is no path to the target, we assign a maximum distance (`0xFFFF`).
#
# The function `nx.shortest_path_length(CG, node, target_node)` returns the length of the shortest path from function `node` to function `target_node` in the call graph `CG`.

# + slideshow={"slide_type": "subslide"}
distance = {}
for node in cg.nodes():
    # Strip the module prefix from mangled node names ("module__function").
    if "__" in node:
        name = node.split("__")[-1]
    else:
        name = node
    try:
        distance[name] = nx.shortest_path_length(cg, node, target_node)
    except (nx.NetworkXNoPath, nx.NodeNotFound):
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and hid unrelated errors.
        # No path to the target: assign the maximum distance.
        distance[name] = 0xFFFF

# + [markdown] slideshow={"slide_type": "fragment"}
# These are the distance values for all tile-functions on the path to the target function.
# + slideshow={"slide_type": "subslide"} {k: distance[k] for k in list(distance) if distance[k] < 0xFFFF} # + [markdown] slideshow={"slide_type": "subslide"} # ***Summary***. Using the static call graph and the target function $t$, we have shown how to compute the function-level distance of each function $f$ to the target $t$. # # ***Try it***. You can try and execute your own code by opening this chapter as Jupyter notebook. # # * How do we compute distance if there are multiple targets? (Hint: [Geometric Mean](https://en.wikipedia.org/wiki/Geometric_mean)). # * Given the call graph (CG) and the control-flow graph (CFG$_f$) for each function $f$, how do we compute basic-block (BB)-level distance? (Hint: In CFG$_f$, measure the BB-level distance to *calls* of functions on the path to the target function. Remember that BB-level distance in functions with higher function-level distance is higher, too.) # # ***Read***. If you are interested in other aspects of search, you can follow up by reading the chapter on [Search-based Fuzzing](SearchBasedFuzzer.ipynb). If you are interested, how to solve the problems above, you can have a look at our paper on "[Directed Greybox Fuzzing](https://mboehme.github.io/paper/CCS17.pdf)". # + [markdown] slideshow={"slide_type": "subslide"} # ### Directed Power Schedule # Now that we know how to compute the function-level distance, let's try to implement a power schedule that assigns *more energy to seeds with a lower average distance* to the target function. Notice that the distance values are all *pre-computed*. These values are injected into the program binary, just like the coverage instrumentation. In practice, this makes the computation of the average distance *extremely efficient*. # # If you really want to know. 
# Given the function-level distance $d_f(s,t)$ of a function $s$ to a function $t$ in call graph $CG$, our directed power schedule computes the seed distance $d(i,t)$ for a seed $i$ to function $t$ as $d(i,t)=\dfrac{\sum_{s\in CG} d_f(s,t)}{|CG|}$ where $|CG|$ is the number of nodes in the call graph $CG$.

# + slideshow={"slide_type": "subslide"}
class DirectedSchedule(PowerSchedule):
    """Power schedule that assigns more energy to seeds whose covered
    functions are (on average) closer to the target function."""

    def __init__(self, distance, exponent):
        """Constructor.

        `distance` - map from function name to its distance to the target.
        `exponent` - exponent amplifying the energy of close seeds.
        """
        self.distance = distance
        self.exponent = exponent

    def __getFunctions__(self, coverage):
        """Return the set of function names occurring in a seed's coverage."""
        functions = set()
        for f, _ in set(coverage):
            functions.add(f)
        return functions

    def assignEnergy(self, population):
        """Assigns each seed energy inversely proportional to the
        average function-level distance to target."""
        for seed in population:
            if not hasattr(seed, 'distance'):
                num_dist = 0
                sum_dist = 0
                for f in self.__getFunctions__(seed.coverage):
                    # BUG FIX: use the schedule's own distance map
                    # (self.distance) instead of relying on the
                    # notebook-global `distance`; also test membership
                    # directly instead of the O(n) `f in list(...)`.
                    if f in self.distance:
                        sum_dist += self.distance[f]
                        num_dist += 1
                # Guard: a seed covering no known function previously
                # raised ZeroDivisionError; treat it as maximally far.
                seed.distance = sum_dist / num_dist if num_dist > 0 else 0xFFFF
            seed.energy = (1 / seed.distance) ** self.exponent

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's see how the directed schedule performs against the good, old greybox fuzzer.

# + slideshow={"slide_type": "fragment"}
directed_schedule = DirectedSchedule(distance, 3)
directed_fuzzer = GreyboxFuzzer([seed_input], maze_mutator, directed_schedule)

start = time.time()
directed_fuzzer.runs(FunctionCoverageRunner(maze), trials=n)
end = time.time()

"It took the fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n)

# + slideshow={"slide_type": "subslide"}
print_stats(directed_fuzzer)

# + [markdown] slideshow={"slide_type": "fragment"}
# It probably didn't solve a single maze either, but we have more valid solutions. So, there is definitely progress.
#
# Let's have a look at the distance values for each seed.
# + slideshow={"slide_type": "subslide"}
y = [seed.distance for seed in directed_fuzzer.population]
x = range(len(y))
plt.scatter(x, y)
plt.ylim(0, max(y))
plt.xlabel("Seed ID")
plt.ylabel("Distance");

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's normalize the y-axis and improve the importance of the small distance seeds.
#
# ### Improved Directed Power Schedule
# The improved directed schedule normalizes seed distance between the minimal and maximal distance.
# Again, if you really want to know. Given the seed distance $d(i,t)$ of a seed $i$ to a function $t$, our improved power schedule computes the new seed distance $d'(i,t)$ as
# $$
# d'(i,t)=\begin{cases}
# 1 & \text{if } d(i,t) = \text{minD} = \text{maxD}\\
# \text{maxD} - \text{minD} & \text{if } d(i,t) = \text{minD} \neq \text{maxD}\\
# \frac{\text{maxD} - \text{minD}}{d(i,t)-\text{minD}} & \text{otherwise}
# \end{cases}
# $$
# where
# $$\text{minD}=\min_{i\in T}[d(i,t)]$$
# and
# $$\text{maxD}=\max_{i\in T}[d(i,t)]$$
# where $T$ is the set of seeds (i.e., the population).

# + slideshow={"slide_type": "subslide"}
class AFLGoSchedule(DirectedSchedule):
    """Directed schedule that normalizes each seed's distance between the
    population-wide minimal and maximal distance (AFLGo-style)."""

    def assignEnergy(self, population):
        """Assigns each seed energy inversely proportional to the
        average function-level distance to target."""
        min_dist = 0xFFFF
        max_dist = 0

        # First pass: compute (and cache) each seed's average distance
        # while tracking the population-wide minimum and maximum.
        for seed in population:
            if not hasattr(seed, 'distance'):
                num_dist = 0
                sum_dist = 0
                for f in self.__getFunctions__(seed.coverage):
                    # BUG FIX: use self.distance, not the notebook-global
                    # `distance`; membership test directly on the dict
                    # instead of the O(n) `f in list(...)`.
                    if f in self.distance:
                        sum_dist += self.distance[f]
                        num_dist += 1
                # Guard: a seed covering no known function previously
                # raised ZeroDivisionError; treat it as maximally far.
                seed.distance = sum_dist / num_dist if num_dist > 0 else 0xFFFF
            if seed.distance < min_dist:
                min_dist = seed.distance
            if seed.distance > max_dist:
                max_dist = seed.distance

        # Second pass: normalize distances into energy.
        for seed in population:
            if seed.distance == min_dist:
                if min_dist == max_dist:
                    seed.energy = 1
                else:
                    seed.energy = max_dist - min_dist
            else:
                seed.energy = ((max_dist - min_dist) / (seed.distance - min_dist))

# + [markdown] slideshow={"slide_type": "subslide"}
# Let's see how the improved power schedule performs.
# + slideshow={"slide_type": "fragment"} aflgo_schedule = AFLGoSchedule(distance, 3) aflgo_fuzzer = GreyboxFuzzer([seed_input], maze_mutator, aflgo_schedule) start = time.time() aflgo_fuzzer.runs(FunctionCoverageRunner(maze), trials=n) end = time.time() "It took the fuzzer %0.2f seconds to generate and execute %d inputs." % (end - start, n) # + slideshow={"slide_type": "subslide"} print_stats(aflgo_fuzzer) # + [markdown] slideshow={"slide_type": "fragment"} # In contrast to all previous power schedules, this one generates hundreds of solutions. It has generated many solutions. # # Let's filter out all ignored input characters from the first solution. The function `filter(f, seed.data)` returns a list of elements `e` in `seed.data` where the function `f` applied on `e` returns True. # + slideshow={"slide_type": "subslide"} for seed in aflgo_fuzzer.population: s = maze(str(seed.data)) if "SOLVED" in s: filtered = "".join(list(filter(lambda c: c in "UDLR", seed.data))) print(filtered) break # + [markdown] slideshow={"slide_type": "subslide"} # This is definitely a solution for the maze specified at the beginning! # # ***Summary***. After pre-computing the function-level distance to the target, we can develop a power schedule that assigns more energy to a seed with a smaller average function-level distance to the target. By normalizing seed distance values between the minimum and maximum seed distance, we can further boost the directed power schedule. # # ***Try it***. Implement and evaluate a simpler directed power that uses the minimal (rather than average) function-level distance. What is the downside of using the minimal distance? In order to execute your code, you just need to open this chapter as Jupyter notebook. # # ***Read***. 
You can find out more about directed greybox fuzzing in the equally-named paper "[Directed Greybox Fuzzing](https://mboehme.github.io/paper/CCS17.pdf)" \cite{boehme2017greybox} and check out the implementation into AFL at [http://github.com/aflgo/aflgo](http://github.com/aflgo/aflgo). # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Lessons Learned # # * A *greybox fuzzer* generates thousands of inputs per second. Pre-processing and lightweight instrumentation # * allows to maintain the efficiency *during* the fuzzing campaign, and # * still provides enough information to control progress and slightly steer the fuzzer. # * The *power schedule* allows to steer/control the fuzzer. For instance, # * Our [boosted greybox fuzzer](#Fuzzer-Boosting) spends more energy on seeds that exercise "unlikely" paths. The hope is that the generated inputs exercise even more unlikely paths. This in turn increases the number of paths explored per unit time. # * Our [directed greybox fuzzer](#Directed-Greybox-Fuzzing) spends more energy on seeds that are "closer" to a target location. The hope is that the generated inputs get even closer to the target. # * The *mutator* defines the fuzzer's search space. [Customizing the mutator](GreyboxFuzzer.ipynb#A-First-Attempt) for the given program allows to reduce the search space to only relevant inputs. In a couple of chapters, we'll learn about [dictionary-based, and grammar-based mutators](GreyboxGrammarFuzzer.ipynb) to increase the ratio of valid inputs generated. # # + [markdown] button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Next Steps # # Our aim is still to sufficiently cover functionality, such that we can trigger as many bugs as possible. To this end, we focus on two classes of techniques: # # 1. Try to cover as much _specified_ functionality as possible. 
Here, we would need a _specification of the input format,_ distinguishing between individual input elements such as (in our case) numbers, operators, comments, and strings – and attempting to cover as many of these as possible. We will explore this as it comes to [grammar-based testing](GrammarFuzzer.ipynb), and especially in [grammar-based mutations](GreyboxGrammarFuzzer.ipynb). # # 2. Try to cover as much _implemented_ functionality as possible. The concept of a "population" that is systematically "evolved" through "mutations" will be explored in depth when discussing [search-based testing](SearchBasedFuzzer.ipynb). Furthermore, [symbolic testing](SymbolicFuzzer.ipynb) introduces how to systematically reach program locations by solving the conditions that lie on their paths. # # These two techniques make up the gist of the book; and, of course, they can also be combined with each other. As usual, we provide runnable code for all. Enjoy! # + [markdown] slideshow={"slide_type": "subslide"} # We're done, so we clean up: # + slideshow={"slide_type": "skip"} import shutil import os # + slideshow={"slide_type": "fragment"} if os.path.exists('callgraph.dot'): os.remove('callgraph.dot') if os.path.exists('callgraph.py'): os.remove('callgraph.py') # + [markdown] slideshow={"slide_type": "slide"} # ## Background # # * **Find out more about AFL**: http://lcamtuf.coredump.cx/afl/ # * **Learn about LibFuzzer** (another famous greybox fuzzer): http://llvm.org/docs/LibFuzzer.html # * **How quickly must a whitebox fuzzer exercise each path to remain more efficient than a greybox fuzzer?** <NAME> and <NAME>. 2016. [A Probabilistic Analysis of the Efficiency of Automated Software Testing](https://mboehme.github.io/paper/TSE15.pdf), IEEE TSE, 42:345-360 \cite{boehme2016efficiency} # + [markdown] button=false new_sheet=true run_control={"read_only": false} slideshow={"slide_type": "slide"} # ## Exercises # # To be added. \todo{}
docs/beta/notebooks/GreyboxFuzzer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Reliance Test # ## predict the risk of hospital readmission for patients # ## Let's configure our environment import sys # !{sys.executable} -m pip install sagemaker==2.42.0 -U # !{sys.executable} -m pip install sagemaker-experiments # !{sys.executable} -m pip install xgboost==1.3.3 # #!pip freeze | grep sagemaker # #!pip freeze | grep xgboost # + import pandas as pd import boto3 import sagemaker sess = boto3.Session() region = sess.region_name sm = sess.client('sagemaker') role = sagemaker.get_execution_role() region = sess.region_name account_id = sess.client('sts', region_name=region).get_caller_identity()["Account"] bucket = 'sagemaker-studio-{}-{}'.format(sess.region_name, account_id) prefix = 'xgboost-readmission-predictor' try: if sess.region_name == "us-east-1": sess.client('s3').create_bucket(Bucket=bucket) else: sess.client('s3').create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': sess.region_name}) except Exception as e: print("Looks like you already have a bucket of this name. 
That's good!") framework_version = '1.2-2' docker_image_name = sagemaker.image_uris.retrieve(framework='xgboost', region=region, version=framework_version) # Workaround while versions are not updated in SM SDK framework_version = '1.3-1' docker_image_name = docker_image_name[:-5] + framework_version print("Setting some useful environment variables (bucket, prefix, region, docker_image_name)...") # %store bucket # %store prefix # %store region # %store docker_image_name # %store framework_version # - # --- # ## Let's upload the data to S3 local_raw_path = "readmission.csv" raw_dir = f"{prefix}/data/raw" s3uri_raw = sagemaker.s3.S3Uploader.upload(local_raw_path, f's3://{bucket}/{raw_dir}') s3uri_raw # Store the raw data S3 URI for later: # + import sagemaker sess = sagemaker.Session() bucket = sess.default_bucket() prefix = "Reliance/xgboost-risk-predictor" # Define IAM role import boto3 import re from sagemaker import get_execution_role role = get_execution_role() role # + import io import os import sys import time import json import pandas as pd import numpy as np import matplotlib.pyplot as plt from IPython.display import display as dis from time import strftime, gmtime from sagemaker.inputs import TrainingInput from sagemaker.serializers import CSVSerializer from IPython import display import pandas as pd import seaborn as sns # - #loading Dataset df = pd.read_csv("readmission.csv") #displaying first 10 rows of data df.head(10).T #checking shape of the dataset df.shape # + #Checking data types of each variable num_vars = df.columns[df.dtypes != 'object'] cat_vars = df.columns[df.dtypes == 'object'] print(num_vars, cat_vars) # - #Checking for missing values in dataset df[num_vars].isnull().sum() # Distribution of Readmission sns.countplot(df['Readmission.Status']).set_title('Distrinution of Readmission') # !pip install imbalanced-learn f = sns.countplot(x='Readmission.Status', data=df, hue='Gender') plt.legend(['Female', 'Male']) f.set_title("readmission by 
gender") f.set_xticklabels(['0', '1']) plt.xlabel(""); heat_map = sns.heatmap(df.corr(method='pearson'), annot=True, fmt='.2f', linewidths=2) heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=45); # + plt.subplot(121) df[df['Readmission.Status']==1].groupby('Age_Category')['Age'].count().plot(kind='bar') plt.title('Age Distribution of Patients readmitted') plt.subplot(122) df[df['Readmission.Status']==0].groupby('Age_Category')['Age'].count().plot(kind='bar') plt.title('Age Distribution of Patients not readmitted') # - # From the above visulaization, we have more elderly generally in the datset. f = sns.countplot(x='HCC.Riskscore', data=df, hue='Readmission.Status') f.set_title('Readmitted by Risk score') plt.ylabel('') plt.xlabel('Readmitted.status') plt.legend(['Not readmitted', 'Readmitted']); # ## Let us One Hot-encode our categorical variables. categorical_columns = ["Gender", "Race", "DRG.class", "DRG.Complication"] for column in categorical_columns: tempdf = pd.get_dummies(df[column], prefix=column) df = pd.merge( left=df, right=tempdf, left_index=True, right_index=True, ) df = df.drop(columns=column) print(df) # ## From the Data exploration, there were no missing values and the features were well distributed. But target variable was imbalanced. Let's drop the ID.codes column. #drop the ID.Codes column. 
df.drop(df.columns[[0]], axis=1, inplace=True) print(df) # ## Let's split the dataset train_data, validation_data, test_data = np.split( df.sample(frac=1, random_state=1729), [int(0.7 * len(df)), int(0.9 * len(df))], ) train_data.shape, validation_data.shape, test_data.shape train_data.head(2) validation_data.head(2) test_data.head(2) df.shape # ## Create CSV files for the split datasets # + train_file_name = "train.csv" validation_file_name = "validation.csv" test_file_name = "test.csv" train_data.to_csv(train_file_name , header=False, index=False) validation_data.to_csv(validation_file_name, header=False, index=False) test_data.to_csv(test_file_name, header=False, index=False) # - # ## Upload files to s3 # + train_dir = f"{prefix}/data/train" val_dir = f"{prefix}/data/validation" test_dir = f"{prefix}/data/test" s3uri_train = sagemaker.s3.S3Uploader.upload(train_file_name, f's3://{bucket}/{train_dir}') s3uri_validation = sagemaker.s3.S3Uploader.upload(validation_file_name, f's3://{bucket}/{val_dir}') s3uri_test = sagemaker.s3.S3Uploader.upload(test_file_name, f's3://{bucket}/{test_dir}') s3uri_train, s3uri_validation, s3uri_test # - # ## Let us Model the data #Supress default INFO logging import logging logger = logging.getLogger() logger.setLevel(logging.ERROR) # + import pprint from time import strftime, gmtime import boto3 import sagemaker from sagemaker import get_execution_role from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig from smexperiments.experiment import Experiment from smexperiments.trial import Trial from smexperiments.trial_component import TrialComponent # - # --- # ## Train # # We'll use the XGBoost library to train a class of models known as gradient boosted decision trees on the data that we just uploaded. 
# from sagemaker.inputs import TrainingInput
s3_input_train = TrainingInput(s3_data=s3uri_train, content_type='csv')
s3_input_validation = TrainingInput(s3_data=s3uri_validation, content_type='csv')

pprint.pprint(s3_input_train.config)
pprint.pprint(s3_input_validation.config)

# ### Amazon SageMaker Experiments allows me to keep track of my model training

# Helper to create timestamps
create_date = lambda: strftime("%Y-%m-%d-%H-%M-%S", gmtime())

readmission_predictor_experiment = Experiment.create(
    experiment_name=f"readmission-predictor-xgboost-{create_date()}",
    description="Using xgboost to predict customer churn",
    sagemaker_boto_client=boto3.client('sagemaker'))

# #### Hyperparameters
# Now we can specify our XGBoost hyperparameters.

hyperparams = {"max_depth": 5,
               "subsample": 0.8,
               "num_round": 600,
               "eta": 0.2,
               "gamma": 4,
               "min_child_weight": 6,
               "objective": 'binary:logistic',
               "verbosity": 0
               }

# +
# %%writefile xgboost_readmission_predictor.py
import argparse
import json
import os
import pickle
import random
import tempfile
import urllib.request

import xgboost
from smdebug import SaveConfig
from smdebug.xgboost import Hook


def parse_args():
    """Parse XGBoost hyperparameters, smdebug settings and SageMaker channels."""
    parser = argparse.ArgumentParser()

    parser.add_argument("--max_depth", type=int, default=5)
    parser.add_argument("--eta", type=float, default=0.2)
    parser.add_argument("--gamma", type=int, default=4)
    parser.add_argument("--min_child_weight", type=int, default=6)
    parser.add_argument("--subsample", type=float, default=0.8)
    parser.add_argument("--verbosity", type=int, default=0)
    parser.add_argument("--objective", type=str, default="binary:logistic")
    parser.add_argument("--num_round", type=int, default=50)
    parser.add_argument("--smdebug_path", type=str, default=None)
    parser.add_argument("--smdebug_frequency", type=int, default=1)
    parser.add_argument("--smdebug_collections", type=str, default='metrics')
    parser.add_argument("--output_uri", type=str, default="/opt/ml/output/tensors",
                        help="S3 URI of the bucket where tensor data will be stored.")

    # Data channels and model directory are injected by SageMaker via env vars.
    parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
    parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION'))
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])

    args = parser.parse_args()
    return args


def create_smdebug_hook(out_dir, train_data=None, validation_data=None,
                        frequency=1, collections=None,):
    """Build an smdebug Hook that saves the given collections every `frequency` steps."""
    save_config = SaveConfig(save_interval=frequency)
    hook = Hook(
        out_dir=out_dir,
        train_data=train_data,
        validation_data=validation_data,
        save_config=save_config,
        include_collections=collections,
    )
    return hook


def main():
    """Train an XGBoost model on the SageMaker channels and pickle it to model_dir."""
    args = parse_args()

    train, validation = args.train, args.validation
    parse_csv = "?format=csv&label_column=0"
    dtrain = xgboost.DMatrix(train + parse_csv)
    dval = xgboost.DMatrix(validation + parse_csv)

    watchlist = [(dtrain, "train"), (dval, "validation")]

    params = {
        "max_depth": args.max_depth,
        "eta": args.eta,
        "gamma": args.gamma,
        "min_child_weight": args.min_child_weight,
        "subsample": args.subsample,
        "verbosity": args.verbosity,
        "objective": args.objective}

    # The output_uri is the URI for the s3 bucket where the metrics will be saved.
    output_uri = (
        args.smdebug_path if args.smdebug_path is not None else args.output_uri
    )

    collections = (
        args.smdebug_collections.split(',')
        if args.smdebug_collections is not None else None
    )

    hook = create_smdebug_hook(
        out_dir=output_uri,
        frequency=args.smdebug_frequency,
        collections=collections,
        train_data=dtrain,
        validation_data=dval,
    )

    bst = xgboost.train(
        params=params,
        dtrain=dtrain,
        evals=watchlist,
        num_boost_round=args.num_round,
        callbacks=[hook])

    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)

    model_location = os.path.join(args.model_dir, 'xgboost-model')
    pickle.dump(bst, open(model_location, 'wb'))


if __name__ == "__main__":
    main()


def model_fn(model_dir):
    """Load a model. For XGBoost Framework, a default function to load a model is not provided.
    Users should provide customized model_fn() in script.

    Args:
        model_dir: a directory where model is saved.

    Returns:
        A XGBoost model.
        XGBoost model format type.
    """
    model_files = (file for file in os.listdir(model_dir)
                   if os.path.isfile(os.path.join(model_dir, file)))
    model_file = next(model_files)
    try:
        # First assume the artifact is a pickled Booster (how main() saves it).
        booster = pickle.load(open(os.path.join(model_dir, model_file), 'rb'))
        format = 'pkl_format'
    except Exception as exp_pkl:
        try:
            # Fall back to XGBoost's native binary format.
            booster = xgboost.Booster()
            booster.load_model(os.path.join(model_dir, model_file))
            format = 'xgb_format'
        except Exception as exp_xgb:
            raise ModelLoadInferenceError("Unable to load model: {} {}".format(str(exp_pkl), str(exp_xgb)))
    booster.set_param('nthread', 1)
    return booster, format


def predict_fn(input_object, model):
    """
    Perform prediction on the deserialized object, with the loaded model.
    """
    X_test = xgboost.DMatrix(input_object.values)
    predictions_probs = model.predict(X_test)
    predictions = predictions_probs.round()
    return {"predictions": predictions}


def input_fn(request_body, content_type):
    """
    Perform preprocessing task on inference dataset.
    """
    if content_type == "text/csv":
        df = pd.read_csv(StringIO(request_body), header=None)
        return df
    else:
        raise ValueError("{} not supported by script!".format(content_type))
# -

from sagemaker.xgboost.estimator import XGBoost

role = sagemaker.get_execution_role()
sm_sess = sagemaker.session.Session()

# +
train_script_name = 'xgboost_readmission_predictor.py'

trial = Trial.create(trial_name=f'framework-mode-trial-{create_date()}',
                     experiment_name=readmission_predictor_experiment.experiment_name,
                     sagemaker_boto_client=boto3.client('sagemaker'))
# -

# ### Setting up Amazon SageMaker Debugger

debug_rules = [Rule.sagemaker(rule_configs.loss_not_decreasing()),
               Rule.sagemaker(rule_configs.overtraining()),
               Rule.sagemaker(rule_configs.overfit())
               ]

# +
framework_xgb = XGBoost(image_uri=docker_image_name,
                        entry_point=train_script_name,
                        role=role,
                        framework_version=framework_version,
                        py_version="py3",
                        hyperparameters=hyperparams,
                        instance_count=1,
                        instance_type='ml.m4.xlarge',
                        output_path=f's3://{bucket}/{prefix}/output',
                        base_job_name='readmission-predictor-experiment',
                        sagemaker_session=sm_sess,
                        rules=debug_rules
                        )

framework_xgb.fit(inputs={'train': s3_input_train,
                          'validation': s3_input_validation},
                  experiment_config={
                      'ExperimentName': readmission_predictor_experiment.experiment_name,
                      'TrialName': trial.trial_name,
                      'TrialComponentDisplayName': 'Training'}
                  )
# -

# ## Save training Job

training_job_name = framework_xgb.latest_training_job.job_name
training_job_name

# %store training_job_name

# ## Store the S3 URI and training script name where our training script was saved

framework_xgb.hyperparameters()

s3_modeling_code_uri = eval(framework_xgb.hyperparameters()["sagemaker_submit_directory"])
s3_modeling_code_uri

# %store s3_modeling_code_uri
# %store train_script_name

# ## Evaluation

# +
from sagemaker.s3 import S3Uploader, S3Downloader

# Get name of training job and other variables
# %store -r training_job_name
training_job_name

# +
estimator = sagemaker.estimator.Estimator.attach(training_job_name)
s3uri_model = estimator.model_data
print("\ns3uri_model =", s3uri_model)
S3Downloader.download(s3uri_model, ".")

# +
import json
import os
import tarfile
import logging
import pickle

import pandas as pd
import xgboost
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score

model_path = "model.tar.gz"
with tarfile.open(model_path) as tar:
    tar.extractall(path=".")

print("Loading xgboost model.")
model = pickle.load(open("xgboost-model", "rb"))
model
# -

print("Loading test input data")
test_path = "test.csv"
df = pd.read_csv(test_path, header=None)
df

print("Reading test data. We should get an `DMatrix` object...")
# First column is the label; the rest are features.
y_test = df.iloc[:, 0].to_numpy()
df.drop(df.columns[0], axis=1, inplace=True)
X_test = xgboost.DMatrix(df.values)
X_test

# +
print("Performing predictions against test data.")
predictions_probs = model.predict(X_test)
predictions = predictions_probs.round()

print("Creating classification evaluation report")
acc = accuracy_score(y_test, predictions)
auc = roc_auc_score(y_test, predictions_probs)
print("Accuracy =", acc)
print("AUC =", auc)
# -

# ### Creating a classification report

# +
import pprint

# The metrics reported can change based on the model used - check the link for the documentation
report_dict = {
    "binary_classification_metrics": {
        "accuracy": {
            "value": acc,
            "standard_deviation": "NaN",
        },
        "auc": {"value": auc, "standard_deviation": "NaN"},
    },
}

print("Classification report:")
pprint.pprint(report_dict)

# +
evaluation_output_path = os.path.join(".", "evaluation.json")
print("Saving classification report to {}".format(evaluation_output_path))

with open(evaluation_output_path, "w") as f:
    f.write(json.dumps(report_dict))

# +
# %%writefile evaluate.py
"""Evaluation script for measuring model accuracy."""
import json
import os
import tarfile
import logging
import pickle

import pandas as pd
import xgboost

logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

# May need to import additional metrics depending on what you are measuring.
# See https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html
from sklearn.metrics import classification_report, roc_auc_score, accuracy_score


def get_dataset(dir_path, dataset_name) -> pd.DataFrame:
    """Concatenate every CSV file found in `dir_path` into one DataFrame.

    Args:
        dir_path: directory holding the channel's data files.
        dataset_name: channel name, used only in the error message.

    Raises:
        ValueError: if the directory is empty (usually a mis-specified channel).
    """
    files = [os.path.join(dir_path, file) for file in os.listdir(dir_path)]
    if len(files) == 0:
        raise ValueError(('There are no files in {}.\n' +
                          'This usually indicates that the channel ({}) was incorrectly specified,\n' +
                          'the data specification in S3 was incorrectly specified or the role specified\n' +
                          'does not have permission to access the data.').format(files, dataset_name))
    raw_data = [pd.read_csv(file, header=None) for file in files]
    df = pd.concat(raw_data)
    return df


if __name__ == "__main__":
    model_path = "/opt/ml/processing/model/model.tar.gz"
    with tarfile.open(model_path) as tar:
        # FIX: was path="..", which extracted "xgboost-model" into the parent
        # directory while the pickle.load below reads it from the current
        # directory. Extract into the working directory instead.
        tar.extractall(path=".")

    logger.debug("Loading xgboost model.")
    model = pickle.load(open("xgboost-model", "rb"))

    logger.info("Loading test input data")
    test_path = "/opt/ml/processing/test"
    df = get_dataset(test_path, "test_set")

    logger.debug("Reading test data.")
    # First column is the label; remaining columns are features.
    y_test = df.iloc[:, 0].to_numpy()
    df.drop(df.columns[0], axis=1, inplace=True)
    X_test = xgboost.DMatrix(df.values)

    logger.info("Performing predictions against test data.")
    predictions_probs = model.predict(X_test)
    predictions = predictions_probs.round()

    logger.info("Creating classification evaluation report")
    acc = accuracy_score(y_test, predictions)
    auc = roc_auc_score(y_test, predictions_probs)

    # The metrics reported can change based on the model used, but it must be a specific name per (https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-model-quality-metrics.html)
    report_dict = {
        "binary_classification_metrics": {
            "accuracy": {
                "value": acc,
                "standard_deviation": "NaN",
            },
            "auc": {"value": auc, "standard_deviation": "NaN"},
        },
    }

    logger.info("Classification report:\n{}".format(report_dict))

    evaluation_output_path = os.path.join("/opt/ml/processing/evaluation", "evaluation.json")
    logger.info("Saving classification report to {}".format(evaluation_output_path))

    with open(evaluation_output_path, "w") as f:
        f.write(json.dumps(report_dict))
# -

# ## Let us run the above script

from sagemaker.processing import (
    ProcessingInput,
    ProcessingOutput,
    ScriptProcessor,
)

# Processing step for evaluation
processor = ScriptProcessor(
    image_uri=docker_image_name,
    command=["python3"],
    instance_type="ml.m5.xlarge",
    instance_count=1,
    base_job_name="readmission/eval-script",
    sagemaker_session=sm_sess,
    role=role,
)

entrypoint = "evaluate.py"

from time import strftime, gmtime

# Helper to create timestamps
create_date = lambda: strftime("%Y-%m-%d-%H-%M-%S", gmtime())

processor.run(
    code=entrypoint,
    inputs=[
        sagemaker.processing.ProcessingInput(
            source=s3uri_model,
            destination="/opt/ml/processing/model",
        ),
        sagemaker.processing.ProcessingInput(
            source=s3uri_test,
            destination="/opt/ml/processing/test",
        ),
    ],
    outputs=[
        sagemaker.processing.ProcessingOutput(
            output_name="evaluation", source="/opt/ml/processing/evaluation"
        ),
    ],
    job_name=f"ReadmissionEval-{create_date()}"
)

# +
# The "code" input of the job records where SageMaker uploaded evaluate.py.
for proc_in in processor.latest_job.inputs:
    if proc_in.input_name == "code":
        s3_evaluation_code_uri = proc_in.source

s3_evaluation_code_uri
# -

# %store s3_evaluation_code_uri

# ### Let's check our evaluation report

out_s3_report_uri = processor.latest_job.outputs[0].destination
out_s3_report_uri

reports_list = S3Downloader.list(out_s3_report_uri)
reports_list

# +
report = S3Downloader.read_file(reports_list[0])

print("=====Model Report====")
print(json.dumps(json.loads(report.split('\n')[0]), indent=2))

# +
model_name = "xgboost-readmission-1"
model = estimator.create_model(name=model_name)
container_def = model.prepare_container_def()
sm_sess.create_model(model_name, role, container_def)
# -

from sagemaker import clarify

clarify_processor = clarify.SageMakerClarifyProcessor(role=role,
                                                      instance_count=1,
                                                      instance_type='ml.m5.xlarge',
                                                      sagemaker_session=sm_sess)

# Label column first, then the model's 18 input features.
columns_headers = ['Readmission.Status', 'ER', 'LOS', 'Age', 'HCC.Riskscore ',
                   'Gender_F', 'Gender_M',
                   'Race_Black', 'Race_Hispanic', 'Race_Other', 'Race_White',
                   'DRG.class_MED', 'DRG.class_SURG', 'DRG.class_UNGROUP',
                   'DRG.Complication_MedicalMCC.CC', 'DRG.Complication_MedicalNoC',
                   'DRG.Complication_Other', 'DRG.Complication_SurgMCC.CC',
                   'DRG.Complication_SurgNoC']

# ### Writing ModelConfig and Bias config

# +
model_config = clarify.ModelConfig(model_name=model_name,
                                   instance_type='ml.m5.xlarge',
                                   instance_count=1,
                                   accept_type='text/csv',
                                   content_type='text/csv')
# -

# ## Explaining Predictions using SHAP

df.columns = columns_headers[1:]
df

# +
shap_config = clarify.SHAPConfig(baseline=[df.iloc[0].values.tolist()],
                                 num_samples=20,
                                 agg_method='mean_abs',
                                 save_local_shap_values=False)

explainability_output_path = 's3://{}/{}/clarify-explainability'.format(bucket, prefix)
explainability_data_config = clarify.DataConfig(s3_data_input_path=s3uri_train,
                                                s3_output_path=explainability_output_path,
                                                label='Readmission.Status',
                                                headers=columns_headers,
                                                dataset_type='text/csv')

# FIX: removed a second SHAPConfig assignment that immediately overwrote the
# config above and referenced `test_features`, a name never defined in this
# notebook (it would have raised NameError before run_explainability).
# -

clarify_processor.run_explainability(data_config=explainability_data_config,
                                     model_config=model_config,
                                     explainability_config=shap_config)

# ## Feature Explainability
# The Model has `18` input features. We computed KernelShap on the dataset `dataset` and display the `10` features with the greatest feature attribution.
# <br><img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAy4AAAG5CAYAAABoT2B5AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy86wFpkAAAACXBIWXMAAA9hAAAPYQGoP6dpAACUVUlEQVR4nOzdeVyN6f8/8NfRdjqltJwoURHZjZ0UIS1KtphUtoxlErINMwyVfamx5qOxpGQbWTJKSnZmGmPseyODsZSlSIk6vz/8Ol/HOW2m5cjr+Xicx2O67uu+r/d936dxv7uWWyCRSCQgIiIiIiJSYtUqOwAiIiIiIqLiMHEhIiIiIiKlx8SFiIiIiIiUHhMXIiIiIiJSekxciIiIiIhI6TFxISIiIiIipcfEhYiIiIiIlB4TFyKqMiQSCTIzM8HXUxEREVU9TFyIqMp4+fIldHV18fLly8oOhYiIiMoYExciIiIiIlJ6TFyIiIiIiEjpMXEhIiIiIiKlx8SFiIiIiIiUHhMXIiIiIiJSekxciIiIiIhI6TFxISIiIiIipcfEhYiIiIiIlJ5qZQdARFTWdl14AJF2ZmWHQUREVGV4tDKt7BDY40JERERERMqPiQsRERERESk9Ji5ElUAgECAgIKBU+9jZ2aFZs2blExARERGRkmPiQp+F8PBwCAQCnD17VuF2RQ/1eXl52LRpE+zs7KCvrw8NDQ2Ym5tjxIgRCo+TkpKCMWPGoF69ehAKhdDR0UHnzp2xYsUKZGdnFxnf8OHDIRAIpB8NDQ00bNgQs2fPRk5OzqefOBEREREB4OR8qqKys7PRv39/HDx4EF26dMEPP/wAfX19pKamYufOndi8eTP++ecfmJq+n2h24MABDBw4EBoaGhg6dCiaNWuG3NxcnDx5EtOmTcOVK1cQFhZWZJsaGhpYv349ACAjIwP79u3D3LlzkZKSgqioKLn4VFX56wcAtWrVwqtXrxRue/fuHUJDQ+Hj41PBUREREZGy4ZMTVUnTpk3DwYMH8dNPP8Hf319m25w5c/DTTz9Jf75z5w48PDxgZmaGpKQkGBsbS7eNGzcOt2/fxoEDB4ptU1VVFd7e3tKffX19YW1tjW3btiEkJAQ1a9aUbhMKhf/h7JRbfn4+cnNzS3yO7969w4sXLxQmcjNmzEB+fn5Zh0hERESfIQ4Voyrn/v37WLduHXr27CmXtACAiooKpk6dKu1tWbJkCV69eoUNGzbIJC0FLC0tMXHixFLHIRAIYGNjA4lEgr///ltu24dzXF6+fAl/f3+Ym5tDQ0MDRkZG6NmzJ86dO1dkG4cOHYJIJMLgwYPx7t07AEBCQgJsbGxQo0YNaGtrw8rKCj/88IPMfjk5OQgICEDDhg0hFAphbGyM/v37IyUlRVonKysLU6ZMQZ06daChoQErKyssW7YMEolE7lz8/PwQFRWFpk2bQkNDAwcPHgQAPHjwAD4+PqhZsyY0NDTQtGlTbNy4sdTXkoiIiIg9LvRZycjIQHp6ulz527dvpf8dFxeHd+/eYciQISU65v79+1GvXj1YW1uXWZwFUlNTAQB6enpF1hs7dix27doFPz8/NGnSBE+fPsXJkydx7do1tG7dWuE+v/76K9zd3fH1119j48aNUFFRwZUrV+Dq6ooWLVogKCgIGhoauH37Nk6dOiXdLy8vD66urjh8+DA8PDwwceJEvHz5EgkJCbh8+TLq168PiUQCNzc3HDlyBCNHjsRXX32F+Ph4TJs2DQ8ePJDpsQKApKQk7Ny5E35+fjA0NIS5uTkeP36Mjh07ShMbsViMuLg4jBw5EpmZmQqTSiIiIqLCMHGhz4q9vX2h25o2bQoAuHbtGgCgefPmxR4vMzMTDx48QJ8+fcokvoKkKiMjA3v37kV0dDSaNWsGKyurIvc7cOAARo0aheDgYGnZd999V2j93bt3w8PDA8OHD8f//vc/VKv2vv
M0ISEBubm5iIuLg6GhocJ9IyIicPjwYYSEhGDSpEnS8hkzZkh7U2JiYpCUlIR58+Zh5syZAN4Pmxs4cCBWrFgBPz8/1K9fX7rvjRs3cOnSJTRp0kRa9s033yAvLw+XLl2CgYEBgPcJ2uDBgxEQEIAxY8ZAU1OzyOtCREREVIBDxeizsmbNGiQkJMh9WrRoIa2Tmfn+jenVq1cv9nilqVucrKwsiMViiMViWFpaYurUqejcuTP27dsHgUBQ5L41atTA77//jn///bfYdrZt24avv/4aY8aMwbp166RJS8FxAGDfvn2Fzg2Jjo6GoaEhxo8fL7etIM7Y2FioqKhgwoQJMtunTJkCiUSCuLg4mfKuXbvKJC0SiQTR0dHo3bs3JBIJ0tPTpR9HR0dkZGQUOwyOiIiI6EPscaHPSvv27dG2bVu5cj09PWlvh46ODoD380aKU5q6xREKhdi/fz+A9/NslixZgidPnpSoV2HJkiUYNmwY6tSpgzZt2qBXr14YOnQo6tWrJ1Pvzp078Pb2xsCBA7Fq1Sq543z99ddYv349vvnmG8yYMQM9evRA//794e7uLk1wUlJSYGVlVeSqZnfv3oWJiYlcQte4cWPp9g9ZWFjI/JyWloYXL14gLCys0NXYnjx5Umj7RERERB9j4kJVTqNGjQAAly5dwldffVVkXR0dHZiYmODy5cv/uV0VFRWZoWyOjo5o1KgRxowZg5iYmCL3HTRoEGxtbbFnzx4cOnQIS5cuxeLFi7F79244OztL6xkbG8PY2BixsbE4e/asXBKnqamJ48eP48iRIzhw4AAOHjyIHTt2oHv37jh06BBUVFT+83kq8nFyVtDb4+3tjWHDhinc58NeMiIiIqLicKgYVTnOzs5QUVHBli1bSlTf1dUVKSkpOHPmTJnGYWxsjEmTJmH//v347bffSlTf19cXe/fuxZ07d2BgYID58+fL1BEKhfj111/RoEEDODk54cqVK3LHqVatGnr06IGQkBBcvXoV8+fPR1JSEo4cOQIAqF+/Pm7cuCGzoMHHzMzM8O+//8r1RF2/fl26vShisRjVq1dHXl4e7O3tFX6MjIyKvSZEREREBZi4UJVTp04djBo1CocOHVI4nCo/Px/BwcG4f/8+gPeT4LW0tPDNN9/g8ePHcvVTUlKwYsUK6c///POP9AG+OOPHj4dIJMKiRYsKrZOXl4eMjAyZMiMjI5iYmODNmzdy9XV1dREfHy9dMvnDJYyfPXsmV7+g16ngWAMGDEB6ejpWr14tV7dgcn6vXr2Ql5cnV+enn36CQCCQ6QVSREVFBQMGDEB0dLTC3qy0tLQi9yciIiL6GIeKUZUUHByMlJQUTJgwAbt374arqyv09PTwzz//4JdffsH169fh4eEB4H0PxNatW/H111+jcePGGDp0KJo1a4bc3FycPn0av/zyC4YPHy499tChQ3Hs2DG595koYmBggBEjRiA0NBTXrl2TzhH50MuXL2Fqagp3d3e0bNkS2traSExMxB9//CGzytiHDA0Npe9rsbe3x8mTJ1G7dm0EBQXh+PHjcHFxgZmZGZ48eYLQ0FCYmprCxsZGGn9ERAQmT56M5ORk2NraIisrC4mJifD19UWfPn3Qu3dvdOvWDTNnzkRqaipatmyJQ4cOYd++ffD395dZUawwixYtwpEjR9ChQweMGjUKTZo0wbNnz3Du3DkkJiYqTLKIiIiICsPEhaokkUiEuLg4hIeHY/PmzZg7dy5ev34NExMTdO/eHVFRUahdu7a0vpubGy5evIilS5di3759WLt2LTQ0NNCiRQsEBwdj1KhRnxzL5MmT8b///Q+LFy9GeHi4wlh9fX1x6NAh7N69G/n5+bC0tERoaCi+/fbbQo9bu3ZtJCYmwtbWFj179sTx48fh5uaG1NRUbNy4Eenp6TA0NETXrl0RGBgIXV1dAO97Q2JjYzF//nxs3boV0dHRMDAwgI2NjXQJ6WrVqiEmJgazZ8/Gjh07sG
nTJpibm2Pp0qWYMmVKic67Zs2aSE5ORlBQEHbv3o3Q0FAYGBigadOmWLx4cekvJBEREX3RBJKS/NmYiKicGBoa4tGjRwpXOZsxYwYsLS3xzTfflOhYmZmZ0NXVxYbjVyHS/u9LXBMREdF7Hq1MKzsEznEhIiIiIiLlx6FiRFTpDA0NFZbn5OQoXESgOO4ta0vf0UNERERVAxMXIqpUBS8OJSIiIioKh4oREREREZHSY+JCRERERERKj4kLEREREREpPSYuRERERESk9Ji4EBERERGR0mPiQkRERERESo+JCxERERERKT0mLkREREREpPSYuBARERERkdJj4kJEREREREqPiQsRERERESk9Ji5ERERERKT0mLgQEREREZHSY+JCRERERERKj4kLEREREREpPSYuRERERESk9FQrOwAiorK268IDiLQzKzsM+sJ4tDKt7BCIiKo09rgQEREREZHSY+JCRERERERKj4kLEREREREpPSYuVGnCw8MhEAiQmpoqLbOzs4OdnZ1SxEJEREREyoOJiwIFD7EFH6FQCBMTEzg6OmLlypV4+fKlTP2AgACZ+mpqajA3N8eECRPw4sULhW3k5+cjIiICPXv2hKGhIdTU1GBkZAQHBweEhYXhzZs3JY53z549cHZ2hqGhIdTV1WFiYoJBgwYhKSnpv1yGKmnBggXYu3dvZYdRqILvRYcOHaCvr4/q1aujYcOGGDp0KH777bdKi2v48OEQCARo0aIFJBKJ3HaBQAA/P79PPn5OTg5++ukndOjQAbq6uhAKhWjYsCH8/Pxw8+bN/xI6ERERVRFcVawIQUFBsLCwwNu3b/Ho0SMcPXoU/v7+CAkJQUxMDFq0aCFTf+3atdDW1kZWVhYOHz6MVatW4dy5czh58qRMvezsbPTr1w/x8fGwtrbG1KlTUbNmTTx79gzHjh2Dr68vfv/9d2zYsKHI+CQSCXx8fBAeHo5WrVph8uTJqFWrFh4+fIg9e/agR48eOHXqFKytrcv82pSXQ4cOlevxFyxYAHd3d/Tt21emfMiQIfDw8ICGhka5tl+cCRMmYM2aNejTpw+8vLygqqqKGzduIC4uDvXq1UPHjh0rNb5Lly5h9+7dGDBgQJkdMz09HU5OTvjzzz/h6uoKT09PaGtr48aNG9i+fTvCwsKQm5tbZu0RERHR54mJSxGcnZ3Rtm1b6c/ff/89kpKS4OrqCjc3N1y7dg2amprS7e7u7jA0NAQAjBkzBh4eHtixYweSk5PRvn17ab1JkyYhPj4ey5cvx8SJE2XanDJlCm7duoWEhIRi4wsODkZ4eLg0mRIIBNJtM2fORGRkJFRVP69brK6uXintqqioQEVFpVLaLvD48WOEhoZi1KhRCAsLk9m2fPlypKWllUk7EokEOTk5Mt/dktDU1ESdOnUQFBSE/v37y3zf/ovhw4fjr7/+wq5du+QSorlz52LmzJll0g4RERF93jhUrJS6d++OH3/8EXfv3sWWLVuKrGtrawsASElJkZbdu3cP69evh5OTk1zSUqBBgwbw9fUt8tjZ2dlYuHAhGjVqhGXLlil8iBwyZIhMwvT3339j4MCB0NfXh0gkQseOHXHgwAGZfY4ePQqBQICdO3ciMDAQtWvXRvXq1eHu7o6MjAy8efMG/v7+MDIygra2NkaMGCE3rK1g2FBUVBSsrKwgFArRpk0bHD9+vMhzAhTPccnJyUFAQAAaNmwIoVAIY2Nj9O/fX+a6Llu2DNbW1jAwMICmpibatGmDXbt2ycWVlZWFzZs3S4f1DR8+HEDhc1xCQ0PRtGlTaGhowMTEBOPGjZMb/mdnZ4dmzZrh6tWr6NatG0QiEWrXro0lS5YUe74funPnDiQSCTp37iy3TSAQwMjISPpzwfDEjyk6D3Nzc7i6uiI+Ph5t27aFpqYm1q1bBwC4e/cu3NzcoKWlBSMjI2lSLRAIcPToUZljV6tWDbNmzcLFixexZ8+eYs/nyZMnGDlyJGrWrA
mhUIiWLVti8+bNMnV+//13HDhwACNHjlTYi6OhoYFly5YV2xYRERFVfUxcPsGQIUMAFD+sqeDhUU9PT1oWFxeHvLw8eHt7/6cYTp48iWfPnsHT07NEPQWPHz+GtbU14uPj4evri/nz5yMnJwdubm4KH0IXLlyI+Ph4zJgxAz4+Pti9ezfGjh0LHx8f3Lx5EwEBAejfvz/Cw8OxePFiuf2PHTsGf39/eHt7IygoCE+fPoWTkxMuX75cqvPMy8uDq6srAgMD0aZNGwQHB2PixInIyMiQOdaKFSvQqlUrBAUFYcGCBVBVVcXAgQNlErPIyEhoaGjA1tYWkZGRiIyMxJgxYwptOyAgAOPGjYOJiQmCg4MxYMAArFu3Dg4ODnj79q1M3efPn8PJyQktW7ZEcHAwGjVqhOnTpyMuLq7E52pmZgYA+OWXX/D69esS71cSN27cwODBg9GzZ0+sWLECX331FbKystC9e3ckJiZiwoQJmDlzJk6fPo3p06cXehxPT080aNAAQUFBCue6FMjOzoadnR0iIyPh5eWFpUuXQldXF8OHD8eKFSuk9WJiYgD83+8UERERUWE+r3FESsLU1BS6uroyf/EHgGfPngEAsrKykJSUhDVr1kAsFqNLly7SOtevXwcANGvWTGbf3NxcZGb+35u+BQIBDAwMCo3h2rVrAIDmzZuXKOZFixbh8ePHOHHiBGxsbAAAo0aNQosWLTB58mT06dMH1ar9Xx777t07HDt2DGpqagCAtLQ0bN++HU5OToiNjQUA+Pr64vbt29i4cSNmz54t097ly5dx9uxZtGnTBgDg4eEBKysrzJ49G7t37y5RzAAQERGBw4cPIyQkBJMmTZKWz5gxQ+bB+ebNmzJDn/z8/NC6dWuEhITAxcUFAODt7Y2xY8eiXr16xSaOaWlpWLhwIRwcHBAXFye9No0aNYKfnx+2bNmCESNGSOv/+++/iIiIkD6Ajxw5EmZmZtiwYQOcnZ1LdK7GxsYYOnQoIiIiYGpqCjs7O3Tu3BkuLi5o1KhRiY5RmNu3b+PgwYNwdHSUloWEhODvv//G3r170adPHwDvhzi2atWq0OOoqKhg1qxZGDZsGPbu3Yt+/foprBcWFoZr165hy5Yt8PLyAgCMHTsWXbt2xaxZs+Dj44Pq1auX+ntMREREXy72uHwibW1tudXFrKysIBaLYW5uDh8fH1haWiIuLg4ikUhapyA50dbWltk3NjYWYrFY+in463thCo5TvXr1EsUbGxuL9u3bS5OWghhGjx6N1NRUXL16Vab+0KFDpUkLAHTo0EG6GMCHOnTogHv37uHdu3cy5Z06dZImLQBQt25d9OnTB/Hx8cjLyytRzAAQHR0NQ0NDjB8/Xm7bh0OlPkxanj9/joyMDNja2uLcuXMlbutDiYmJyM3Nhb+/v0xCN2rUKOjo6MgNsdPW1pZJhtTV1dG+fXv8/fffpWp306ZNWL16NSwsLLBnzx5MnToVjRs3Ro8ePfDgwYNPOhcAsLCwkElaAODgwYOoXbs23NzcpGVCoRCjRo0q8lheXl7F9rrExsaiVq1aGDx4sLRMTU0NEyZMwKtXr3Ds2DEApf8eExER0ZeLicsnevXqldzDVnR0NBISErB161Z07NgRT548kZsAXbDPq1evZMo7d+6MhIQEJCQkwMHBodj2dXR0AEAueSrM3bt3YWVlJVfeuHFj6fYP1a1bV+ZnXV1dAECdOnXkyvPz85GRkSFT3qBBA7m2GjZsiNevX5dqknlKSgqsrKyKXWTg119/RceOHSEUCqGvrw+xWIy1a9fKxVVSBdfj42umrq6OevXqyV0vU1NTuTknenp6eP78eanarVatGsaNG4c///wT6enp2LdvH5ydnZGUlAQPD49POJP3LCws5Mru3r2L+vXry8VtaWlZ5LEKel3Onz9f6NLSd+/eRYMGDWSSPkD++1ba7zERERF9uZi4fIL79+8jIyND7gGvS5cusLe3x+DBg5GQkA
BNTU14eXkhPz9fWqdgyM/Hcz3EYjHs7e1hb28PY2PjYmMoOM6lS5f+6+koVNi8mcLKi5rvUN5OnDgBNzc3CIVChIaGIjY2FgkJCfD09KywuMrjuhgYGMDNzQ2xsbHo2rUrTp48KX3gL2xFr8J6s0q7glhxvLy8YGlpWexcl+KU9/eYiIiIqg4mLp8gMjISAOSG3nxIW1sbc+bMwfnz57Fz505pubOzM1RUVBAVFfWfYrCxsYGenh62bdtWoqFXZmZmuHHjhlx5wZyb4oamldatW7fkym7evAmRSASxWFzi49SvXx83btyQmwz/oejoaAiFQsTHx8PHxwfOzs6wt7dXWLekS/gWXI+Pr1lubi7u3LlT5terOAXLcj98+BDA/y348PEKZx/3BBXFzMwMKSkpconH7du3i933w16Xffv2KTz2rVu3ZJJ2QP771rt3bwAodoU+IiIiIiYupZSUlIS5c+fCwsJCOum4MF5eXjA1NZVZdatu3brw8fFBXFwcVq9erXA/RX/BTklJkVkMQCQSYfr06bh27RqmT5+ucJ8tW7YgOTkZANCrVy8kJyfjzJkz0u1ZWVkICwuDubk5mjRpUvSJl9KZM2dk5pfcu3cP+/btg4ODQ6nelzJgwACkp6crvFYF56yiogKBQCCTwKWmpiocxqSlpSX3sK+Ivb091NXVsXLlSplru2HDBmRkZEgn/JelR48eyc01At4nS4cPH0a1atWkvXz169cHAJklpguWei4pR0dHPHjwQLqyF/B+6emff/65RPt7e3vD0tISgYGBctt69eqFR48eYceOHdKyd+/eYdWqVdDW1kbXrl0BvJ8L5eTkhPXr1yu8X7m5uZg6dWqJz4mIiIiqLq4qVoS4uDhcv34d7969w+PHj5GUlISEhASYmZkhJiYGQqGwyP3V1NQwceJETJs2DQcPHoSTkxOA9y8TvHPnDsaPH4/t27ejd+/eMDIyQnp6Ok6dOoX9+/fLza3o0aMHAMi8n2PatGm4cuUKgoODceTIEbi7u6NWrVp49OgR9u7di+TkZJw+fRrA+1W4tm3bBmdnZ0yYMAH6+vrYvHkz7ty5g+joaLm5CP9Vs2bN4OjoiAkTJkBDQwOhoaEAoPAhtygFq2xNnjwZycnJsLW1RVZWFhITE+Hr64s+ffrAxcUFISEhcHJygqenJ548eYI1a9bA0tISFy9elDlemzZtkJiYiJCQEJiYmMDCwgIdOnSQa1csFuP7779HYGAgnJyc4Obmhhs3biA0NBT<KEY>//<KEY>><br>
Reliance_Test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
import matplotlib.pyplot as plt

# +
# list of all xs
x = list(range(1, 11))

# +
print(x)

# +
# pmf: a discrete uniform distribution over the ten outcomes
p_X_x = [1 / len(x)] * len(x)

# +
# sums to 1
print(p_X_x)

# +
plt.bar(x, p_X_x)
plt.xlabel('X')
plt.ylabel('P(X=x)')

# +
import numpy as np
import scipy.stats as stats

# +
# range of xs
x = np.linspace(-10, 10, 100)

# +
# NOTE: scipy.stats.norm's `scale` parameter is the *standard deviation*,
# not the variance.  The original code passed scale=1.5**2 (= 2.25) while
# the comment asked for variance 2.25, which actually produced a normal
# with variance 2.25**2 ≈ 5.06.  Pass the standard deviation instead.

# first normal distribution with mean = 0, variance = 1 (std dev = 1)
p_X_1 = stats.norm.pdf(x=x, loc=0.0, scale=1.0)
# second normal distribution with mean = 0, variance = 2.25 (std dev = 1.5)
p_X_2 = stats.norm.pdf(x=x, loc=0.0, scale=1.5)

# +
plt.plot(x, p_X_1, color='blue')
plt.plot(x, p_X_2, color='orange')
plt.xlabel('X')
plt.ylabel('P(X)')
Chapter08/Examples/distributions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Averaging & Coefficient
# + calculate all prediction's mean value.
# + recode very tiny prediction to 0.
# + multiply a constant close to 1.

import numpy as np
import pandas as pd
import time

# Stack each model's validation-set and test-set predictions.
predictions = pd.read_csv('./predictions_cus.csv')
prediction_val = pd.read_csv('./prediction_val_cus.csv')
subxx1 = pd.concat([prediction_val, predictions])
#subxx1

predictions = pd.read_csv('./predictions_reg.csv')
prediction_val = pd.read_csv('./prediction_val_reg.csv')
subxx2 = pd.concat([prediction_val, predictions])
# subxx2

predictions = pd.read_csv('./predictions_poi.csv')
prediction_val = pd.read_csv('./prediction_val_poi.csv')
subxx3 = pd.concat([prediction_val, predictions])
# subxx3

subxx4 = pd.read_csv('./submission_under.csv')
#subxx4


def load_data():
    '''load all submission of single models
    '''
    submission1 = pd.read_csv('./submission1.csv')
    submission2 = pd.read_csv('./submission2.csv')
    submission3 = pd.read_csv('./submission3.csv')
    submission4 = pd.read_csv('./submission4.csv')
    #submission5 = pd.read_csv('./submission5.csv')
    print('Our dataset has {} rows and {} columns'.format(submission1.shape[0], submission1.shape[1]))
    return submission1, submission2, submission3, submission4  #, submission5


submission1, submission2, submission3, submission4 = load_data()

# +
# The 28 forecast columns are named F1 .. F28.
col = ['F' + str(i + 1) for i in range(28)]
col


def multiply_recode(df):
    '''recode x < 0.05 -> 0
       multiply a constant 1.02
    '''
    df[col] = np.where(df[col] < 0.05, 0, df[col])
    df[col] = df[col] * 1.02
    return df
# -


def averaging():
    '''mean based blending
    '''
    start = time.time()
    fusion = pd.concat([submission1, submission2, submission3, submission4])
    print('Our dataset has {} rows and {} columns'.format(fusion.shape[0], fusion.shape[1]))
    gpmean = fusion.groupby(['id']).mean()
    gpmean = pd.DataFrame(gpmean)
    gpmean['id'] = gpmean.index
    # Recoding deliberately disabled for this submission variant
    # (hence the "_no_recode0" suffix in the output filename).
    #gpmean = multiply_recode(gpmean)
    print('Our dataset has {} rows and {} columns'.format(gpmean.shape[0], gpmean.shape[1]))
    gpmean.to_csv('submission_averaging_no_recode0.csv', index = False)
    print('This program costs %7.2f seconds'%(time.time() - start))


averaging()
accuracy/.ipynb_checkpoints/final-processing-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="eIrvnAbGZ1wP" # ##### Copyright 2019 The TensorFlow Authors. # + cellView="form" colab_type="code" id="_A4IPZ-WZ9H7" colab={} #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="WpaDQG8VaYFO" # # Masking and padding with Keras # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/keras/masking_and_padding"> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> # View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/masking_and_padding.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/masking_and_padding.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> # View source on GitHub</a> # </td> # <td> # <a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/masking_and_padding.ipynb"> # <img src="https://www.tensorflow.org/images/download_logo_32px.png" /> # Download notebook</a> # </td> # </table> # + 
[markdown] colab_type="text" id="QGJH5EKYoSHZ" # ## Setup # + colab_type="code" id="wJEBe8hTlB6W" colab={} from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np try: # # %tensorflow_version only exists in Colab. # %tensorflow_version 2.x except Exception: pass import tensorflow as tf from tensorflow.keras import layers # + [markdown] colab_type="text" id="5ShanCQ_pSPO" # ## Padding sequence data # # When processing sequence data, it is very common for individual samples to have different lengths. Consider the following example (text tokenized as words): # # ``` # [ # ["The", "weather", "will", "be", "nice", "tomorrow"], # ["How", "are", "you", "doing", "today"], # ["Hello", "world", "!"] # ] # ``` # # After vocabulary lookup, the data might be vectorized as integers, e.g.: # # ``` # [ # [83, 91, 1, 645, 1253, 927], # [73, 8, 3215, 55, 927], # [71, 1331, 4231] # ] # ``` # # The data is a 2D list where individual samples have length 6, 5, and 3 respectively. Since the input data for a deep learning model must be a single tensor (of shape e.g. `(batch_size, 6, vocab_size)` in this case), samples that are shorter than the longest item need to be padded with some placeholder value (alternatively, one might also truncate long samples before padding short samples). # # Keras provides an API to easily truncate and pad sequences to a common length: `tf.keras.preprocessing.sequence.pad_sequences`. # + colab_type="code" id="xI-lHnyxfa2T" colab={} raw_inputs = [ [83, 91, 1, 645, 1253, 927], [73, 8, 3215, 55, 927], [711, 632, 71] ] # By default, this will pad using 0s; it is configurable via the # "value" parameter. # Note that you could "pre" padding (at the beginning) or # "post" padding (at the end). # We recommend using "post" padding when working with RNN layers # (in order to be able to use the # CuDNN implementation of the layers). 
padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(raw_inputs, padding='post') print(padded_inputs) # + [markdown] colab_type="text" id="HyHf90yAqkMr" # ## Masking # + [markdown] colab_type="text" id="o16pUIBLgc_Q" # Now that all samples have a uniform length, the model must be informed that some part of the data is actually padding and should be ignored. That mechanism is <b>masking</b>. # # There are three ways to introduce input masks in Keras models: # - Add a `keras.layers.Masking` layer. # - Configure a `keras.layers.Embedding` layer with `mask_zero=True`. # - Pass a `mask` argument manually when calling layers that support this argument (e.g. RNN layers). # + [markdown] id="f6QMNceyf1GD" colab_type="text" # ## Mask-generating layers: `Embedding` and `Masking` # # Under the hood, these layers will create a mask tensor (2D tensor with shape `(batch, sequence_length)`), and attach it to the tensor output returned by the `Masking` or `Embedding` layer. # + colab_type="code" id="rYXQ589PkC0P" colab={} embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True) masked_output = embedding(padded_inputs) print(masked_output._keras_mask) # + colab_type="code" id="-0VVscXQm1D1" colab={} masking_layer = layers.Masking() # Simulate the embedding lookup by expanding the 2D input to 3D, # with embedding dimension of 10. unmasked_embedding = tf.cast( tf.tile(tf.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), tf.float32) masked_embedding = masking_layer(unmasked_embedding) print(masked_embedding._keras_mask) # + [markdown] colab_type="text" id="RL2vsiCBmVck" # As you can see from the printed result, the mask is a 2D boolean tensor with shape `(batch_size, sequence_length)`, where each individual `False` entry indicates that the corresponding timestep should be ignored during processing. 
# + [markdown] colab_type="text" id="s4jsu6oTrl2f" # ## Mask propagation in the Functional API and Sequential API # + [markdown] colab_type="text" id="0KgNt7fvm0Jx" # When using the Functional API or the Sequential API, a mask generated by an `Embedding` or `Masking` layer will be propagated through the network for any layer that is capable of using them (for example, RNN layers). Keras will automatically fetch the mask corresponding to an input and pass it to any layer that knows how to use it. # # Note that in the `call` method of a subclassed model or layer, masks aren't automatically propagated, so you will need to manually pass a `mask` argument to any layer that needs one. See the section below for details. # # For instance, in the following Sequential model, the `LSTM` layer will automatically receive a mask, which means it will ignore padded values: # + id="zfkxyf7yVyxJ" colab_type="code" colab={} model = tf.keras.Sequential([ layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32), ]) # + [markdown] id="UqZeTeEhWHLE" colab_type="text" # This is also the case for the following Functional API model: # + id="SYaVl6WSWJal" colab_type="code" colab={} inputs = tf.keras.Input(shape=(None,), dtype='int32') x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs) outputs = layers.LSTM(32)(x) model = tf.keras.Model(inputs, outputs) # + [markdown] id="crxoxoRDWg8t" colab_type="text" # ## Passing mask tensors directly to layers # + [markdown] id="UfvcEl20XRYA" colab_type="text" # Layers that can handle masks (such as the `LSTM` layer) have a `mask` argument in their `__call__` method. # # Meanwhile, layers that produce a mask (e.g. `Embedding`) expose a `compute_mask(input, previous_mask)` method which you can call. 
# # Thus, you can do something like this: # # # + id="coCV26fqXmya" colab_type="code" colab={} class MyLayer(layers.Layer): def __init__(self, **kwargs): super(MyLayer, self).__init__(**kwargs) self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True) self.lstm = layers.LSTM(32) def call(self, inputs): x = self.embedding(inputs) # Note that you could also prepare a `mask` tensor manually. # It only needs to be a boolean tensor # with the right shape, i.e. (batch_size, timesteps). mask = self.embedding.compute_mask(inputs) output = self.lstm(x, mask=mask) # The layer will ignore the masked values return output layer = MyLayer() x = np.random.random((32, 10)) * 100 x = x.astype('int32') layer(x) # + [markdown] id="uSZP15mtWs9Z" colab_type="text" # ## Supporting masking in your custom layers # + [markdown] id="w2gg7O3kVjC4" colab_type="text" # Sometimes you may need to write layers that generate a mask (like `Embedding`), or layers that need to modify the current mask. # # For instance, any layer that produces a tensor with a different time dimension than its input, such as a `Concatenate` layer that concatenates on the time dimension, will need to modify the current mask so that downstream layers will be able to properly take masked timesteps into account. # # To do this, your layer should implement the `layer.compute_mask()` method, which produces a new mask given the input and the current mask. # # Most layers don't modify the time dimension, so don't need to worry about masking. The default behavior of `compute_mask()` is just pass the current mask through in such cases. # # Here is an example of a `TemporalSplit` layer that needs to modify the current mask. 
# + colab_type="code" id="gaS_7dXyr-Z0" colab={} class TemporalSplit(tf.keras.layers.Layer): """Split the input tensor into 2 tensors along the time dimension.""" def call(self, inputs): # Expect the input to be 3D and mask to be 2D, split the input tensor into 2 # subtensors along the time axis (axis 1). return tf.split(inputs, 2, axis=1) def compute_mask(self, inputs, mask=None): # Also split the mask into 2 if it presents. if mask is None: return None return tf.split(mask, 2, axis=1) first_half, second_half = TemporalSplit()(masked_embedding) print(first_half._keras_mask) print(second_half._keras_mask) # + [markdown] id="5t73OUJgjLLW" colab_type="text" # Here is another example of a `CustomEmbedding` layer that is capable of generating a mask from input values: # + id="fLSpf1iojSO7" colab_type="code" colab={} class CustomEmbedding(tf.keras.layers.Layer): def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs): super(CustomEmbedding, self).__init__(**kwargs) self.input_dim = input_dim self.output_dim = output_dim self.mask_zero = mask_zero def build(self, input_shape): self.embeddings = self.add_weight( shape=(self.input_dim, self.output_dim), initializer='random_normal', dtype='float32') def call(self, inputs): return tf.nn.embedding_lookup(self.embeddings, inputs) def compute_mask(self, inputs, mask=None): if not self.mask_zero: return None return tf.not_equal(inputs, 0) layer = CustomEmbedding(10, 32, mask_zero=True) x = np.random.random((3, 10)) * 9 x = x.astype('int32') y = layer(x) mask = layer.compute_mask(x) print(mask) # + [markdown] id="fUopC-DelkG2" colab_type="text" # ## Writing layers that need mask information # # Some layers are mask *consumers*: they accept a `mask` argument in `call` and use it to dertermine whether to skip certain time steps. # # To write such a layer, you can simply add a `mask=None` argument in your `call` signature. The mask associated with the inputs will be passed to your layer whenever it is available. 
# # ```python # # class MaskConsumer(tf.keras.layers.Layer): # # def call(self, inputs, mask=None): # ... # # # ``` # + [markdown] id="_50qkZjIn8b2" colab_type="text" # ## Recap # # That is all you need to know about masking in Keras. To recap: # # - "Masking" is how layers are able to know when to skip / ignore certain timesteps in sequence inputs. # - Some layers are mask-generators: `Embedding` can generate a mask from input values (if `mask_zero=True`), and so can the `Masking` layer. # - Some layers are mask-consumers: they expose a `mask` argument in their `__call__` method. This is the case for RNN layers. # - In the Functional API and Sequential API, mask information is propagated automatically. # - When writing subclassed models or when using layers in a standalone way, pass the `mask` arguments to layers manually. # - You can easily write layers that modify the current mask, that generate a new mask, or that consume the mask associated with the inputs. #
site/en/guide/keras/masking_and_padding.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from mxnet import gluon import mxnet as mx from mxnet import nd from mxnet.gluon import nn import numpy as np from tqdm import tqdm from tmpref.model import Deeplabv3 import keras.backend as K import tensorflow as tf from keras.models import Model # + def get_gpu_session(ratio=None, interactive=False): config = tf.ConfigProto(allow_soft_placement=True) if ratio is None: config.gpu_options.allow_growth = True else: config.gpu_options.per_process_gpu_memory_fraction = ratio if interactive: sess = tf.InteractiveSession(config=config) else: sess = tf.Session(config=config) return sess def set_gpu_usage(ratio=None): sess = get_gpu_session(ratio) K.set_session(sess) # - set_gpu_usage() keras_model = Deeplabv3(backbone="xception") keras_model.summary() # + # from keras.utils import plot_model # plot_model(keras_model,to_file='xception.jpg',show_shapes=True) # - for layer in keras_model.layers: print(layer.name) nn.Conv2D(kernel_size=(5,4),channels=3,in_channels=3,groups=3).weight from keras.layers import Input, Conv2D,DepthwiseConv2D inputs = Input((10,10,3)) # conv1 = Conv2D(20,kernel_size=(5,4))(inputs) conv1 = DepthwiseConv2D(kernel_size=(5,4))(inputs) Model(inputs, conv1).weights def compute_same_padding(kernel_size, dilation): #TODO: compute `same` padding for stride<=2 ? kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) pad_total = kernel_size_effective - 1 pad_beg = pad_total // 2 pad_end = pad_total - pad_beg return pad_beg, pad_end class SeparableConv(nn.HybridBlock): #TODO: should I fix epsilon in BN layer? 
def __init__(self, out_filters, kernel_size, strides, dilation, depth_activation, in_filters=None, prefix=None): super(SeparableConv,self).__init__(prefix=prefix) if in_filters is None: in_filters = out_filters self.depth_activation = depth_activation padding = compute_same_padding(kernel_size, dilation) # filter_in==filter_out with self.name_scope(): self.depthwise_conv = nn.Conv2D(in_filters, in_channels=in_filters, groups=in_filters, dilation=dilation,use_bias=False, padding=padding, strides=strides, kernel_size=kernel_size,prefix='depthwise_') self.bn1 = nn.BatchNorm(axis=1,prefix='depthwise_BN_') self.pointwise_conv = nn.Conv2D(out_filters, kernel_size=1,use_bias=False, prefix='pointwise_') self.bn2 = nn.BatchNorm(axis=1,prefix='pointwise_BN_') def hybrid_forward(self, F, x): if not self.depth_activation: x = F.relu(x) x = self.depthwise_conv(x) x = self.bn1(x) if self.depth_activation: x = F.relu(x) x = self.pointwise_conv(x) x = self.bn2(x) if self.depth_activation: x = F.relu(x) return x # + # # ?nn.BatchNorm(axis=1) # - inputs = nd.random_normal(shape=(8,16,48,48),ctx=mx.gpu(0)) sep_conv = SeparableConv(16,3,1,1,False) sep_conv.initialize(ctx=mx.gpu(0)) sep_conv(inputs).shape # + # # ?nn.Conv2D # - sep_conv.depthwise_conv.name class XceptionBlock(nn.HybridBlock): def __init__(self, filters_list, kernel_size, strides, dilation, depth_activation, in_filters=None, use_shortcut_conv=None, prefix=None, return_skip=False, use_shortcut=True): super(XceptionBlock,self).__init__(prefix=prefix) assert len(filters_list)==3 if in_filters is None: in_filters = filters_list[0] if use_shortcut_conv is None: use_shortcut_conv=strides>1 with self.name_scope(): self.conv1 = SeparableConv(filters_list[0], kernel_size, 1, dilation, depth_activation, in_filters=in_filters, prefix='separable_conv1_') self.conv2 = SeparableConv(filters_list[1], kernel_size, 1, dilation, depth_activation, in_filters=filters_list[0],prefix='separable_conv2_') self.conv3 = 
SeparableConv(filters_list[2], kernel_size, strides, dilation, depth_activation, in_filters=filters_list[1],prefix='separable_conv3_') if use_shortcut_conv: self.shortcut_conv = nn.Conv2D(filters_list[2], kernel_size=1, strides=strides,use_bias=False,prefix='shortcut_') self.shortcut_bn = nn.BatchNorm(axis=1,prefix='shortcut_BN_') self.use_shortcut_conv = use_shortcut_conv self.use_shortcut = use_shortcut self.return_skip = return_skip def hybrid_forward(self, F, x): shortcut = x x = self.conv1(x) x = self.conv2(x) skip = x x = self.conv3(x) if self.use_shortcut_conv: shortcut = self.shortcut_conv(shortcut) shortcut = self.shortcut_bn(shortcut) if self.return_skip: return x+shortcut, skip if self.use_shortcut: return x+shortcut return x class EntryFlow(nn.HybridBlock): def __init__(self, prefix='entry_flow_'): super(EntryFlow,self).__init__(prefix) with self.name_scope(): self.conv1 = nn.HybridSequential(prefix='conv1_') with self.conv1.name_scope(): self.conv1.add(nn.Conv2D(32, kernel_size=3, strides=2, padding=1,use_bias=False,prefix='1_')) self.conv1.add(nn.BatchNorm(axis=1,prefix='1_BN_')) self.conv1.add(nn.Activation("relu")) self.conv2 = nn.HybridSequential(prefix='conv1_') with self.conv2.name_scope(): self.conv2.add(nn.Conv2D(64, kernel_size=3, padding=1,use_bias=False,prefix='2_')) self.conv2.add(nn.BatchNorm(axis=1,prefix='2_BN_')) self.conv2.add(nn.Activation("relu")) self.conv3 = XceptionBlock(filters_list=[128,128,128], kernel_size=3, strides=2, dilation=1, depth_activation=False,in_filters=64,prefix='block1_') self.conv4 = XceptionBlock(filters_list=[256,256,256], kernel_size=3, strides=2,return_skip=True, dilation=1, depth_activation=False, in_filters=128,prefix='block2_') self.conv5 = XceptionBlock(filters_list=[728,728,728], kernel_size=3, strides=2, dilation=1, depth_activation=False, in_filters=256,prefix='block3_') def hybrid_forward(self, F, x): x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) x, skip = self.conv4(x) x = self.conv5(x) 
return x, skip entry_flow = EntryFlow() entry_flow.initialize(ctx=mx.gpu()) middle_flow = nn.HybridSequential(prefix="middle_flow_") with middle_flow.name_scope(): for i in range(16): middle_flow.add(XceptionBlock(filters_list=[728,728,728], kernel_size=3, strides=1, dilation=1, depth_activation=False,in_filters=728, prefix="unit_%s_"%(i+1))) middle_flow.initialize(ctx=mx.gpu(0)) exit_flow = nn.HybridSequential(prefix="exit_flow_") with exit_flow.name_scope(): exit_flow.add(XceptionBlock(filters_list=[728, 1024, 1024], kernel_size=3, strides=1,use_shortcut_conv=True, dilation=1, depth_activation=False,in_filters=728, prefix="block1_")) exit_flow.add(XceptionBlock(filters_list=[1536, 1536, 2048], kernel_size=3, strides=1, dilation=2, depth_activation=True, in_filters=1024,use_shortcut=False, prefix="block2_")) exit_flow.initialize(ctx=mx.gpu(0)) inputs = nd.random_normal(shape=(8,3,512,512),ctx=mx.gpu(0)) entry_features, skip = entry_flow(inputs) entry_features.shape skip.shape middle_features = middle_flow(entry_features) middle_features.shape exit_features = exit_flow(middle_features) exit_features.shape exit_flow.collect_params() class PoolRecover(nn.HybridBlock): def __init__(self): super(PoolRecover,self).__init__() self.gap = nn.HybridSequential() self.gap.add(nn.GlobalAvgPool2D()) self.gap.add(nn.Conv2D(256, kernel_size=1, use_bias=False, prefix='image_pooling_')) self.gap.add(nn.BatchNorm(prefix='image_pooling_BN_')) self.gap.add(nn.Activation("relu")) def hybrid_forward(self, F, x): *_, h, w = x.shape pool = self.gap(x) return F.contrib.BilinearResize2D(pool, height=h, width=w) class ASPP(nn.HybridBlock): def __init__(self): super(ASPP, self).__init__() b0 = nn.HybridSequential() b0.add(nn.Conv2D(256, kernel_size=1, use_bias=False, prefix='aspp0_')) b0.add(nn.BatchNorm(prefix='aspp0_BN_')) b0.add(nn.Activation("relu")) # rate = 6 (12) b1 = SeparableConv(256, kernel_size=3, strides=1, dilation=6, depth_activation=True, in_filters=2048, prefix='aspp1_') # 
rate = 12 (24) b2 = SeparableConv(256, kernel_size=3, strides=1, dilation=12, depth_activation=True, in_filters=2048, prefix='aspp2_') # rate = 18 (36) b3 = SeparableConv(256, kernel_size=3, strides=1, dilation=18, depth_activation=True, in_filters=2048, prefix='aspp3_') b4 = PoolRecover() self.concurent = gluon.contrib.nn.HybridConcurrent(axis=1) self.concurent.add(b4) self.concurent.add(b0) self.concurent.add(b1) self.concurent.add(b2) self.concurent.add(b3) self.project = nn.HybridSequential() self.project.add(nn.Conv2D(256, kernel_size=1, use_bias=False, prefix='concat_projection_')) self.project.add(nn.BatchNorm(prefix='concat_projection_BN_')) self.project.add(nn.Activation("relu")) self.project.add(nn.Dropout(0.1)) def hybrid_forward(self, F, x): return self.project(self.concurent(x)) aspp = ASPP() aspp.initialize(ctx=mx.gpu()) aspp(exit_features).shape aspp.collect_params() keras_to_end = Model(inputs=keras_model.inputs, outputs=keras_model.get_layer("dropout_1").output) cnt=0 cnt+=sum([np.prod(v.shape) for k,v in entry_flow.collect_params().items()]) cnt+=sum([np.prod(v.shape) for k,v in middle_flow.collect_params().items()]) cnt+=sum([np.prod(v.shape) for k,v in exit_flow.collect_params().items()]) cnt+=sum([np.prod(v.shape) for k,v in aspp.collect_params().items()]) cnt sum([np.prod(v.shape) for k,v in middle_flow.collect_params().items()]) sum([np.prod(v.shape) for k,v in exit_flow.collect_params().items()]) keras_to_end.summary() class DeepLabv3p(nn.HybridBlock): def __init__(self, classes=21): super(DeepLabv3p, self).__init__() self.entry_flow = EntryFlow(prefix="entry_flow_") middle_flow = nn.HybridSequential(prefix="middle_flow_") with middle_flow.name_scope(): for i in range(16): middle_flow.add(XceptionBlock(filters_list=[728,728,728], kernel_size=3, strides=1, dilation=1, depth_activation=False,in_filters=728, prefix="unit_%s_"%(i+1))) self.middle_flow = middle_flow exit_flow = nn.HybridSequential(prefix="exit_flow_") with exit_flow.name_scope(): 
exit_flow.add(XceptionBlock(filters_list=[728, 1024, 1024], kernel_size=3, strides=1,use_shortcut_conv=True, dilation=1, depth_activation=False,in_filters=728, prefix="block1_")) exit_flow.add(XceptionBlock(filters_list=[1536, 1536, 2048], kernel_size=3, strides=1, dilation=2, depth_activation=True, in_filters=1024,use_shortcut=False, prefix="block2_")) self.exit_flow = exit_flow self.aspp = ASPP() skip_project = nn.HybridSequential() skip_project.add(nn.Conv2D(48, kernel_size=1, use_bias=False, prefix='feature_projection0_')) skip_project.add(nn.BatchNorm(prefix='feature_projection0_BN_')) skip_project.add(nn.Activation("relu")) self.skip_project = skip_project decoder = nn.HybridSequential() decoder.add(SeparableConv(256, kernel_size=3, strides=1, dilation=1, depth_activation=True, in_filters=304, prefix='decoder_conv0_')) decoder.add(SeparableConv(256, kernel_size=3, strides=1, dilation=1, depth_activation=True, in_filters=256, prefix='decoder_conv1_')) decoder.add(nn.Conv2D(classes, kernel_size=1, use_bias=True, prefix='logits_semantic_')) self.decoder = decoder def hybrid_forward(self, F, x): *_, h, w = x.shape x, skip = self.entry_flow(x) x = self.middle_flow(x) x = self.exit_flow(x) x = self.aspp(x) x = F.contrib.BilinearResize2D(x, height=h//4, width=w//4) skip = self.skip_project(skip) x = F.concat(x, skip, dim=1) x = self.decoder(x) return F.contrib.BilinearResize2D(x, height=h, width=w) deeplab = DeepLabv3p() deeplab.initialize(ctx=mx.gpu()) deeplab(inputs).shape deeplab.collect_params() # + # deeplab.save_params("tmpref/deeplabv3p.params") # - keras_model.summary() sum([np.prod(v.shape) for k,v in deeplab.collect_params().items()]) 512/16 nn.Con entry_flow.conv3.collect_params() # + # len(weights) # - entry_flow.collect_params() middle_flow.collect_params() keras_model.weights class WeightConverter: def __init__(self, keras_model=None, weight_dict=None): if keras_model is None: self.weight_dict = weight_dict elif weight_dict is None: self.weight_dict = 
dict() for weight_var in tqdm(keras_model.weights): self.weight_dict[weight_var.name] = K.eval(weight_var) else: raise ValueError def replace_depthwise_weight(self, query, param): target = query.replace("_weight",'/depthwise_kernel:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight.transpose(2,3,0,1)) #TODO: is it right? def replace_weight(self, query, param): target = query.replace("_weight",'/kernel:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight.transpose(3,2,0,1)) def replace_bias(self, query, param): target = query.replace("_bias",'/bias:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight) def replace_gamma(self, query, param): target = query.replace("_gamma",'/gamma:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight) def replace_beta(self, query, param): target = query.replace("_beta",'/beta:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight) def replace_running_mean(self, query, param): target = query.replace("_running_mean",'/moving_mean:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight) def replace_running_var(self, query, param): target = query.replace("_running_var",'/moving_variance:0') assert target in self.weight_dict, "%s->%s"%(query, target) weight = self.weight_dict[target] param.set_data(weight) def set_parameters(self, gluon_model): for k, param in gluon_model.collect_params().items(): if k.endswith("depthwise_weight"): self.replace_depthwise_weight(k, param) elif k.endswith("weight"): self.replace_weight(k, param) elif k.endswith("bias"): self.replace_bias(k, param) elif k.endswith("gamma"): self.replace_gamma(k, param) elif 
k.endswith("beta"): self.replace_beta(k, param) elif k.endswith("running_mean"): self.replace_running_mean(k, param) elif k.endswith("running_var"): self.replace_running_var(k, param) else: raise ValueError # converter = WeightConverter(weight_dict=weights_dict) converter = WeightConverter(keras_model=keras_model) converter.set_parameters(deeplab) deeplab.save_params("tmpref/deeplabv3p.params") type(entry_flow.collect_params()) converter.set_parameters(entry_flow) converter.set_parameters(middle_flow) converter.set_parameters(exit_flow) converter.set_parameters(aspp) # + # entry_flow.save_params("tmpref/entry_flow.params") # middle_flow.save_params("tmpref/middle_flow.params") # exit_flow.save_params("tmpref/exit_flow.params") # aspp.save_params("tmpref/aspp.params") # - xception = nn.HybridSequential() xception.add(entry_flow) xception.add(middle_flow) xception.add(exit_flow) xception
workspace/4.backbone.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Manipulating shape and indexing # + import torch import nestedtensor from IPython.display import Markdown, display def print_eval(s): colorS = "<span style='color:darkred'>$ {}</span>".format(s) display(Markdown('**{}**'.format(colorS))) print('{}\n'.format(str(eval(s)))) # - nt2 = nestedtensor.nested_tensor( [ [ torch.tensor([[1.0, 0.5], [0.1, 0.6]]), torch.tensor([[5.5, 3.3], [2.2, 6.6]]) ], [ torch.tensor([[3.0, 1.0], [0.5, 0.7]]), torch.tensor([[5.0, 4.0], [1.0, 2.0]]) ] ]) print_eval('nt2') nt4 = nt2.to_tensor() print_eval("nt2") print_eval("nt4") print_eval("nt4.size()") # print_eval("nt4.nested_dim()") Will crash. nt4 is a regular Tensor! # print_eval("nt4.nested_size()") Will crash. nt4 is a regular Tensor! print_eval("nt2") print_eval("nt2[0][0]") print_eval("nt2[0, 0]") print_eval("nt2[:, 0]") print_eval("nt2[0, :]") # Advanced indexing is allowed over tensor dimensions print_eval("nt2") print_eval("nt2[:, :, (1, 0)]") # Advanced indexing using binary mask print_eval("nt2") ind = torch.tensor(((1, 0), (0, 1))) print_eval("ind") print_eval("nt2[:, :, ind]") # Ellipsis print_eval("nt2") print_eval("nt2[:, :, ..., 0]") print("$ nt2[..., 0]") try: nt2[..., 0] except RuntimeError as e: print(str(e))
examples/indexing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Spam Detection Model using Natural Language Processing

# ## Import the Libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# ## Import the data

# Tab-separated file with no header row: column 0 is the label
# ('ham'/'spam'), column 1 is the raw SMS text.
sms_data = pd.read_csv("C:\\Users\\<NAME>\\Downloads\\SpamClassifier-master\\SpamClassifier-master\\smsspamcollection\\smsspamcollection",
                       sep='\t', names=["label", "message"])
sms_data.head(10)

# ### Creating a binary column for label

# Encode the label numerically: ham -> 1, spam -> 0.
sms_data["bin_form"] = sms_data["label"].apply(lambda x: 1 if x == "ham" else 0)
sms_data.head(10)

# ### Count of labels in the dataset

sms_data["label"].value_counts()

plt.hist(sms_data["label"], facecolor="peru", edgecolor='blue')
plt.xlabel("Sms Category")
plt.ylabel("Count of Category")

# ## Import the Regex and NLTK libraries to perform Data Preprocessing

import re
import nltk

# ## Remove all the stopwords from messages

nltk.download('stopwords')
from nltk.corpus import stopwords


def cleaning(st):
    """Strip non-letters from a message, lowercase it, and split into tokens."""
    s = re.sub('[^a-zA-Z]', ' ', st)
    return s.lower().split()


temp = sms_data["message"].apply(cleaning)
temp

# PERF FIX: build the stop-word set once. The original called
# stopwords.words('english') (which returns a fresh list) for every word of
# every message, making stop-word removal O(messages * words * stopwords);
# a pre-built set gives O(1) membership tests.
english_stopwords = set(stopwords.words('english'))
without_stopwords = temp.apply(lambda x: [i for i in x if i not in english_stopwords])
without_stopwords[0]

# ## Perform the Stemming to get the root form of words

from nltk.stem.porter import PorterStemmer

port_stem = PorterStemmer()


def stem_word(st):
    """Stem each token and rejoin them into a single space-separated string."""
    return ' '.join(port_stem.stem(i) for i in st)


stemmed_msg = without_stopwords.apply(stem_word)
stemmed_msg[0]

# ## Build Bag of Words

# ### Import the CountVectorizer to get vectors of the data; use max_df of 20%
# because the share of spam in this data is about 20%

from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(max_df=0.20)

# ## Fit the vectorizer model to get the target and the training matrix

x = cv.fit_transform(stemmed_msg).toarray()
y = sms_data["bin_form"]

# ## Split the dataset to get the train test data

from sklearn.model_selection import train_test_split

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)

# ## Using the Naive Bayes model to classify the message

from sklearn.naive_bayes import MultinomialNB

model = MultinomialNB()

# ### Fit the model

model.fit(x_train, y_train)

# ### Predict the label for message

y_pred = model.predict(x_test)

# ## Using the confusion matrix to evaluate the model

from sklearn.metrics import confusion_matrix

matric = confusion_matrix(y_test, y_pred)
matric

# ### Using the accuracy metric to qualify the model

from sklearn.metrics import accuracy_score

accuracy = accuracy_score(y_test, y_pred)
accuracy

# ### Conclusion: As per the achieved accuracy of the model, we can qualify the model for Spam Detection.
Sms_Fraud_Detection_NLP.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.10 64-bit (''base'': conda)'
#     name: python3
# ---

# +
from github import Github

# NOTE(review): the token is intentionally blank in the checked-in notebook;
# paste a GitHub personal access token before running (an empty token falls
# back to unauthenticated, heavily rate-limited access).
g = Github('')

# +
# Print the labels with color c5def5 from microsoft/vscode-jupyter.
repo_jupyter = g.get_repo('microsoft/vscode-jupyter')
for l in repo_jupyter.get_labels():
    if (l.color == 'c5def5'):
        print(l.name)

# +
# Same query against the internal repo.
repo_jupyter_internal = g.get_repo('microsoft/vscode-jupyter-internal')
for l in repo_jupyter_internal.get_labels():
    if (l.color == 'c5def5'):
        print(l.name)

# +
# In microsoft/vscode, restrict to the notebook/interactive-window labels.
repo_vscode = g.get_repo('microsoft/vscode')
for l in repo_vscode.get_labels():
    # BUG FIX: the original condition was
    #   l.color == 'c5def5' and l.name.startswith('notebook-')
    #       or l.name.startswith('interactive-window')
    # Because `and` binds tighter than `or`, every label starting with
    # 'interactive-window' was printed regardless of its color. Group the
    # prefix test (str.startswith accepts a tuple of prefixes) so the color
    # check applies to both, matching the cells above.
    if l.color == 'c5def5' and l.name.startswith(('notebook-', 'interactive-window')):
        print(l.name)
# -

repo_jupyter.get_issues()
.vscode/notebooks/GithubLabels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.5
#     language: python
#     name: python3
# ---

# # get-data
# This notebook collects the sample data from the [Github Link](https://github.com/IBMDevConnect/RBSHack2018/raw/master/hackdata/hack_data_v1.zip) and imports it into the Watson Studio project.

# +
# Add the project access token here. For more info, see this blog:
# https://medium.com/ibm-data-science-experience/control-your-dsx-projects-using-python-c69e13880312

# +
import requests
import zipfile

url = 'https://github.com/IBMDevConnect/RBSHack2018/raw/master/hackdata/hack_data_v1.zip'
target_path = 'hack_data_v1.zip'

# Stream the archive to disk so the whole zip never has to fit in memory.
response = requests.get(url, stream=True)
response.raise_for_status()  # fail fast on HTTP errors instead of saving an error page
# FIX: use a context manager -- the original left the file handle open if
# any write raised.
with open(target_path, "wb") as handle:
    for chunk in response.iter_content(chunk_size=512):
        if chunk:  # filter out keep-alive new chunks
            handle.write(chunk)
# -

# Unpack the archive into the working directory.
with zipfile.ZipFile(target_path) as zf:
    zf.extractall()

# Save the extracted CSVs as project data assets.
# NOTE(review): `project` is injected by the Watson Studio project-access-token
# cell above; this cell raises NameError if that cell was not run first.
for filename in ['test_indessa.csv', 'train_indessa.csv']:
    with open(filename, 'rb') as file:
        project.save_data(data=file, file_name=filename, overwrite=True)

# !head test_indessa.csv
get-data-clean.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="ib_45Kw92CwR" outputId="f8fe4103-3702-40b3-e9eb-d75e3a489b13" pip install transformers # + [markdown] id="Sr3r1xqn2lyd" # **Função de Cálculo de Score** # + id="zoLDQ0n-MNEp" def calc_score_tarefa1(text, target_word, tokenizer, model, debug=False): tokenized_text = tokenizer.tokenize(text) indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text) #get indexs target_index = tokenizer.convert_tokens_to_ids([target_word])[0] masked_index = tokenized_text.index('[MASK]') # Create the segments tensors. segments_ids = [0] * len(tokenized_text) # Convert inputs to PyTorch tensors tokens_tensor = torch.tensor([indexed_tokens]) segments_tensors = torch.tensor([segments_ids]) # Predict all tokens with torch.no_grad(): predictions = model(tokens_tensor, segments_tensors) # get words again (just to compare the confidence) expected_token = tokenizer.convert_ids_to_tokens([target_index])[0] # normalise between 0 and 1 predictions_candidates = torch.sigmoid(predictions[0][0][masked_index]) predicted_index = torch.argmax(predictions_candidates).item() predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0] # if word dont exist return 0 if predicted_token == '[UNK]': return 0 predictions_candidates = predictions_candidates.cpu().numpy() target_bert_confiance = predictions_candidates[target_index] predicted_bert_confience = predictions_candidates[predicted_index] # score = 1 - (target_bert_confiance-predicted_bert_confience if target_bert_confiance > predicted_bert_confience else predicted_bert_confience-target_bert_confiance) if debug: print("predicted token ---> ", predicted_token, predicted_bert_confience) print("expected token ---> ",expected_token , target_bert_confiance) print("Score:", score) return 
score # + colab={"base_uri": "https://localhost:8080/"} id="oLE5MbhbwXS-" outputId="0fa6e9bb-7423-4352-a9b5-170ee76ed3b9" import torch from transformers import BertTokenizer, BertModel, BertForMaskedLM # OPTIONAL: if you want to have more information on what's happening, activate the logger as follows #import logging #logging.basicConfig(level=logging.INFO) # Load pre-trained model tokenizer (vocabulary) tokenizer = BertTokenizer.from_pretrained('neuralmind/bert-base-portuguese-cased') # Load pre-trained model (weights) model = BertForMaskedLM.from_pretrained('neuralmind/bert-base-portuguese-cased') model.eval() text = '[CLS] Especificamente, a capacidade impressionante desses insetos de se espremerem por qualquer espaço e aguentarem [MASK] de até 900 vezes seu próprio peso sem sofrer grandes danos [SEP]' #text = '[CLS] Chegamos na metade da temporada! Você disputou [MASK] contra todas as equipes do grupo uma vez e a agora inicia-se o returno [SEP]' target_word = 'pressões' # calculate score score = calc_score_tarefa1(text, target_word, tokenizer, model, debug=True) # print("Score: ", score)a1(text, target_word, tokenizer, model, debug=True) print("Score:",score) # + [markdown] id="qW5Itb_Nyc5K" # **BERT - Buscando o Contexto : Tarefa 1** # - Metodo calc_score_tarefa1
Aula4_BERT_Tarefa_1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **Behavioral Cloning** # # ## Writeup Template # # --- # # **Behavioral Cloning Project** # # The goals / steps of this project are the following: # * Use the simulator to collect data of good driving behavior # * Build, a convolution neural network in Keras that predicts steering angles from images # * Train and validate the model with a training and validation set # * Test that the model successfully drives around track one without leaving the road # * Summarize the results with a written report # # # ## Rubric Points # Here I will consider the [rubric points](https://review.udacity.com/#!/rubrics/432/view) individually and describe how I addressed each point in my implementation. # # --- # ### Files Submitted & Code Quality # # #### 1. Submission includes all required files and can be used to run the simulator in autonomous mode # # My project includes the following files: # * model.py: code to load data, build and train model # * drive.py: code for driving the car in autonomous mode # * The only changed I've made to the orginal drive.py script was changing the `set_speed` parameter from `9` to `30`, mostly to speed up iteration speed. # * model.h5: final model # * In addition, in the folder `nvidia_12_working/` there are the checkpoints of this model at each epoch during training. # * video.mp4: video recording of the car driving track 1 for two laps, using `model.h5` # * writeup_report.md: write up of this project. You are reading this. # # #### 2. Submission includes functional code # Using the Udacity provided simulator and my drive.py file, the car can be driven autonomously around the track by executing # ```sh # python drive.py model.h5 # ``` # # #### 3. 
Submission code is usable and readable # # The `model.py` file contains the code for training and saving the convolution neural network. The file shows the pipeline I used for training and validating the model, and it contains comments to explain how the code works. # # To run `model.py`, use the following command line arguments: # ```sh # python model.py \ # --model=nvidia \ # --model_dir=12 \ # --epochs=10 \ # --data_dirs="../beta_simulator_mac/data/center_driving_1/driving_log.csv" \ # --data_dirs="../beta_simulator_mac/data/center_driving_2/driving_log.csv" \ # --data_dirs="../beta_simulator_mac/data/center_driving_reverse_1/driving_log.csv" \ # --data_dirs="../beta_simulator_mac/data/curves_1/driving_log.csv" # ``` # # `model`: one of "vgg" or "nvidia", the two models implemented. # # `model_dir`: specific a name for the directory to store the trained model. # # `epochs`: number of epochs to train the model. # # `data_dirs`: a list of `driving_log.csv` files that we want to train the model on; can pass this argument multiple times to train on multiple files. # # # ### Model Architecture and Training Strategy # # #### 1. An appropriate model architecture has been employed # # My (final, best performaning) model is based on this [NVIDIA convolutional neutral network model](https://devblogs.nvidia.com/deep-learning-self-driving-cars/). 
# # Here's a summary of the model architecture: # # ``` # Model: "sequential_1" # _________________________________________________________________ # Layer (type) Output Shape Param # # ================================================================= # lambda_1 (Lambda) (None, 160, 320, 3) 0 # _________________________________________________________________ # cropping2d_1 (Cropping2D) (None, 90, 320, 3) 0 # _________________________________________________________________ # conv2d_1 (Conv2D) (None, 43, 158, 24) 1824 # _________________________________________________________________ # conv2d_2 (Conv2D) (None, 20, 77, 36) 21636 # _________________________________________________________________ # dropout_1 (Dropout) (None, 20, 77, 36) 0 # _________________________________________________________________ # conv2d_3 (Conv2D) (None, 8, 37, 48) 43248 # _________________________________________________________________ # conv2d_4 (Conv2D) (None, 6, 35, 64) 27712 # _________________________________________________________________ # conv2d_5 (Conv2D) (None, 4, 33, 64) 36928 # _________________________________________________________________ # dropout_2 (Dropout) (None, 4, 33, 64) 0 # _________________________________________________________________ # flatten_1 (Flatten) (None, 8448) 0 # _________________________________________________________________ # dense_1 (Dense) (None, 128) 1081472 # _________________________________________________________________ # dense_2 (Dense) (None, 64) 8256 # _________________________________________________________________ # dense_3 (Dense) (None, 32) 2080 # _________________________________________________________________ # dense_4 (Dense) (None, 1) 33 # ================================================================= # Total params: 1,223,189 # Trainable params: 1,223,189 # Non-trainable params: 0 # ``` # # Additionally, I've also implemented and trained a model with VGG architecture. 
However, despite being a much larger model (about 10x the number of parameters of the NVIDIA model), this model performs significantly worse. It also takes much longer to train. My hypotheses are that 1) VGG is prone to overfitting when applied to this problem as the input image space is relatively homogeneous, and 2) the extra pooling and dropout layers added too much noise and made it difficult for gradients to backpropagate through the network. # # #### 2. Attempts to reduce overfitting in the model # # There are three main techniques applied to prevent overfitting: # * Two Dropout layers # * Early stopping in training # * Attempts are made to collect a comprehensive and diverse set of training examples (more on this below). # # # #### 3. Model parameter tuning # # The model used an adam optimizer, so the learning rate was not tuned manually. # # Other parameters of the model were tuned manually, by looking at the driving performance of resulting models. # # # #### 4. Appropriate training data # # I generated training data from the following simulator runs on track 1: # * Three laps of my "best effort" driving # * Two laps of my "best effort" driving, in reverse direction # * Additional runs for a couple of sharp turns that the model had trouble navigating initially # # I also used the following techniques to augment training data: # * Flipping images and steering angles # * Using images from left and right cameras # * This turned out to be particularly effective in teaching the model how to recover when its trajectory deviates from the center of the lane. # * I set the correction angle to be 0.15, based on some manual tuning. # # # # ### Model Architecture and Training Strategy # # #### 1. Solution Design Approach # # I first developed the data input / model output modules, using a very simple model as a placeholder, in order to ensure that the pipeline is functional from end to end. This model performed miserably but that is as expected. 
# # I then beefed up my model by going to the VGG architecture. As discussed above, this appears to have overshot the target as the model is very slow to train, and more importantly the trained model performs badly on the track. The car would frequently go off track, fall into water, etc. # # Therefore I decided to use a simpler model, i.e. the NVIDIA model introduced in the lectures. This model has done wonders. Within a few epochs of training the car was able to navigate large portions of the track. # # I then augmented the training data by leveraging images from left and right cameras - again as discussed above, this proved to be a critical step in teaching the car how to recover when it begins to go off track. After this the car was able to drive around track 1 smoothly. # # I added a few dropout layers to the original NVIDIA model to prevent overfitting. This did not have a noticeable impact on driving performance on track 1, but (ostensibly) may make the model more generalizable to other scenarios. # # # #### 2. Final Model Architecture # # See the "1. An appropriate model architecture has been employed" section above for a detailed description of the model architecture. # # #### 3. Creation of the Training Set & Training Process # # For training data creation see section " 4. Appropriate training data" above. # # I applied early stopping in training, and set the `restore_best_weights` parameter to `True` so that Keras automatically updates the final model weight to that from the best performing epoch. # # One interesting thing I observed is that below a certain threshold, validation loss does not correlate strongly with driving performance on the tracks. More specifically, the model from some epoch could have a higher validation loss than one from another epoch, nonetheless its actual driving behavior on the track appears to be smoother, and more natural to how a human would drive the vehicle. My hypothesis is that, unlike e.g.
# image classification, there are multiple ways to control a car so that it
# does the right thing on the tracks. Turning right one fraction of a second
# sooner or later probably does not matter all that much. Therefore in this
# case the loss function functions more as a proxy metric, rather than the
# true task-success objective function.

from keras.models import load_model

# Load the final trained model from disk and print its layer-by-layer summary.
model = load_model('model.h5')
model.summary()
Tong writeup draft.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Traveling Companions
#
# In this activity you will be taking three separate csvs that were gathered by Tourism Malaysia, merging them together, and then creating charts to visualize a country's change of traveling companions to Malaysia over the course of three years.
#
# ### Part 1 - Merging Companions
#
# * You will likely need to perform two different merges over the course of this activity, changing the names of your columns as you go along.

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# +
# Take in all of our traveler data and read it into pandas
travel_2016 = "../Resources/2016_travelers.csv"
travel_2017 = "../Resources/2017_travelers.csv"
travel_2018 = "../Resources/2018_travelers.csv"

travel_2016_df = pd.read_csv(travel_2016)
travel_2017_df = pd.read_csv(travel_2017)
travel_2018_df = pd.read_csv(travel_2018)
# -

# Merge the first two datasets on "COUNTRY OF NATIONALITY" so that no data is lost (should be 44 rows)
# An outer merge keeps countries that appear in only one of the two years.
combined_travel_df = pd.merge(travel_2016_df, travel_2017_df,
                              how='outer', on='COUNTRY OF NATIONALITY')
combined_travel_df.head()

# +
# Rename our _x columns to "2016 Alone", "2016 With Spouse", "2016 With Children", "2016 With Family/Relatives",
# "2016 Student Group", "2016 With Friends", "2016 With Business Associate", "2016 With Incentive Group",
# and "2016 Others"
# NOTE: "WITH BUSINESS ACCOCIATE" is the (misspelled) column name as it appears in the source CSVs.
combined_travel_df = combined_travel_df.rename(columns={"ALONE_x": "2016 Alone",
                                                        "WITH SPOUSE_x": "2016 With Spouse",
                                                        "WITH CHILDREN_x": "2016 With Children",
                                                        "WITH FAMILY/RELATIVES_x": "2016 With Family/Relatives",
                                                        "STUDENT GROUP_x": "2016 Student Group",
                                                        "WITH FRIENDS_x": "2016 With Friends",
                                                        "WITH BUSINESS ACCOCIATE_x": "2016 With Business Associate",
                                                        "WITH INCENTIVE GROUP_x": "2016 With Incentive Group",
                                                        "OTHERS_x": "2016 Others"})

# Rename our _y columns to "2017 Alone", "2017 With Spouse", "2017 With Children", "2017 With Family/Relatives",
# "2017 Student Group", "2017 With Friends", "2017 With Business Associate", "2017 With Incentive Group",
# and "2017 Others"
# (comment previously said "2016" here, but the _y columns come from the 2017 file)
combined_travel_df = combined_travel_df.rename(columns={"ALONE_y": "2017 Alone",
                                                        "WITH SPOUSE_y": "2017 With Spouse",
                                                        "WITH CHILDREN_y": "2017 With Children",
                                                        "WITH FAMILY/RELATIVES_y": "2017 With Family/Relatives",
                                                        "STUDENT GROUP_y": "2017 Student Group",
                                                        "WITH FRIENDS_y": "2017 With Friends",
                                                        "WITH BUSINESS ACCOCIATE_y": "2017 With Business Associate",
                                                        "WITH INCENTIVE GROUP_y": "2017 With Incentive Group",
                                                        "OTHERS_y": "2017 Others"})
combined_travel_df.head()
# -

# Merge our newly combined dataframe with the 2018 dataframe
combined_travel_df = pd.merge(combined_travel_df, travel_2018_df,
                              how="outer", on="COUNTRY OF NATIONALITY")
combined_travel_df

# +
# Rename "ALONE", "WITH SPOUSE", "WITH CHILDREN", "WITH FAMILY/RELATIVES", "STUDENT GROUP", "WITH FRIENDS",
# "WITH BUSINESS ACCOCIATE","WITH INCENTIVE GROUP", "OTHERS" to
# "2018 Alone", "2018 With Spouse", "2018 With Children", "2018 With Family/Relatives", "2018 Student Group",
# "2018 With Friends", "2018 With Business Associate", "2018 With Incentive Group", and "2018 Others"
combined_travel_df = combined_travel_df.rename(columns={"ALONE": "2018 Alone",
                                                        "WITH SPOUSE": "2018 With Spouse",
                                                        "WITH CHILDREN": "2018 With Children",
                                                        "WITH FAMILY/RELATIVES": "2018 With Family/Relatives",
                                                        "STUDENT GROUP": "2018 Student Group",
                                                        "WITH FRIENDS": "2018 With Friends",
                                                        "WITH BUSINESS ACCOCIATE": "2018 With Business Associate",
                                                        "WITH INCENTIVE GROUP": "2018 With Incentive Group",
                                                        "OTHERS": "2018 Others"})
combined_travel_df.head()
# -

# ### Part 2

# Check the mean of the columns
combined_travel_df.mean()

# +
# Reduce columns where mean of traveling companions is > 1 across all years
travel_reduced = pd.DataFrame(combined_travel_df[["COUNTRY OF NATIONALITY",
                                                  "2016 Alone", "2016 With Spouse", "2016 With Children",
                                                  "2016 With Family/Relatives", "2016 With Friends",
                                                  "2016 With Business Associate",
                                                  "2017 Alone", "2017 With Spouse", "2017 With Children",
                                                  "2017 With Family/Relatives", "2017 With Friends",
                                                  "2017 With Business Associate",
                                                  "2018 Alone", "2018 With Spouse", "2018 With Children",
                                                  "2018 With Family/Relatives", "2018 With Friends",
                                                  "2018 With Business Associate"]])

# Set index to "Country of Nationality"
travel_reduced = travel_reduced.set_index("COUNTRY OF NATIONALITY")
travel_reduced
# -

# ### Part 3 - Charting Traveling Companions
#
# * Create 3 variables with inputs that asks the user what country they would like to chart.
#
# * Ask the user what type of traveling companion they would like to compare for their chosen countries.
#
# * Store each country's percentage of travelers for the chosen traveling companion over time in 3 variables (one for each country)
#
# * Create a line chart that will plot the comparison of each country's percentage of travelers with the chosen traveling companion from 2016 to 2018

# Collect the user's input to search through our data frame
country1 = input("What country would you like to chart 1st? ")
country2 = input("What country would you like to chart 2nd? ")
country3 = input("What country would you like to chart 3rd? ")

# +
# Ask type of traveling companion
select_options = "1 - Alone\n2 - With Spouse\n3 - With Children\n4 - With Family/Relatives\n5 - With Friends\n6 - With Business Associate"
print("What type of traveling companion would you like to chart? ")
print(select_options)

traveler_type = int(input("Choose a number: "))

# Boolean to check if number was correctly chosen
selected = False

# Loop through options to set column name according to chosen number
while selected == False:
    if traveler_type == 1:
        columns_to_compare = "Alone"
        selected = True
    elif traveler_type == 2:
        columns_to_compare = "With Spouse"
        selected = True
    elif traveler_type == 3:
        columns_to_compare = "With Children"
        selected = True
    # BUG FIX: option 4 previously mapped to "With Family" (a column that does
    # not exist in travel_reduced, raising a KeyError later) and option 5
    # mapped to "With Family/Relatives" even though the menu lists 5 as
    # "With Friends". Both now match the menu shown to the user.
    elif traveler_type == 4:
        columns_to_compare = "With Family/Relatives"
        selected = True
    elif traveler_type == 5:
        columns_to_compare = "With Friends"
        selected = True
    elif traveler_type == 6:
        columns_to_compare = "With Business Associate"
        selected = True
    else:
        # Incorrect input, try again
        print("Please make your selection again.")
        print(select_options)
        traveler_type = int(input("Choose a number: "))

print("You selected option " + str(traveler_type) + " - " + columns_to_compare)
# -

# +
# Create a Series for each chosen country that looks for the chosen travel companion from 2016 to 2018
country1_traveler_over_time = travel_reduced.loc[country1.upper(),
                                                 [f"2016 {columns_to_compare}", f"2017 {columns_to_compare}",
                                                  f"2018 {columns_to_compare}"]]
country2_traveler_over_time = travel_reduced.loc[country2.upper(),
                                                 [f"2016 {columns_to_compare}", f"2017 {columns_to_compare}",
                                                  f"2018 {columns_to_compare}"]]
country3_traveler_over_time = travel_reduced.loc[country3.upper(),
                                                 [f"2016 {columns_to_compare}", f"2017 {columns_to_compare}",
                                                  f"2018 {columns_to_compare}"]]
# -

# +
# Create a list of the years that we will use as our x axis
years = [2016, 2017, 2018]

# Plot our line that will be used to track the first country's traveling companion percentage over the years
plt.plot(years, country1_traveler_over_time, color="green", label=country1)

# Plot our line that will be used to track the second country's traveling companion percentage over the years
plt.plot(years, country2_traveler_over_time, color="blue", label=country2)

# Plot our line that will be used to track the third country's traveling companion percentage over the years
plt.plot(years, country3_traveler_over_time, color="orange", label=country3)

# Place a legend on the chart in what matplotlib believes to be the "best" location
plt.legend(loc="best")

plt.title("Traveling " + columns_to_compare + " Country Comparison")
plt.xlabel("Years")
plt.xticks(np.arange(min(years), max(years)+1, 1.0))
plt.ylabel("% Travelers")

# Print our chart to the screen
plt.show()
# -
01-Lesson-Plans/05-Matplotlib/2/Activities/10-Stu_Travel-Part3/Solved/traveling_companions_bonus.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
# - Author: <NAME>
# - GitHub Repository: https://github.com/rasbt/deeplearning-models

# %load_ext watermark
# %watermark -a '<NAME>' -v -p torch

# - Runs on CPU or GPU (if available)

# # Model Zoo -Standardizing Images

# This notebook provides an example for working with standardized images, that is, images where the image pixels in each image has mean zero and unit variance across the channel.
#
# The general equation for z-score standardization is computed as
#
# $$x' = \frac{x_i - \mu}{\sigma}$$
#
# where $\mu$ is the mean and $\sigma$ is the standard deviation of the training set, respectively. Then $x_i'$ is the scaled feature value, and $x_i$ is the original feature value.
#
# I.e, for grayscale images, we would obtain 1 mean and 1 standard deviation. For RGB images (3 color channels), we would obtain 3 mean values and 3 standard deviations.

# ## Imports

# +
import time
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader

# Make cuDNN deterministic so repeated runs on GPU give identical results.
if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True
# -

# ## Settings and Dataset

# +
##########################
### SETTINGS
##########################

# Device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Hyperparameters
random_seed = 1
learning_rate = 0.05
num_epochs = 10
batch_size = 128

# Architecture
num_classes = 10
# -

# ### Compute the Mean and Standard Deviation for Normalization

# First, we need to determine the mean and standard deviation for each color channel in the training set. Since we assume the entire dataset does not fit into the computer memory all at once, we do this in an incremental fashion, as shown below.

# +
##############################
### PRELIMINARY DATALOADER
##############################

# shuffle=False here: order does not matter for computing statistics.
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=False)

train_mean = []
train_std = []

# Accumulate per-batch channel statistics; axes (0, 2, 3) reduce over
# batch, height, and width, leaving one value per channel.
for i, image in enumerate(train_loader, 0):
    numpy_image = image[0].numpy()

    batch_mean = np.mean(numpy_image, axis=(0, 2, 3))
    batch_std = np.std(numpy_image, axis=(0, 2, 3))

    train_mean.append(batch_mean)
    train_std.append(batch_std)

# NOTE(review): averaging per-batch std devs only approximates the global
# std dev (exact only for equal-sized batches); close enough for
# normalization purposes here.
train_mean = torch.tensor(np.mean(train_mean, axis=0))
train_std = torch.tensor(np.mean(train_std, axis=0))

print('Mean:', train_mean)
print('Std Dev:', train_std)
# -

# **Note that**
#
# - For RGB images (3 color channels), we would get 3 means and 3 standard deviations.
# - The transforms.ToTensor() method converts images to [0, 1] range, which is why the mean and standard deviation values are below 1.

# ### Standardized Dataset Loader

# Now we can use a custom transform function to standardize the dataset according to the mean and standard deviation we computed above.

custom_transform = transforms.Compose([transforms.ToTensor(),
                                       transforms.Normalize(mean=train_mean,
                                                            std=train_std)])

# +
##########################
### MNIST DATASET
##########################

# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=custom_transform,
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=custom_transform)

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)
# -

# Check that the dataset can be loaded:

# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break

# For the given batch, check that the channel means and standard deviations are roughly 0 and 1, respectively:

print('Channel mean:', torch.mean(images[:, 0, :, :]))
print('Channel std:', torch.std(images[:, 0, :, :]))

# ## Model

# +
##########################
### MODEL
##########################


class ConvNet(torch.nn.Module):
    # Small 2-conv-layer CNN for 28x28 single-channel (MNIST) input.

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()

        # calculate same padding:
        # (w - k + 2*p)/s + 1 = o
        # => p = (s(o-1) - w + k)/2

        # 28x28x1 => 28x28x4
        self.conv_1 = torch.nn.Conv2d(in_channels=1,
                                      out_channels=4,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(28-1) - 28 + 3) / 2 = 1
        # 28x28x4 => 14x14x4
        self.pool_1 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(14-1) - 28 + 2) = 0
        # 14x14x4 => 14x14x8
        self.conv_2 = torch.nn.Conv2d(in_channels=4,
                                      out_channels=8,
                                      kernel_size=(3, 3),
                                      stride=(1, 1),
                                      padding=1)  # (1(14-1) - 14 + 3) / 2 = 1
        # 14x14x8 => 7x7x8
        self.pool_2 = torch.nn.MaxPool2d(kernel_size=(2, 2),
                                         stride=(2, 2),
                                         padding=0)  # (2(7-1) - 14 + 2) = 0

        self.linear_1 = torch.nn.Linear(7*7*8, num_classes)

    def forward(self, x):
        # Returns both raw logits (for the loss) and softmax probabilities
        # (for prediction).
        out = self.conv_1(x)
        out = F.relu(out)
        out = self.pool_1(out)

        out = self.conv_2(out)
        out = F.relu(out)
        out = self.pool_2(out)

        logits = self.linear_1(out.view(-1, 7*7*8))
        probas = F.softmax(logits, dim=1)
        return logits, probas


torch.manual_seed(random_seed)
model = ConvNet(num_classes=num_classes)
model = model.to(device)

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# -

# ## Training

# +
def compute_accuracy(model, data_loader):
    # Percentage of correctly classified examples over the whole loader.
    correct_pred, num_examples = 0, 0
    for features, targets in data_loader:
        features = features.to(device)
        targets = targets.to(device)
        logits, probas = model(features)
        _, predicted_labels = torch.max(probas, 1)
        num_examples += targets.size(0)
        correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100


start_time = time.time()
for epoch in range(num_epochs):
    model = model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):

        features = features.to(device)
        targets = targets.to(device)

        ### FORWARD AND BACK PROP
        logits, probas = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()
        cost.backward()

        ### UPDATE MODEL PARAMETERS
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print ('Epoch: %03d/%03d | Batch %03d/%03d | Cost: %.4f'
                   %(epoch+1, num_epochs, batch_idx,
                     len(train_loader), cost))

    model = model.eval()
    print('Epoch: %03d/%03d training accuracy: %.2f%%' % (
          epoch+1, num_epochs,
          compute_accuracy(model, train_loader)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
# -

# ## Evaluation

print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))

# %watermark -iv
pytorch_ipynb/cnn/cnn-standardized.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt from netCDF4 import Dataset # %matplotlib inline from matplotlib.colors import LogNorm import os os.environ['PROJ_LIB'] = 'C:/Users/conta/Anaconda3/Lib/site-packages/mpl_toolkits/basemap' from mpl_toolkits.basemap import Basemap from pylab import rcParams f = Dataset('Data/no2.nc','r') print(f.groups) print(f.groups['PRODUCT']) print(f.groups['PRODUCT'].variables.keys()) lon = f.groups['PRODUCT'].variables['longitude'][:][0,:,:] lat = f.groups['PRODUCT'].variables['latitude'][:][0,:,:] no2 = f.groups['PRODUCT'].variables['nitrogendioxide_tropospheric_column_precision'][0,:,:] print (lon.shape) print (lat.shape) print (no2.shape) # + no2_unit = f.groups['PRODUCT'].variables['nitrogendioxide_tropospheric_column_precision'].units rcParams['figure.figsize'] = 10,10 lon_0 = lon.mean() lat_0 = lat.mean() m = Basemap(width=5000000,height=3500000,resolution='i',projection='stere',lat_ts=40,lat_0=lat_0,lon_0=lon_0) x, y = m(lon, lat) cs = m.pcolor(x,y,np.squeeze(no2),norm=LogNorm(), cmap='jet') m.drawparallels(np.arange(-80., 81., 10.), labels=[1,0,0,0], fontsize=10) m.drawmeridians(np.arange(-180., 181., 10.), labels=[0,0,0,1], fontsize=10) m.drawcoastlines() m.drawstates() m.drawcountries() cbar = m.colorbar(cs, location='bottom', pad="10%") cbar.set_label(no2_unit) plt.title('NO2 in atmosphere') plt.show() # -
Detect/NO2_check.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] tags=[]
# # **Advanced Data Science for Innovation - Assignment 1**
#

# + [markdown] tags=[]
# ## NBA Career Prediction: Predict 5-Year Longevity for NBA Rookies
# **Student Name:** <NAME>
#
# **Team Name:** Group 1
# * <NAME>
# * <NAME>
# * <NAME>
# * <NAME>
# * <NAME>

# + [markdown] tags=[]
# ## Environment Setup
# -

import pandas as pd
import numpy as np
from importlib.machinery import SourceFileLoader
# NOTE(review): both prepare.py and sets.py are loaded under the module name
# 'sets' -- likely a copy-paste slip; left as-is since the returned module
# objects are what the code actually uses.
dataprep = SourceFileLoader('sets', '../src/data/prepare.py').load_module()
sets = SourceFileLoader('sets', '../src/data/sets.py').load_module()
base = SourceFileLoader('base', '../src/models/null.py').load_module()
from IPython.display import display
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
import xgboost as xgb
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV,StratifiedKFold
import joblib as job
from sklearn.preprocessing import MinMaxScaler


# +
def score_base(y_train_preds, y_train, y_val_preds, y_val, f1_average='weighted'):
    """Display accuracy/precision/recall/F1 for baseline predictions.

    y_train_preds / y_val_preds: baseline predictions for the train and
    validation targets; y_train / y_val: the true targets.
    f1_average: averaging strategy passed to f1_score (precision and recall
    use the sklearn default, i.e. binary averaging).
    """
    name = 'Base'
    model_scores = []

    # Training-set metrics
    t_acc = accuracy_score(y_train, y_train_preds)
    t_prec = precision_score(y_train, y_train_preds)
    t_rec = recall_score(y_train, y_train_preds)
    t_f1 = f1_score(y_train, y_train_preds, average=f1_average)
    #t_auc = roc_auc_score(y_t, clf.predict_proba(X_t)[:, 1])

    # Validation-set metrics
    v_acc = accuracy_score(y_val, y_val_preds)
    v_prec = precision_score(y_val, y_val_preds)
    v_rec = recall_score(y_val, y_val_preds)
    v_f1 = f1_score(y_val, y_val_preds, average=f1_average)
    #v_auc = roc_auc_score(y_v, clf.predict_proba(X_v)[:, 1])

    model_scores.append([name, t_acc, t_prec, t_rec, t_f1,
                         v_acc, v_prec, v_rec, v_f1])
    df_model_scores = pd.DataFrame (model_scores,
                                    columns = ['model',
                                               't_accuracy','t_precision','t_recall','t_F1',
                                               'v_accuracy','v_precision','v_recall','v_F1'])
    display(df_model_scores)


def fit_score_models(models, X_t, y_t, X_v, y_v, dump_model="NO"):
    """Fit each model in `models`, display train/validation metrics, and
    return the classifier with the best *training* accuracy.

    models: dict of display-name -> unfitted sklearn-style estimator.
    X_t, y_t / X_v, y_v: training and validation features/targets.
    dump_model: "YES" to persist each fitted model with joblib.

    Returns None if `models` is empty (previously this raised
    UnboundLocalError).
    """
    model_scores = []
    best_acc = 0.0
    best_clf = None

    for name, model in models.items():
        clf = model
        clf.fit(X_t, y_t)

        # BUG FIX: the model used to be dumped *before* clf.fit(), so the
        # saved .joblib files contained unfitted estimators. Persist the
        # fitted model instead.
        if dump_model == "YES":
            job.dump(clf, "../models/williams_sean-week2_" + name + ".joblib", compress=3)

        # Training-set metrics
        t_preds = clf.predict(X_t)
        t_acc = accuracy_score(y_t, t_preds)

        # Track the best model by training accuracy; ties keep the earlier
        # model (same behavior as the original first-iteration bookkeeping).
        if best_clf is None or t_acc > best_acc:
            best_acc = t_acc
            best_clf = clf

        t_prec = precision_score(y_t, t_preds)
        t_rec = recall_score(y_t, t_preds)
        t_f1 = f1_score(y_t, t_preds)
        t_auc = roc_auc_score(y_t, clf.predict_proba(X_t)[:, 1])

        # Validation-set metrics
        v_preds = clf.predict(X_v)
        v_acc = accuracy_score(y_v, v_preds)
        v_prec = precision_score(y_v, v_preds)
        v_rec = recall_score(y_v, v_preds)
        v_f1 = f1_score(y_v, v_preds)
        v_auc = roc_auc_score(y_v, clf.predict_proba(X_v)[:, 1])

        model_scores.append([name, t_acc, t_prec, t_rec, t_f1, t_auc,
                             v_acc, v_prec, v_rec, v_f1, v_auc])

    df_model_scores = pd.DataFrame (model_scores,
                                    columns = ['model',
                                               't_accuracy','t_precision','t_recall','t_F1','t_auc',
                                               'v_accuracy','v_precision','v_recall','v_F1','v_auc'])
    display(df_model_scores)
    return best_clf


# + [markdown] tags=[]
# # 3.
# Modelling
# -

# **<u>Load saved data sets</u>**

X_train, y_train, X_val, y_val, X_test, y_test = sets.load_sets(path='../data/processed/')

# **<u>Assess Baseline of Train and Validation datasets</u>**

# Null model: predicts the majority class; gives the floor every real model
# must beat.
base_model = base.NullModel(target_type="classification")
y_base_train_preds = base_model.fit_predict(y_train)
y_base_val_preds = base_model.fit_predict(y_val)

score_base(y_base_train_preds, y_train, y_base_val_preds, y_val)

# ---

# **<u>Train various models with default parameters to determine which model is the most performant</u>**

models_to_fit = {"Logistic Regression": LogisticRegression(random_state=8, solver='liblinear'),
                 "KNN Euclidian": KNeighborsClassifier(metric='euclidean'),
                 "KNN Manhattan": KNeighborsClassifier(metric='manhattan'),
                 "XGBoost": xgb.XGBClassifier(random_state=8, use_label_encoder=False)}

clf1 = fit_score_models (models_to_fit, X_train, y_train, X_val, y_val)

# *Observations:*
# * XGBoost seems to be best performer. Next steps. Tune hyperparameters to reduce overfitting

# ---

# **<u>Perform a grid search on XGBoost to determine which hyperparameters result in best performance</u>**

# grid search over learning rate x number of estimators, 5-fold stratified CV
clf1 = xgb.XGBClassifier(use_label_encoder=False)
n_estimators = [100, 200, 300, 400, 500]
learning_rate = [0.0001, 0.001, 0.01, 0.1]
param_grid = dict(learning_rate=learning_rate, n_estimators=n_estimators)
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=8)
grid_search = GridSearchCV(clf1, param_grid, scoring="accuracy", n_jobs=-1, cv=kfold)
grid_result = grid_search.fit(X_train, y_train)

# Print best score and parameters
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))

# ---

# **<u>Train XGBoost with best Hyperparameter and print performance metrics</u>**

models_to_fit = {"XGBoost": xgb.XGBClassifier(random_state=8, use_label_encoder=False,
                                              learning_rate=0.01, n_estimators=300)}

clf2 = fit_score_models (models_to_fit, X_train, y_train, X_val, y_val, "YES")

# # Kaggle Submission File

df_test = pd.read_csv('../data/raw/test.csv')

df_cleaned = df_test.copy()
df_cleaned = dataprep.remove_invalid_rows(df_cleaned, ['GP','FT%'])

# NOTE(review): this deliberately overwrites the X_test loaded from
# load_sets() above -- the Kaggle test set is a different file from the
# held-out split; confirm that the earlier X_test/y_test are unused.
X_test = df_cleaned.copy()
X_test = dataprep.drop_features(X_test, ['Id'])
X_test = dataprep.scale_features(X_test, MinMaxScaler(), None)
X_test.info()

# Kaggle expects the positive-class probability, not the hard label.
test_probs = clf2.predict_proba(X_test)[:, 1]

df_kaggle = pd.DataFrame({'Id': df_cleaned['Id'], 'TARGET_5Yrs': test_probs});
df_kaggle.shape

df_kaggle.to_csv("../data/external/williams_sean-week2_xgboost-v1.csv", index=False)
notebooks/williams_sean-week2_modelling.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: base # language: python # name: base # --- # # Линейная регрессия # $$a(x) = w_0 + w_1 x_1 + ... + w_d x_d$$ # - $w_0$ - свободный коэффициент, сдвиг, bias. Без свободного коэффициента модель гарантированно будет давать нулевой прогноз при нулевых значениях всех признаков, а это ограничивает возможности по подгонке под данные # # # - $w_i$ - веса, коэффициенты, параметры модели # - $d+1$ - количество параметров # # $$a(x) = w_0 + w_1 x_1 + ... + w_d x_d = w_0 + <w,x>$$ # # Для удобства и возможности применять скалярное произведения добавим единичный признак: # # $$a(x) = w_0 * 1 + w_1 x_1 + ... + w_d x_d = <w,x>$$ # ### Пример # $$a(x) = w_0 + w_1 (площадь \_ квартиры) + w_2 (район) + w_3 (расстояние \_ до \_ метро)$$ # #### Что делать, если признаки не числовые (район)? # - район - категориальный признак, нужно закодировать. # - one-hot кодирование, бинарное кодирование # - Вместо одного категориального признака заведём много бинарных. Первый признак - это индикатор того, что исходный категориальный признак равен первой категории, второй признак - равенство второй категории. Это значит, что только один бинарный признак будет равен единице, остальные - нули. # | Район | # | --- | # |Чкаловский| # |Эльмаш| # |Уралмаш| # |Эльмаш| # |Чкаловский| # # | Чкаловский | Эльмаш | Уралмаш | # | --- | --- | --- | # |1|0|0| # |0|1|0| # |0|0|1| # |0|1|0| # |1|0|0| # # # $$a(x) = w_0 + w_1 (площадь) + w_2 (квартира \ находится\ в\ Чкаловском\ р-не?) + w_3 (квартира \ находится\ на\ Эльмаше?) + w_4 (квартира \ находится\ на\ Уралмаше?) $$ # # # #### Что делать, если зависимость нелинейная (расстояние до метро)? # <img src = 'images/underground.png'> # Разобьем на линейные отрезки (новые признаки). # Если расстояние до метро попало в интервал от $t_0$ до $t_1$, то признак равен 1. 
Остальные признаки попадания в интервал равны 0. # # $$a(x) = w_0 + w_1 *(площадь) + ... + w_3 * [t_0 <= x_3 < t_1] + ... + w_{3+n}[t_{n-1} <= x_3 < t_n]$$ # Модель линейной регрессии применима, если трансформировать все признаки специально под неё, например, использовать one-hot кодирование категориальных признаков или бинаризацию числовых # ## Модель линейной регрессии # $$a(x) = <w, x>$$ # # Запишем задачу оптимизации в матричном виде: # # __Матрица объекты-признаки:__ # $$\begin{pmatrix} x_{11} & x_{12} & \dots & x_{1d} \\ x_{21} & x_{22} & \dots & x_{2d} \\ \dots & \dots & \dots & \dots \\ x_{l1} & x_{l2} & \dots & x_{ld} \end{pmatrix}$$ # Каждая строка соответствует объекту, столбцы - признаки. # # $$\begin{pmatrix} w_1 \\ w_2 \\ \dots \\ w_d \end{pmatrix}$$ # __Применение линейной модели:__ # $$a(x) = <w, x> = w_1 x_1 + \dots + w_d x_d$$ # # __Нужно получить вектор прогнозов на всей обучающей выборке:__ # $$\begin{pmatrix} \sum _{i=1} ^ d w_i x_{1i} \\ \sum _{i=1} ^ d w_i x_{2i} \\ \dots \\ \sum _{i=1} ^ d w_i x_{li} \end{pmatrix}$$ # Первая строка - сумма всех признаков первого объекта с весами. 
# # __Матричное умножение:__ # $$AB=C$$ # $$c_{ij} = \sum_{p=1}^{k} a_{ip}b_{pj}$$ # _Скалярное умножение i-строки первой матрицы на j-столбец второй матрицы_ # __Результат применения линейной модели к выборке X:__ # $$Xw=\begin{pmatrix} x_{11} & x_{12} & \dots & x_{1d} \\ x_{21} & x_{22} & \dots & x_{2d} \\ \dots & \dots & \dots & \dots \\ x_{l1} & x_{l2} & \dots & x_{ld} \end{pmatrix} * \begin{pmatrix} w_1 \\ w_2 \\ \dots \\ w_d \end{pmatrix}=\begin{pmatrix} \sum _{i=1} ^ d w_i x_{1i} \\ \sum _{i=1} ^ d w_i x_{2i} \\ \dots \\ \sum _{i=1} ^ d w_i x_{li} \end{pmatrix}=\begin{pmatrix} <w, x_1> \\ \dots \\ <w,x_l>\end{pmatrix}$$ # __Cреднеквадратическая ошибка:__ # $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(<w,x_i> - y_i)^2 -> min_w}}$$ # __Отклонения прогнозов от ответов:__ # $$Xw-y=\begin{pmatrix} <w, x_1> - y_1 \\ \dots \\ <w,x_l> - y_l\end{pmatrix}$$ # $Xw$ - прогнозы # # $y$ - истинные ответы # # __Средний квадрат отклонения:__ # # # Евклидова норма: # $$||z|| = \sqrt {\sum _{j=1} ^ n z_{j}^2}$$ # Можно возвести в квадрат: # $$||z||^2 = \sum _{j=1} ^ n z_{j}^2$$ # # __MSE в матричном виде:__ # $$\displaystyle {\frac{1}{l} ||Xw-y||^2 = \frac{1}{l} \sum _{i=1}^{l}{(<w,x_i> - y_i)^2 -> min_w}}$$ # # _Умножаем матрицу объекты-признаки $X$ на $w$, вычитаем истинные ответы $y$, берем квадрат евклидовой нормы и делим на $l$_ # # _В numpy: np.square(X.dot(w)-y).mean()_ # # - Берём объект $x_i$, считаем прогноз модели $<w,x_i>$, вычитаем истинный ответ $y_i$, возводим в квадрат, усредняем по всей выборке. Нужно найти $w$, при котором ошибка будет как можно меньше. 
# ## Обучение линейной регрессии # Требуется найти $w$ # $$Q(w_1, \dots, w_d)=\sum _{i=1} ^{l} (w_1 x_1 + \dots + w_d x_d -y_i)^2$$ # Производная: # $$\displaystyle{\lim_{x \to x_0} \frac{f(x) - f(x_0)}{x - x_0} = f'(x_0)}$$ # <img src = 'images/derivative.png'> # - Зафиксируем точку $x_0$ # - Возьмём точку $x$ рядом с $x_0$ # - Знаменатель - разница между $x$ и $x_0$ # - Числитель - разница между значениями функции в этих точках # - Разделим # # # Узнаем, насколько быстро функция растёт между этими двумя точками. # Если приближать $x$ и $x_0$, то в пределе получим производную функции $f$ в точке $x_0$ # __Геометрический смысл:__ # # Производная - угол наклона касательной, которую можно провести к графику функции в точке $x_0$. Если функция растёт быстро, то касательная проходит под большим углом, производная большая. Если функция почти не растёт, то касательная будет почти горизонтальной, угол близок к 0. # # __Свойство производной:__ если взять точку экстремума (минимум или максимум функции), то производная в ней будет равна 0. # $$f'(x_0)=0$$ # <img src = 'images/derivative2.png'> # # Алгоритм поиска всех минимумов и максимумов функции, если функция дифференцируема (везде есть производная). # - считаем производную # - приравниваем к 0 # - находим корни уравнения # - смотрим, где значение меньше всего (минимум) # # # __Градиент функции:__ # # Градиент - вектор частных производных # # # $$\nabla f(x) = \begin{pmatrix} \frac {\partial f}{\partial x_1} & \dots & \frac {\partial f} {\partial x_d} \end{pmatrix}$$ # # Считаем производную функции в отдельности для каждой переменной $x_1, x_2, \dots, x_d$ (частные производные), составляем вектор из частных производных - градиент (обобщение производной для функции многих переменных). # # <img src='images/gradient.png'> # # Фиксируем точку $x_0$. В какую сторону функция быстрее всего растёт? Функция растёт в направлении градиента. Если градиент взять с минусом, то он покажет в сторону наискорейшего убывания функции. 
# Если градиент равен нулю, то это экстремум. # # #### Необходимое условие экстремума: # # Если точка $x_0$ - экстремум и в ней существует производная, то $$\nabla f(x_0)=0 $$ # Можно посчитать градиент функции, приравнять нулю, решить систему уравнений. Ответов может быть много. # # - Если функция строго выпуклая, то экстремум один. # # - MSE линейной регрессии - выпуклая функция # # # #### Градиент MSE в матричном виде: # # $$\displaystyle {\nabla \frac{1}{l} ||Xw-y||^2 = \frac{2}{l} X^T (Xw-y)}$$ # #### Аналитическое решение: # Приравняем нулю, решаем систему линейных уравнений: # # $$w=(X^T X)^{-1} X^T y$$ # # - $X^T X$ - ковариационная матрица # # - Если матрица $X^T X$ вырожденная (или почти вырожденная, определитель или собственные значения близки к нулю), то будут проблемы (обратной матрицы не существует). # # - Если признаков много - долго ждать (кубическое время). # # - Если не MSE, то решения в аналитическом виде может не быть # # sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None) # - fit_intercep - нужно ли включать $w_0$, или уже создан единичный признак # ## Переобучение и регуляризация # ### Нелинейная задача # <img src='images/linear1.png'> # $$a(x)=w_0 +w_1 x$$ # Синие точки - выборка, один признак, истинная зависимость - синус # # # Усложним модель, сделаем её полиномиальной, а не линейной. Добавим новые признаки. Получаем хорошую модель. # $$a(x)=w_0 +w_1 x + w_2 x^2 +w_3 x^3 + w_4 x^4$$ # Но качество неидеально, кривая проходит не через все точки. # # <img src='images/linear_sin4.png'> # Добавим еще степеней: # $$a(x)=w_0 +w_1 x + w_2 x^2 +w_3 x^3 + \dots + w_{15} x^{15}$$ # Ошибка такой модели будет намного меньше (на обучающей выборке). # Но модель переобучена. Качество на новых данных будет очень плохим. 
# # <img src='images/linear2.png'> # #### Симптом переобучения # - Большие коэффициенты, значения весов # - Эмпирическое наблюдение # $$a(x) = 0.5 + 12345678 x - 6661366613 x^2 + \dots$$ # # ### Регуляризация # Будем штрафовать модель за большие веса # # MSE: # $$\displaystyle {Q(a, X) = \frac{1}{l} \sum _{i=1}^{l}{(<w,x_i> - y_i)^2 -> min_w}}$$ # # Добавим __регуляризатор__ (штраф модели за большие веса): # # $$||w||^2= \sum _{j=1} ^d w^2 _j$$ # # Квадрат евклидовой нормы весов (сумма квадратов весов). # # Получим регуляризованный функционал: # $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(<w,x_i> - y_i)^2 + \lambda ||w||^2-> min_w}}$$ # # # $\lambda$ - коэффициент регуляризации. Насколько нам важно, чтобы веса были небольшими. Чем больше коэффициент, тем сильнее регуляризация. # # Аналитическое решение: # $$w=(X^T X + \lambda I)^{-1} X^T y$$ # # $\lambda I$ - диагональная матрица # # Гребневая регрессия (Ridge regression) # # sklearn.linear_model.Ridge(alpha=1.0...) # # #### Эффект регуляризации: # $$a(x)=w_0 +w_1 x + w_2 x^2 +w_3 x^3 + \dots + w_{15} x^{15}$$ # # $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(a(x_i) - y_i)^2 + 0.01 ||w||^2-> min_w}}$$ # # <img src=images/linear3.png> # # Увеличим коэффициент регуляризации # $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(a(x_i) - y_i)^2 + 1 ||w||^2-> min_w}}$$ # # <img src=images/linear4.png> # # # При слишком большом коэффициенте регуляризации модели проще иметь нулевые веса, чем пытаться уменьшить ошибку # $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(a(x_i) - y_i)^2 + 100 ||w||^2-> min_w}}$$ # # <img src=images/linear5.png> # $\lambda$ - гиперпараметр, его необходимо подбирать по отложенной выборке или кросс-валидации. Если подбирать по обучающей, то он будет равен 0. 
# #### LASSO (Least Absolute Shrinkage and Selection Operator)
# - используем не евклидову норму, а модули весов
# - некоторые веса зануляются
# - приводит к отбору признаков
# - чем больше $\lambda$, тем больше весов будут нулевыми
#
# $$\displaystyle {\frac{1}{l} \sum _{i=1}^{l}{(<w, x_i> - y_i)^2 + \lambda \sum ^{d} _{j=1} |w_j|-> min_w}}$$
#
#
# #### Регуляризаторы
#
# $||z||_2 = \sqrt {\sum _{j=1} ^d z^2 _j}$ - $L_2$-норма, евклидова
#
# $||z||_1 = \sum _{j=1} ^d |z _j|$ - $L_1$-норма, манхэттенская
# ### Интерпретация линейных моделей
# Предсказание стоимости квартиры
# $$a(x) = 100000 * (площадь) + 500000 * (число\_магазинов\_рядом) + 100 * (средний\_доход\_жильцов\_дома)$$
#
# Какой признак важнее?
#
# Чем больше вес, тем важнее признак? Да, если признаки масштабированы.
#
# #### Масштабирование признаков
# Вычислим среднее и стандартное отклонение признака на обучающей выборке:
#
# $$\mu_j = \frac{1}{l} \sum _{i=1} ^{l} x_i ^j$$
#
# $$\sigma_j = \sqrt{\frac{1}{l} \sum _{i=1} ^{l} (x_i ^j - \mu_j)^2}$$
#
# Вычтем из каждого значения признака среднее и поделим на стандартное отклонение:
#
# $$x_i ^j = \frac{x_i ^j - \mu _j}{\sigma_j}$$
#
#
# Или можно масштабировать на отрезок 0-1 (вычесть минимальное значение признака и поделить на разницу между максимальным и минимальным)
# # Домашнее задание
# ## Задача 1
# Построить модель линейной регрессии.
# - линейная модель
# - полином второй степени
# - полином третьей степени
# - экспонента
#
#
# Построить графики получившихся функций (линейная, квадратичная, кубическая, экспонента) и тренировочной выборки.
# - Посчитать MSE для каждого случая
# - Выбрать наилучший вариант (без тестовой выборки)
#
#
# Для решения можете использовать np.linalg.lstsq, np.polyfit, np.poly1d, scipy.stats.linregress, scipy.optimize.curve_fit, sklearn.linear_model.LinearRegression, statsmodels, np.linalg.solve (аналитически) или с помощью стохастического градиентного спуска (написать самостоятельно).
import numpy as np
import matplotlib.pyplot as plt

# Training data: 24 noisy observations of the target on the interval [0, 1].
y = np.array([12.19, 8.41, 14.68, 8.64, 32.94, 22.61, 45.92, 23.63,
              18.59, 36.22, 50.10, 46.22, 23.63, 47.30, 40.03, 56.53,
              38.41, 51.47, 6.29, 35.41, 67.79, 74.21, 79.12, 45.10])
x = np.linspace(0, 1, num=24)
plt.scatter(x, y);

# Пример решения для полинома второй степени:
# $$y (x) = w_{0}+w_{1}x +w_{2}x^2$$
#
# \begin{equation*}
# \begin{vmatrix}
# y_{1} \\
# y_{2} \\
# . \\
# y_{N} \\
# \end{vmatrix}=
# \begin{vmatrix}
# 1 & x_{1} & x_{1}^{2}\\
# 1 & x_{2} & x_{2}^{2}\\
# . & . & .\\
# 1 & x_{N} & x_{N}^{2}\\
# \end{vmatrix}
# \begin{vmatrix}
# w_{0} \\
# w_{1} \\
# w_{2}
# \end{vmatrix}
# \end{equation*}

# Design matrix with columns [1, x, x^2].  np.column_stack is the idiomatic
# NumPy construction and sizes the ones-column from x itself instead of
# hard-coding the sample count 24.
X = np.column_stack((np.ones_like(x), x, x * x))

# Ordinary least squares: solve X @ W ≈ y for the weight vector W.
W = np.linalg.lstsq(X, y, rcond=None)[0]
W


def f(x, w):
    """Evaluate the quadratic model w[0] + w[1]*x + w[2]*x**2 at points x."""
    return w[0] + w[1]*x + w[2]*x*x


f(x, W)

plt.scatter(x, y, label='train set')
plt.plot(x, f(x, W), label='quad lstsq')
plt.legend();

from sklearn.metrics import mean_squared_error
mean_squared_error(y, f(x, W))

# ## Задача 2
#
# Данные: https://scikit-learn.org/stable/datasets/toy_dataset.html#diabetes-dataset
#
# 1. Разделите данные на train/test или используйте кросс-валидацию (можно grid search)
#
# 2. Постройте несколько моделей линейной регрессии:
# - sklearn.linear_model.LinearRegression
# - sklearn.linear_model.Ridge
# - sklearn.linear_model.Lasso
# - sklearn.linear_model.ElasticNet
# 3. Для последних трёх вариантов попробуйте разные значения коэффициента регуляризации.
# 4. Сравните построенные модели по MSE на тестовой выборке (или через кросс-валидацию)
# 5. Какая модель оказалась наилучшей? Почему?

# +
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score

diabetes_X, diabetes_y = datasets.load_diabetes(return_X_y=True)
04_linear_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import arviz as az import matplotlib.pyplot as plt import numpy as np import pymc as pm az.style.use("arviz-white") # # Logistic regression for NBA clutch free throws # # ### Chapter 4.3.3: Generalized linear models # # The NBA clutch free throws data set has three variables for player $i=1,...,10$: # # 1. $Y_i$ is the number clutch free throws made # 2. $N_i$ is the number clutch free throws attempted # 3. $q_i$ is the proportion of the non-clutch free throws made # # We model these data as # $$Y_i\sim\mbox{Binomial}(N_i,p_i),$$ # where $p_i$ is the true probability of making a clutch shot. The objective is to explore the relationship between clutch and overall percentages, $p_i$ and $q_i$. We do this using two logistic regression models: # # 1. $\mbox{logit}(p_i) = \beta_1 + \beta_2\mbox{logit}(q_i)$ # 2. $\mbox{logit}(p_i) = \beta_1 + \mbox{logit}(q_i)$ # # In both models we select uninformative priors $\beta_j\sim\mbox{Normal}(0,10^2)$. # # In the first model, $p_i=q_i$ if $\beta_1=0$ and $\beta_2=1$; in the second model $p_i=q_i$ if $\beta_1=0$. Therefore, we compare the posteriors of the $\beta_j$ to these values to analyze the relationship between $p_i$ and $q_i$. 
# +
## Load the data
np.random.seed(820)

# Clutch free throws made (Y) out of attempts (N), and overall non-clutch
# free-throw proportion q, for the ten players listed in `inits` below.
Y = np.array([64, 72, 55, 27, 75, 24, 28, 66, 40, 13])
N = np.array([75, 95, 63, 39, 83, 26, 41, 82, 54, 16])
q = np.array([0.845, 0.847, 0.880, 0.674, 0.909, 0.899, 0.770, 0.801, 0.802, 0.875])

X = np.log(q) - np.log(1 - q)  # X = logit(q)
# -

## Plot the data
inits = ("RW", "JH", "KL", "LJ", "SC", "IT", "GA", "JW", "AD", "KD")
plt.plot(100 * q, 100 * Y / N, ".")
plt.xlim(65, 95)
plt.ylim(65, 95)
plt.xlabel("Overall percentage")
plt.ylabel("Clutch percentage")
for i, txt in enumerate(inits):
    plt.annotate(txt, (100 * q[i] - 0.5, 100 * Y[i] / N[i] + 1))
plt.plot(plt.xlim(), plt.ylim(), "k");

# Model 1: logit(p_i) = β1 + β2 * logit(q_i)
with pm.Model() as model_1:
    # Priors
    # NOTE(review): the text above states β_j ~ Normal(0, 10^2), i.e. sd 10,
    # but sigma=100 is used here; both are weakly informative — confirm intent.
    β = pm.Normal("β", 0, 100, shape=2)
    p = pm.math.sigmoid(β[0] + β[1] * X)
    # likelihood
    y = pm.Binomial("y", n=N, p=p, observed=Y)
    samples_1 = pm.sample(2000)

az.plot_trace(samples_1);

az.summary(samples_1)

# Model 2: logit(p_i) = β1 + logit(q_i)  (slope fixed at 1)
with pm.Model() as model_2:
    # Priors
    β = pm.Normal("β", 0, 100)
    p = pm.math.sigmoid(β + X)
    # likelihood
    y = pm.Binomial("y", n=N, p=p, observed=Y)
    samples_2 = pm.sample(2000)

az.plot_trace(samples_2);

az.summary(samples_2)

# BUG FIX: with `import pymc as pm` (v4+), pm.sample returns an ArviZ
# InferenceData object, which does not support trace-style indexing like
# samples_2["β"]; the draws must be pulled from the posterior group.
beta2_draws = samples_2.posterior["β"].values.ravel()
np.mean(beta2_draws < 0)  # posterior probability that Model 2's β is negative

# +
# Flatten Model 1's (chain, draw, 2) posterior draws to (samples, 2).
beta1_draws = samples_1.posterior["β"].values.reshape(-1, 2)

az.plot_kde(
    beta1_draws[:, 0], label="Model 1, intercept", plot_kwargs={"color": "C0", "ls": "--"}
)
az.plot_kde(beta1_draws[:, 1], label="Model 1, slope", plot_kwargs={"color": "C0"})
az.plot_kde(beta2_draws, label="Model 2, slope", plot_kwargs={"color": "C1"})

plt.ylabel("posterior density")
plt.xlabel("β");
BSM/Chapter_04_01_Logistic_regression_for_NBA_clutch_free_throws.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R 3.2 # language: R # name: ir32 # --- # # Nonmarine vs. Marine Analysis # #### <NAME>, [NEOS](http://www.neosgeo.com) # 2016 SEG Machine Learning Contest # # ## 1 Introduction # # This notebook analyzes the relationship between the Nonmarine vs. Marine log indicator (**NM_M** in the dataset) and its impact on facies. The idea is that different facies should have nearly all observations belonging to either the *nonmarine* class, or the *marine* class. If this is the case, it may be prudent to develop two separate classifiers split on the Nonmarine vs. Marine indicator. This may help reduce misclassification (observations with a *nonmarine* indicator may only be classified as a *nonmarine* facies, likewise for *marine*). Similarly, the machine learning algorithm may have already "learned" this - in which case the results will be equal or worse. # # After loading the data, I will examine the distribution of the **NM_M** feature across each facies. I will then determine which facies may be considered *nonmarine* origin, and which facies *marine* origin. I will then apply the Support Vector Machine algorithm to **A - the entire dataset** (as with *jpoirier001.ipynb*), and **B - nonmarine and marine separately** to assess any improvement in predictive accuracy. Here, accuracy will be measured by the average **F1 score** across all classes when applied to the **Newby** well - which will be considered a blind test and not included when training the model. # # First, let's load packages required to perform our analysis. # + # visualization packages library(repr) library(ggplot2) library(ggthemes) library(cowplot) # machine learning packages library(caret) library(e1071) # - # ## 2 Exploring the dataset # # First let's load the data and look at the first few rows. 
The data is contained in the file *facies_vectors.csv* and contains five wireline log measurements, two indicator variables, and a facies label at half-foot increments.

# +
# load data; per the variable list below, the leading label/well columns and
# NM_M are read as factors and the log measurements as numerics
fname <- "../facies_vectors.csv"
data <- read.csv(fname, colClasses=c(rep("factor",3), rep("numeric",6), "factor", "numeric"))

# display the first rows of the data set and its dimensions
head(data)
paste(dim(data)[1], "rows x", dim(data)[2], "columns")
# -

# hold out the Newby well as a blind test set; the remaining wells are used for training
blind <- data[data$Well.Name == 'NEWBY',]
data <- data[data$Well.Name != 'NEWBY',]

# This data is from the Panoma Council Grove Field (predominantly gas reservoir) over 2700 sq mi in SW Kansas. The dataset is from nine wells with 4149 samples. Each sample consists of seven predictor variables and a rock facies. The validation (test) data have 830 samples from two wells having the same seven predictor variables. Facies are based on examination of cores from the nine wells taken vertically at half-foot intervals. The predictor variables include five wireline log measurements and two geologic constraining variables that are derived from geologic knowledge and are sampled at the same half-foot rate.
#
# The seven predictor variables are:
#
# * GR - Gamma ray
# * ILD_log10 - Resistivity logging
# * PE - Photoelectric effect (some wells are missing this log)
# * DeltaPhi - Neutron-density porosity difference
# * PHIND - Average neutron-density porosity
#
# The two geologic constraining variables are:
#
# * NM_M - nonmarine-marine indicator
# * RELPOS - Relative position
#
# The nine discrete facies (classes of rock) are:
#
# 1. Nonmarine sandstone
# 2. Nonmarine coarse siltstone
# 3. Nonmarine fine siltstone
# 4. Marine siltstone and shale
# 5. Mudstone (limestone)
# 6. Wackestone (limestone)
# 7. Dolomite
# 8. Pckstone-grainstone (limestone)
# 9. Phylloid-algal bafflestone (limestone)
#
# These facies are not discrete and can gradually blend into one another.
Some have neighboring facies that are rather close. Mislabeling within these neighboring facies can be expected to occur. The following table lists the facies, their abbreviated labels, and their approximate neighbors.
#
#
# Facies |Label| Adjacent Facies
# :---: | :---: |:--:
# 1 |SS| 2
# 2 |CSiS| 1,3
# 3 |FSiS| 2
# 4 |SiSh| 5
# 5 |MS| 4,6
# 6 |WS| 5,7
# 7 |D| 6,8
# 8 |PS| 6,7,9
# 9 |BS| 7,8
#
# Now let's define a colormap for the facies such that they are represented by consistent colors in this tutorial. We'll also take a peek at the statistical distribution of the input variables.

# +
# colour and short-name lookup tables, indexed by facies number:
# 1=sandstone, 2=c_siltstone, 3=f_siltstone, 4=marine_silt_shale, 5=mudstone,
# 6=wackestone, 7=dolomite, 8=packestone, 9=bafflestone
facies_colors <- c('#F4D03F', '#F5B041', '#DC7633', '#6E2C00', '#1B4F72', '#2E86C1', '#AED6F1', '#A569BD', '#196F3D')

facies_labels <- c('SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS')

summary(data)
# -

# Looking at the statistical summary of the input variables, it can be seen that all but the **PE** (photoelectric effect) inputs have no **NA**'s listed. For this tutorial, we will drop the feature vectors that do not have a valid **PE** entry.

# keep only complete rows (this drops the samples whose PE value is NA)
PE_mask <- complete.cases(data)
data <- data[PE_mask,]
paste(dim(data)[1], "rows x", dim(data)[2], "columns")

# Out of the original 4149 samples, we will be training our model on 2769 samples. Now let's build some familiar log plots!
# logplot: draw a composite wireline-log display for a single well.
#   x         - data frame holding one well's samples (uses Depth, GR,
#               ILD_log10, DeltaPHI, PHIND, PE, Facies and, if incl_pred,
#               a Predicted column)
#   incl_fac  - include a track showing the labelled Facies column
#   incl_pred - include a track showing the Predicted facies column
# Each track is a separate ggplot: depth is mapped to x and the display is
# coord_flip()-ped so depth runs vertically; switch_axis_position() (cowplot
# helper) relocates each track's axis, and plot_grid() assembles the tracks
# side by side with a well-name label drawn on top.
logplot <- function(x, incl_fac=TRUE, incl_pred=FALSE) {
    # GR gamma ray track
    g1 <- ggplot(x) + theme_economist_white(gray_bg=T) +
        scale_y_continuous(lim=c(0,400), breaks=seq(0,400,100),
                           labels=c("0"="0","100"="","200"="200","300"="","400"="400")) +
        scale_x_continuous(trans="reverse") + coord_flip() +
        labs(title="", x="Depth", y="GR") +
        geom_bar(stat="identity", data=x, aes(x=Depth, y=GR, fill=GR, alpha=0.5), width=0.5) +
        geom_line(aes(x=Depth, y=GR), lwd=.5, col='black') +
        scale_fill_continuous(limits=c(0,225), low="yellow", high="black") +
        theme(panel.grid.major.x = element_line(colour="gray", size=0.5),
              legend.position="none",
              axis.text=element_text(size=6),
              axis.title=element_text(size=8,face="bold"))
    g1 <- switch_axis_position(g1, 'x')

    # ILD resistivity track (transform it back to actual units)
    g2 <- ggplot(x) + theme_economist_white(gray_bg=T) +
        scale_y_log10(lim=c(0.1,50), breaks=c(.1,.2,.4,.6,.8,1,2,4,6,8,10,20,40),
                      labels=c(".1"=".1",".2"="",".4"="",".6"="",".8"="",
                               "1"="1","2"="","4"="","6"="","8"="","10"="10",
                               "20"="","40"="")) +
        scale_x_continuous(trans="reverse") + coord_flip() +
        labs(title="", x="", y="ILD") +
        geom_line(aes(x=Depth, y=10^ILD_log10), lwd=.5, col="skyblue4") +
        theme(panel.grid.major.x = element_line(colour="gray", size=0.25),
              legend.position="none",
              axis.text=element_text(size=6),
              axis.title=element_text(size=8,face="bold"),
              axis.text.y=element_blank())
    g2 <- switch_axis_position(g2, 'x')

    # DeltaPhi track
    g3 <- ggplot(x) + theme_economist_white(gray_bg=T) +
        scale_y_continuous(lim=c(-20,20), breaks=seq(-20,20,10),
                           labels=c("-20"="-20","-10"="","0"="0","10"="","20"="20")) +
        scale_x_continuous(trans="reverse") + coord_flip() +
        labs(title="", x="", y="DeltaPhi") +
        geom_line(aes(x=Depth, y=DeltaPHI), lwd=.5, col="seagreen4") +
        theme(panel.grid.major.x = element_line(colour="gray", size=0.25),
              legend.position="none",
              axis.text=element_text(size=6),
              axis.title=element_text(size=8,face="bold"),
              axis.text.y=element_blank())
    g3 <- switch_axis_position(g3, 'x')

    # PHIND neutron porosity track
    g4 <- ggplot(x) + theme_economist_white(gray_bg=T) +
        scale_y_continuous(lim=c(0,50), breaks=c(0,15,30,45)) +
        scale_x_continuous(trans="reverse") + coord_flip() +
        labs(title="", x="", y="PHIND") +
        geom_line(aes(x=Depth, y=PHIND), lwd=.5, col="firebrick") +
        theme(panel.grid.major.x = element_line(colour="gray", size=0.25),
              legend.position="none",
              axis.text=element_text(size=6),
              axis.title=element_text(size=8,face="bold"),
              axis.text.y=element_blank())
    g4 <- switch_axis_position(g4, 'x')

    # PE photoelectric effect track
    g5 <- ggplot(x) + theme_economist_white(gray_bg=T) +
        scale_y_continuous(lim=c(0,8), breaks=c(0,2,4,6,8)) +
        scale_x_continuous(trans="reverse") + coord_flip() +
        labs(title="", x="", y="PE") +
        geom_line(aes(x=Depth, y=PE), lwd=.5, col="black") +
        theme(panel.grid.major.x = element_line(colour="gray", size=0.25),
              legend.position="none",
              axis.text=element_text(size=6),
              axis.title=element_text(size=8,face="bold"),
              axis.text.y=element_blank())
    g5 <- switch_axis_position(g5, 'x')

    # constant-height bars used by the categorical facies/prediction tracks
    x$ones <- rep(1, nrow(x))

    # build a facies track if we are to include
    if (incl_fac) {
        g6 <- ggplot(x) + theme_economist_white(gray_bg=T) +
            scale_y_continuous(lim=c(-0.1,1.1), breaks=c(0,1), labels=c("0"="", "1"="")) +
            scale_x_continuous(trans="reverse") + coord_flip() +
            labs(title="", x="", y="Facies") +
            geom_bar(stat="identity", data=x, aes(x=Depth, y=ones, fill=Facies), width=0.5) +
            scale_fill_manual(values=facies_colors, drop=F, labels=facies_labels) +
            theme(axis.title=element_text(size=8,face="bold"),
                  axis.text.y=element_blank(),
                  axis.text.x=element_text(size=6))
    }

    # build a prediction track if we are to include
    if (incl_pred) {
        # build Predicted Facies track
        g7 <- ggplot(x) + theme_economist_white(gray_bg=T) +
            scale_y_continuous(lim=c(-0.1,1.1), breaks=c(0,1), labels=c("0"="", "1"="")) +
            scale_x_continuous(trans="reverse") + coord_flip() +
            labs(title="", x="", y="Predicted") +
            geom_bar(stat="identity", data=x, aes(x=Depth, y=ones, fill=Predicted), width=0.5) +
            scale_fill_manual(values=facies_colors, drop=F, labels=facies_labels) +
            theme(legend.position="right", legend.text=element_text(size=6),
                  legend.title=element_blank()) +
            theme(axis.title=element_text(size=8,face="bold"),
                  axis.text.y=element_blank(),
                  axis.text.x=element_text(size=6))
        g7 <- switch_axis_position(g7, 'x')

        # finish off Facies track with no legend if we are to include
        if (incl_fac) {
            g6 <- g6 + theme(legend.position="none")
            g6 <- switch_axis_position(g6, 'x')

            # bring all the tracks together as a grid
            g <- plot_grid(g1, g2, g3, g4, g5, g6, g7, ncol=7, rel_widths=c(4,3,3,3,3,2,5))
        } else {
            # bring all the tracks together as a grid
            g <- plot_grid(g1, g2, g3, g4, g5, g7, ncol=6, rel_widths=c(4,3,3,3,3,5))
        }
        ggdraw() + draw_plot(g, width=1, height=1) + draw_plot_label(x$Well.Name[1], size=10)
    } else {
        if (incl_fac) {
            # finish off Facies track with a legend
            g6 <- g6 + theme(legend.position="right", legend.text=element_text(size=6),
                             legend.title=element_blank())
            g6 <- switch_axis_position(g6, 'x')

            # bring all the tracks together as a grid
            g <- plot_grid(g1, g2, g3, g4, g5, g6, ncol=6, rel_widths=c(4,3,3,3,3,6))
        } else {
            # bring all the tracks together as a grid
            g <- plot_grid(g1, g2, g3, g4, g5, ncol=5, rel_widths=c(4,3,3,3,3))
        }
        ggdraw() + draw_plot(g, width=1, height=1) + draw_plot_label(x$Well.Name[1], size=10)
    }
}

# +
options(repr.plot.width=8, repr.plot.height=5)

# plot logs for the Shrimplin and Shankle wells
logplot(data[data$Well.Name == "SHRIMPLIN",])
logplot(data[data$Well.Name == "SHANKLE",])
# -

# ## 3 Too marine or not too marine?
#
# Now let's examine the distribution of facies for both observations featuring a *nonmarine* indicator as well as a *marine* indicator. To do this, let's build a histogram faceted on the **NM_M** variable.
# +
options(repr.plot.width=8, repr.plot.height=4)

# modify the NM_M factors to be a string - more descriptive and plots nicer
levels(data$NM_M)[levels(data$NM_M)=="1"] <- "Nonmarine"
levels(data$NM_M)[levels(data$NM_M)=="2"] <- "Marine"

# build histogram faceted on the NM_M (nonmarine vs marine) feature:
# one facies-count panel per indicator value, bars coloured by facies
g <- ggplot(data, aes(x=Facies)) + theme_economist_white(gray_bg=T) +
    facet_grid(. ~ NM_M) +
    geom_bar(aes(x=Facies, fill=Facies)) +
    labs(title="Distribution of Facies", x="", y="") +
    scale_x_discrete(labels=facies_labels) +
    scale_fill_manual(values=facies_colors, drop=F, labels=facies_labels) +
    theme(legend.position="none", legend.title=element_blank(), legend.text=element_text(size=6),
          axis.text=element_text(size=6), plot.title=element_text(size=10),
          axis.title=element_blank(), axis.ticks.x=element_blank())
g
# -

# That's a pretty interesting visual. The **SS**, **CSiS**, and **FSiS** facies appear to be **nonmarine**; while the remaining facies appear to be **marine**. There do seem to be a handful of cross-classifications - let's make a table to quantify the percentage of observations not following this trend.
# +
# modify the factor levels for facies to be more descriptive
levels(data$Facies)[levels(data$Facies)=="1"] <- "SS"
levels(data$Facies)[levels(data$Facies)=="2"] <- "CSiS"
levels(data$Facies)[levels(data$Facies)=="3"] <- "FSiS"
levels(data$Facies)[levels(data$Facies)=="4"] <- "SiSh"
levels(data$Facies)[levels(data$Facies)=="5"] <- "MS"
levels(data$Facies)[levels(data$Facies)=="6"] <- "WS"
levels(data$Facies)[levels(data$Facies)=="7"] <- "D"
levels(data$Facies)[levels(data$Facies)=="8"] <- "PS"
levels(data$Facies)[levels(data$Facies)=="9"] <- "BS"

# count observations of facies which are nonmarine or marine
# (column 1 = nonmarine counts, column 2 = marine counts)
t <- table(data$Facies, data$NM_M == "Marine")

# calculate those counts as percentages
nm_percent <- round(100 * t[,1] / (t[,1] + t[,2]),0)
m_percent <- round(100 * t[,2] / (t[,1] + t[,2]),0)
t <- as.table(cbind(t[,1], t[,2], nm_percent, m_percent))

# format the table and output
dimnames(t)[[2]] <- c('# Nonmarine', '# Marine', '% Nonmarine', '% Marine')
t
# -

# **MS** - Mudstone is the facies with the most uncertainty here. Of the 189 observations of the **MS** facies, 11 (6%) were considered nonmarine. It's probably safe to call **SS**, **CSiS**, and **FSiS** all nonmarine, and the remaining facies marine. How many of the observations overall do not fall into this ordering of the data?

# +
# sum up how many correctly/incorrectly fall under our "rule"
# (facies 1-3 counted as nonmarine, facies 4-9 as marine)
nm_m_true <- t[1,1] + t[2,1] + t[3,1] + t[4,2] + t[5,2] + t[6,2] + t[7,2] + t[8,2] + t[9,2]
nm_m_false <- t[1,2] + t[2,2] + t[3,2] + t[4,1] + t[5,1] + t[6,1] + t[7,1] + t[8,1] + t[9,1]

# percentage of observations not falling under our "rule"
paste(round(100 * nm_m_false / (nm_m_true + nm_m_false),2), "%")
# -

# Less than 2% of observations fail our *rule*. Let's move forward to applying some Support Vector Machines to our data!
#
# ## 4 A support vector machine classifier
#
# As in the previous notebook *jpoirier001.ipynb*, let's build a Support Vector Machine classifier.
First, we'll build a classifier and tune the results. Second, we'll build two classifiers (one for **nonmarine** indicators, one for **marine**).
#
# ### 4.1 Support vector machine with all data
#
# This workflow will be very similar to that of *jpoirier001.ipynb*. Ultimately we expect the same results.

# +
## HELPER FUNCTIONS ##
################################################################################################

# list of adjacent facies: element i holds the facies numbers treated as
# geologically adjacent to facies i (cf. the adjacency table in section 2)
adjacent_facies <- list(as.list(c(2)), as.list(c(1,3)), as.list(c(2)), as.list(c(5)),
                        as.list(c(4,6)), as.list(c(5,7,8)), as.list(c(6,8)),
                        as.list(c(6,7,9)), as.list(c(7,8)))

# function to calculate the confusion matrix which includes adjacent facies as correctly classified
# cm_table - 9x9 confusion matrix (predictions in rows, reference in columns)
adj_cm_table <- function(cm_table) {
    adj_cm_table <- cm_table

    # loop through facies to build adjacent facies confusion matrix
    for (i in 1:9) {
        cor <- cm_table[i,i]

        # move adjacently correct facies into correct facies
        for (j in 1:length(adjacent_facies[[i]])) {
            cor <- cor + cm_table[adjacent_facies[[i]][[j]],i]
            adj_cm_table[adjacent_facies[[i]][[j]],i] <- 0
        }
        adj_cm_table[i,i] <- cor
    }

    # return adjacently corrected confusion matrix
    adj_cm_table
}

# function to display a confusion matrix
# replaces the integer facies with facies shortnames first
disp_cm <- function(cm) {
    dimnames(cm)$Prediction <- facies_labels
    dimnames(cm)$Reference <- facies_labels
    print(cm)
}

# per-facies precision/recall/F1 report plus the mean F1 across facies
# cm - confusion matrix either as a confusionMatrix object or table object
#      NOTE(review): a plain matrix matches neither branch below and would
#      leave df undefined - coerce with as.table() before calling
# x - the data used, from this we calculate how many observations (support) for each facies
accuracy_metrics <- function(cm, x) {
    # if given confusion matrix (cm) is a confusionMatrix object
    if (class(cm) == "confusionMatrix") {
        # columns 5-7 of caret's byClass matrix are Precision, Recall and F1
        df <- data.frame("Facies" = facies_labels,
                         "Precision" = cm[["byClass"]][,5],
                         "Recall" = cm[["byClass"]][,6],
                         "F1" = cm[["byClass"]][,7],
                         "Support" = as.matrix(table(x$Facies)))
        df[,-1] <- round(df[,-1],2)
        rownames(df) <- NULL
    }
    # if given confusion matrix is a table object
    else if (class(cm) == "table") {
        # initialize vectors for precision, recall, and f1 metrics with zeros
        prec <- rep(0,9)
        recall <- rep(0,9)
        f1 <- rep(0,9)

        # loop through facies to compute precision, recall, and f1 for each facies
        beta <- 1
        for (i in 1:9) {
            prec[i] <- cm[i,i] / sum(cm[i,])
            recall[i] <- cm[i,i] / sum(cm[,i])
            f1[i] <- (1 + beta^2) * prec[i] * recall[i] / ((beta^2 * prec[i]) + recall[i])
        }

        # calculate model metrics for precision, recall, and f1 and output
        df <- data.frame(Facies=facies_labels, Precision=prec, Recall=recall, F1=f1,
                         Support=as.matrix(table(x$Facies)))

        # round values to two digits
        df[,-1] <- round(df[,-1],2)
    }

    # average F1 score across all classes
    print(paste0("Overall F1-score of: ", round(mean(df$F1, na.rm=T),2)))
    print("Accuracy metrics:")
    df
}

# +
set.seed(1234)

# split into training and test data sets
feature_vectors <- data[, c("Facies", "GR", "ILD_log10", "DeltaPHI", "PHIND", "PE", "NM_M", "RELPOS")]
trainIndex <- createDataPartition(feature_vectors$Facies, p=.8, list=F, times=1)
x_train <- feature_vectors[trainIndex,]
x_test <- feature_vectors[-trainIndex,]

set.seed(3124)

# calculates models for a variety of hyperparameters - this will take a few minutes
tune.out <- tune(svm, Facies ~ ., data=x_train, kernel="radial",
                 ranges=list(cost=c(.01,1,5,10,20,50,100,1000,5000,10000),
                             gamma=c(.0001,.001,.01,1,10)))

# predict facies using the best model
cv_predictions <- predict(tune.out$best.model, newdata=x_test)

## PART ONE: Confusion matrix for facies classification
"Facies classification confusion matrix:"
cv_cm <- confusionMatrix(cv_predictions, x_test$Facies)
disp_cm(as.matrix(cv_cm[["table"]]))

## PART TWO: Confusion matrix for adjacent facies classification
"Adjacent facies classification confusion matrix:"
cv_adj <- adj_cm_table(as.matrix(cv_cm[["table"]]))
disp_cm(cv_adj)
# -

# Now let's apply the model to our blind data set and output the confusion matrices and accuracy metrics!
# +
# relabel the blind well's facies with the same descriptive level names used
# for the training data so confusion-matrix rows/columns line up
levels(blind$Facies)[levels(blind$Facies)=="1"] <- "SS"
levels(blind$Facies)[levels(blind$Facies)=="2"] <- "CSiS"
levels(blind$Facies)[levels(blind$Facies)=="3"] <- "FSiS"
levels(blind$Facies)[levels(blind$Facies)=="4"] <- "SiSh"
levels(blind$Facies)[levels(blind$Facies)=="5"] <- "MS"
levels(blind$Facies)[levels(blind$Facies)=="6"] <- "WS"
levels(blind$Facies)[levels(blind$Facies)=="7"] <- "D"
levels(blind$Facies)[levels(blind$Facies)=="8"] <- "PS"
levels(blind$Facies)[levels(blind$Facies)=="9"] <- "BS"

"PART ONE: Facies classification"
# drop columns 2-4 (non-predictor metadata) before predicting with the tuned model
blind_predictions <- predict(tune.out$best.model, newdata=blind[,c(-2,-3,-4)])
blind_cm <- confusionMatrix(blind_predictions, blind$Facies)
disp_cm(as.matrix(blind_cm[["table"]]))
accuracy_metrics(blind_cm, blind)

"PART TWO: Adjacent facies classification"
"Adjacent facies classification accuracy metrics"
blind_adj <- adj_cm_table(as.matrix(blind_cm[["table"]]))
disp_cm(blind_adj)
# BUG FIX: this previously printed accuracy_metrics(cv_cm, x_test) - the
# cross-validation metrics - instead of the blind-well adjacent-facies
# metrics computed just above. as.table() is needed so accuracy_metrics()
# takes its table branch (a bare matrix matches neither branch).
accuracy_metrics(as.table(blind_adj), blind)
# -

# These results are consistent with what we observed in *jpoirier001.ipynb* - an overall F1 score of 0.4 and 0.77 for Facies and Adjacent Facies classification respectively.
#
# ### 4.2.1 Support vector machine with nonmarine data
#
# Now let's subset the training data to only include observations with a nonmarine indicator. From this, I will build a support vector machine model. We will perform cross-validation again to see if the optimal model parameters are different for this subset of data. We'll wait to do the adjacent facies classification accuracy analysis once we bring these classifications back together with the marine classifications.
# +
# Train an SVM on the NONMARINE subset only.
# subset data - drop "marine" levels and also no longer train using NM_M channel since we only have one value
# (-7 removes the NM_M column; droplevels discards the now-empty marine facies levels)
nm_feature_vectors <- droplevels(feature_vectors[feature_vectors$NM_M == "Nonmarine" &
                                                 feature_vectors$Facies %in% c("SS", "CSiS", "FSiS"), -7])

# split into training and cross-validation (80/20, stratified by facies)
set.seed(3124)
nm_trainIndex <- createDataPartition(nm_feature_vectors$Facies, p=.8, list=F, times=1)
nm_x_train <- nm_feature_vectors[nm_trainIndex,]
nm_x_test <- nm_feature_vectors[-nm_trainIndex,]

# calculates models for a variety of hyperparameters - this will take a few minutes
# (grid search over cost and gamma for a radial-kernel SVM)
nm_tune.out <- tune(svm, Facies ~ ., data=nm_x_train, kernel="radial",
                    ranges=list(cost=c(.01,1,5,10,20,50,100,1000,5000,10000),
                                gamma=c(.0001,.001,.01,1,10)))
summary(nm_tune.out)

# predict facies using the best model
nm_cv_predictions <- predict(nm_tune.out$best.model, newdata=nm_x_test)

## PART ONE: Confusion matrix for facies classification
"Facies classification confusion matrix:"
nm_cv_cm <- confusionMatrix(nm_cv_predictions, nm_x_test$Facies)
print(as.matrix(nm_cv_cm[["table"]]))
# -

# If we recall *jpoirier001.ipynb*, the cross-validation showed best parameters of cost 10 and gamma 1. We observe here best parameters of cost 5 and gamma 1 (although the error levels for each are very close). It is noteworthy that our best error level has improved from 0.2475297 in *jpoirier001.ipynb* to 0.1908166 for nonmarine classifications. What about marine?
#
# ### 4.2.2 Support vector machine with marine data
#
# Now let's subset the training data to only include observations with a marine indicator. From this, I will build a support vector machine model. We will perform cross-validation again to see if the optimal model parameters are different for this subset of data.
# +
# Train an SVM on the MARINE subset only (mirror of the nonmarine cell).
# subset data - drop "nonmarine" levels and also no longer train using NM_M channel since we only have one value
m_feature_vectors <- droplevels(feature_vectors[feature_vectors$NM_M == "Marine" &
                                                feature_vectors$Facies %in% c("SiSh", "MS", "WS", "D", "PS", "BS"), -7])

# split into training and cross-validation (80/20, stratified by facies)
set.seed(3124)
m_trainIndex <- createDataPartition(m_feature_vectors$Facies, p=.8, list=F, times=1)
m_x_train <- m_feature_vectors[m_trainIndex,]
m_x_test <- m_feature_vectors[-m_trainIndex,]

# calculates models for a variety of hyperparameters - this will take a few minutes
m_tune.out <- tune(svm, Facies ~ ., data=m_x_train, kernel="radial",
                   ranges=list(cost=c(.01,1,5,10,20,50,100,1000,5000,10000),
                               gamma=c(.0001,.001,.01,1,10)))
summary(m_tune.out)

# predict facies using the best model
m_cv_predictions <- predict(m_tune.out$best.model, newdata=m_x_test)

## PART ONE: Confusion matrix for facies classification
"Facies classification confusion matrix:"
m_cv_cm <- confusionMatrix(m_cv_predictions, m_x_test$Facies)
print(as.matrix(m_cv_cm[["table"]]))
# -

# For marine facies, our error level has actually gone up to 0.2506868. Realistically, some k-folds cross-validation here could give us a better picture of error levels. But for now, let's see what happens when we bring it all together!
#
# ### 4.2.3 Bringing nonmarine and marine classifications together
#
# Now let's apply the cross-validated models to the blind well data (**Newby**) and evaluate the results against our initial model built using all data at once.
# +
# modify the NM_M factors to be a string - more descriptive and plots nicer
levels(blind$NM_M)[levels(blind$NM_M)=="1"] <- "Nonmarine"
levels(blind$NM_M)[levels(blind$NM_M)=="2"] <- "Marine"

# subset data - drop "nonmarine" levels and also no longer train using NM_M channel since we only have one value
nm_blind <- blind[blind$NM_M == "Nonmarine",]
m_blind <- blind[blind$NM_M == "Marine",]

# predict facies using the best models (one specialist model per environment)
nm_blind_predictions <- predict(nm_tune.out$best.model, newdata=nm_blind)
m_blind_predictions <- predict(m_tune.out$best.model, newdata=m_blind)

# combine the confusion matrices for the nonmarine and marine predictions
nm_blind_cm <- as.matrix(confusionMatrix(nm_blind_predictions, nm_blind$Facies)[["table"]])
m_blind_cm <- as.matrix(confusionMatrix(m_blind_predictions, m_blind$Facies)[["table"]])
# cv_adj is reused only as a template for a 9x9 matrix with the right dimnames.
nmm_blind_cm <- cv_adj
nmm_blind_cm[1:9, 1:9] <- 0
nmm_blind_cm[1:3, 1:3] <- nm_blind_cm[1:3, 1:3]
nmm_blind_cm[4:9, 4:9] <- m_blind_cm[4:9, 4:9]
# NOTE(review): only the block-diagonal cells are copied, so any sample whose
# true facies falls outside its NM_M indicator's facies group (e.g. a marine
# facies label on a "Nonmarine" row) is silently dropped from the combined
# matrix -- confirm the indicator and facies labels never disagree.

"PART ONE: Facies classification"
"Facies classification accuracy metrics"
nmm_blind_cm
accuracy_metrics(nmm_blind_cm, blind)

"PART TWO: Adjacent facies classification"
# NOTE(review): the label string below looks copy-pasted from PART ONE;
# it describes the *adjacent* facies metrics.
"Facies classification accuracy metrics"
nmm_blind_adj <- adj_cm_table(nmm_blind_cm)
disp_cm(nmm_blind_adj)
accuracy_metrics(nmm_blind_adj, blind)
# -

# ### 5 Conclusions
#
# While the overall F1-score of the adjacent facies classification increased from 0.77 to 0.83; the facies classification problem itself stayed the same at 0.4. The SVM algorithm likely identified this strong relationship between nonmarine/marine indicator and facies - so there was little we could do to improve it by manually forcing it. So let's try something else next.
jpoirier/archive/Nonmarine vs Marine Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Sandbox notebook: explores project/run folder discovery, URL encoding of
# project names, and YAML metadata loading for the app.
# All imports are hoisted here; the original sprinkled them mid-notebook,
# which made some cells order-dependent.
import os
from os import listdir
from os.path import exists, isfile, join
import urllib.parse

import pandas as pd
import yaml

# Discover project folders (directories only) and sort them alphabetically.
project_names = [name for name in os.listdir('../projects') if os.path.isdir('../projects/' + name)]
project_names.sort()
project_names

# quote_plus encodes spaces as '+'; quote encodes them as '%20'.
safe_string = urllib.parse.quote_plus(project_names[0])
safe_string

urllib.parse.quote(project_names[0])

encoded_urls = [urllib.parse.quote(x) for x in project_names]
encoded_urls

# Round-trip: decoding the first encoded name recovers the original.
urllib.parse.unquote(encoded_urls[0])

project_names_dropdown_value = project_names[0]
project_names_dropdown_value

project_path = '../projects/' + project_names_dropdown_value

# Run folders are the sub-directories of the selected project.
run_file_names = [name for name in os.listdir('../projects/' + project_names_dropdown_value)
                  if os.path.isdir('../projects/' + project_names_dropdown_value + '/' + name)]
run_file_names.sort()
run_file_names

urllib.parse.unquote('/Classification%20Project/Summary%20of%20Models')

# Split a decoded URL path into [project, model] components.
path_name = urllib.parse.unquote('/Classification%20Project/Summary%20of%20Models')
paths = path_name.split('/')
paths.remove('')  # drop the empty element produced by the leading '/'
paths

current_project = paths[0]
print(current_project)
current_model = paths[1]

# +
project_file = [f for f in listdir(project_path) if isfile(join(project_path, f)) and f.endswith('.yaml')]
project_file
# -

project_file = [f for f in listdir(project_path) if isfile(join(project_path, f)) and f.endswith('.yaml')]

project_path + '/project-metadata.yaml'

exists(project_path + '/project-metadata.yaml')

# BUG FIX: a stray cell `pd.DataFrame.from_dict(metadata_dict, orient='index')`
# lived here -- it referenced an undefined `metadata_dict` (and originally used
# pandas before it was imported), so it always raised NameError.  Removed.

# Load the project metadata YAML into a two-column (name, values) DataFrame.
with open(project_path + '/project-metadata.yaml', "r") as stream:
    try:
        yaml_dict = pd.DataFrame.from_dict(yaml.safe_load(stream)['project'], orient='index')
    except yaml.YAMLError as exc:
        print(exc)
yaml_dict = yaml_dict.reset_index()
yaml_dict.columns = ['name', 'values']
yaml_dict

CURRENT_MODEL_DIRECTORY = '../projects/Classification Project/Random Forest'
project_files = [f for f in listdir(CURRENT_MODEL_DIRECTORY)
                 if isfile(join(CURRENT_MODEL_DIRECTORY, f)) and f.endswith('.yaml')]
project_files

# os.path.join discards earlier components when a later one is absolute ('/3').
os.path.join('asdf/', '/3', '2')
os.path.join('../asdf')
os.path.join('/', 'adfs')

['Run 2'] + ['Run 1']*10
app/Sandbox.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Getting Started with Matplotlib
# We need `matplotlib.pyplot` for plotting.

import matplotlib.pyplot as plt
import pandas as pd

# ## About the Data
# In this notebook, we will be working with 2 datasets:
# - Facebook's stock price throughout 2018 (obtained using the [`stock_analysis` package](https://github.com/stefmolin/stock-analysis))
# - Earthquake data from September 18, 2018 - October 13, 2018 (obtained from the US Geological Survey (USGS) using the [USGS API](https://earthquake.usgs.gov/fdsnws/event/1/))

# ## Plotting lines

# +
# Date-indexed OHLC data; fb.open is attribute access for the 'open' column.
fb = pd.read_csv(
    'data/fb_stock_prices_2018.csv', index_col='date', parse_dates=True
)
plt.plot(fb.index, fb.open)
plt.show()
# -

# Since we are working in a Jupyter notebook, we can use the magic command `%matplotlib inline` once and not have to call `plt.show()` for each plot.

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd

fb = pd.read_csv(
    'data/fb_stock_prices_2018.csv', index_col='date', parse_dates=True
)
plt.plot(fb.index, fb.open)
# -

# ## Scatter plots
# We can pass in a string specifying the style of the plot. This is of the form '[color][marker][linestyle]'. For example, we can make a black dashed line with `'k--'` or a red scatter plot with `'ro'`:

# Column names as strings work because the data= keyword supplies the frame.
plt.plot('high', 'low', 'ro', data=fb.head(20))

# ## Histograms

quakes = pd.read_csv('data/earthquakes.csv')
plt.hist(quakes.query('magType == "ml"').mag)

# ### Bin size matters
# Notice how our assumptions of the distribution of the data can change based on the number of bins (look at the drop between the two highest peaks on the righthand plot):

x = quakes.query('magType == "ml"').mag
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
for ax, bins in zip(axes, [7, 35]):
    ax.hist(x, bins=bins)
    ax.set_title(f'bins param: {bins}')

# ## Plot components
# ### `Figure`
# Top-level object that holds the other plot components.

fig = plt.figure()

# ### `Axes`
# Individual plots contained within the `Figure`.
#
# ## Creating subplots
# Simply specify the number of rows and columns to create:

fig, axes = plt.subplots(1, 2)

# As an alternative to using `plt.subplots()` we can add the `Axes` to the `Figure` on our own. This allows for some more complex layouts, such as picture in picture:

# add_axes takes [left, bottom, width, height] in figure-fraction coordinates.
fig = plt.figure(figsize=(3, 3))
outside = fig.add_axes([0.1, 0.1, 0.9, 0.9])
inside = fig.add_axes([0.7, 0.7, 0.25, 0.25])

# ## Creating Plot Layouts with `gridspec`
# We can create subplots with varying sizes as well:

fig = plt.figure(figsize=(8, 8))
gs = fig.add_gridspec(3, 3)
top_left = fig.add_subplot(gs[0, 0])
mid_left = fig.add_subplot(gs[1, 0])
top_right = fig.add_subplot(gs[:2, 1:])  # spans 2 rows x 2 columns
bottom = fig.add_subplot(gs[2,:])        # spans the full bottom row

# ## Saving plots
# Use `plt.savefig()` to save the last created plot. To save a specific `Figure` object, use its `savefig()` method.

fig.savefig('empty.png')

# ## Cleaning up
# It's important to close resources when we are done with them. We use `plt.close()` to do so. If we pass in nothing, it will close the last plot, but we can pass the specific `Figure` to close or say `'all'` to close all `Figure` objects that are open. Let's close all the `Figure` objects that are open with `plt.close()`:

plt.close('all')

# ## Additional plotting options
# ### Specifying figure size
# Just pass the `figsize` parameter to `plt.figure()`. It's a tuple of (width, height):

fig = plt.figure(figsize=(10, 4))

# This can be specified when creating subplots as well:

fig, axes = plt.subplots(1, 2, figsize=(10, 4))

# ### `rcParams`
# A small subset of all the available plot settings (shuffling to get a good variation of options):

# +
import random
import matplotlib as mpl

rcparams_list = list(mpl.rcParams.keys())
random.seed(20)  # make this repeatable
random.shuffle(rcparams_list)
sorted(rcparams_list[:20])
# -

# We can check the current default `figsize` using `rcParams`:

mpl.rcParams['figure.figsize']

# We can also update this value to change the default (until the kernel is restarted):

mpl.rcParams['figure.figsize'] = (300, 10)
mpl.rcParams['figure.figsize']

# Use `rcdefaults()` to restore the defaults:

mpl.rcdefaults()
mpl.rcParams['figure.figsize']

# This can also be done via `pyplot`:

plt.rc('figure', figsize=(20, 20))  # change figsize default to (20, 20)
plt.rcdefaults()  # reset the default
ch_05/1-introducing_matplotlib.ipynb
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 0.6.2
#     language: julia
#     name: julia-0.5
# ---

# # DiffEq Solutions
#
# ## Solution to the Lorenz Problem

# +
using DifferentialEquations

# Classic Lorenz system, defined with the ode_def macro so the parameters
# sigma/rho/beta are baked in.  beta is set to 2.6666, i.e. just below the
# canonical 8/3 -- presumably intentional truncation; confirm if exactness matters.
f = @ode_def_nohes LorenzExample begin
  dx = σ*(y-x)
  dy = x*(ρ-z) - y
  dz = x*y - β*z
end σ=>10.0 ρ=>28.0 β=>2.6666

# BigFloat state and timespan for extended precision over the long integration.
u0 = big([0.1;0.0;0.0])
tspan = (big(0.0),big(100.0))
prob = ODEProblem(f,u0,tspan)
sol = solve(prob);
# -

# Plot the trajectory: all components vs time, then the (x, y, z) phase portrait.
using Plots; gr(); plot(sol)

plot(sol,vars=(:x,:y,:z))

# ## Solution to the Ball Bounce Problem

# +
# Free fall: u[1] = height, u[2] = velocity, constant gravity of -9.81.
f = function (t,u,du)
    du[1] = u[2]
    du[2] = -9.81
end

# The event fires where the height crosses zero.
condition = function (t,u,integrator) # Event when event_f(t,u,k) == 0
    u[1]
end

# No action on upward crossings; on downward crossings (impact) reverse the
# velocity with a 0.8 restitution coefficient.
affect! = nothing
affect_neg! = function (integrator)
    integrator.u[2] = -0.8integrator.u[2]
end
callback = ContinuousCallback(condition,affect!,affect_neg!,interp_points=100)

# Drop from 50 m at rest; fixed 1/4 step so the callback interpolation is exercised.
u0 = [50.0,0.0]
tspan = (0.0,15.0)
prob = ODEProblem(f,u0,tspan)
sol = solve(prob,Tsit5(),callback=callback,adaptive=false,dt=1/4)
plot(sol)
Notebooks/.ipynb_checkpoints/DiffEqSolutions-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .ps1
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PowerShell (Full)
#     language: PowerShell
#     name: powershell-full
# ---

# +
# Guard: this build notebook must be run from the Jupyter-PowerShell checkout.
# If run elsewhere, emit an error and point ProjectDirectory at %TEMP% so the
# destructive steps below cannot touch real project output.
$ProjectDirectory = $Pwd.Path

if($ProjectDirectory -match "Jupyter-PowerShell$") {
@"
# How to build the Jupyter PowerShell Kernel

Start in the Jupyter-PowerShell folder, and make sure there's no output from the last run
"@ | Write-Jupyter -Mimetype markdown
} else {
    "# This notebook only works in the Jupyter-PowerShell project folder" | Write-Jupyter -Mimetype markdown
    Write-Error "You cannot build Jupyter from here"
    $ProjectDirectory = $Env:Temp
}
# -

# Clean any previous build output (rm is the Remove-Item alias).
rm (Join-Path $ProjectDirectory Output\Release) -recurse

# ## To build the project
#
# We really just need to `dotnet restore` and `dotnet build` --although currently, we require a very specific version (2.0.0-preview2-006502) of the dotnet CLI tools because we depend on the latest PowerShell Core bits, and they're _utterly_ incompatible with anything else.
#
# ### Increment the version
#
# The project files use the new `--version-suffix` feature. To make sure your build isn't confused with an official one, you need to specify a suffix, like "-local-preview".
#
# ## To package the project
#
# In order to ship something, we need to `publish` it -- this includes the `build` step, so we can just call it directly.
#
# Once we've published, we need to package it with Chocolatey.

# Publish both targets: .NET Core (PowerShell-Core) and full framework (PowerShell-Full).
dotnet restore
dotnet publish -f netcoreapp2.0 -c Release --version-suffix "-beta-5"
dotnet publish -f net462 -c Release --version-suffix "-beta-5"

# We're just trying to rename the folders so that we can hash them:

Move-Item Output\Release\net462\publish Output\Release\PowerShell-Full
Move-Item Output\Release\netcoreapp2.0\publish Output\Release\PowerShell-Core
Copy-Item tools Output\Release -Recurse
Remove-Item Output\Release\net462 -Recurse
Remove-Item Output\Release\netcoreapp2.0 -Recurse

# Now generate the file catalog
New-FileCatalog -CatalogFilePath Output\Release\tools\Jupyter-PowerShell.cat -Path Output\Release\

# Maybe sign the catalog (only if the Authenticode module is installed)
if(Get-Module Authenticode -List) {
    Authenticode\Set-AuthenticodeSignature Output\Release\tools\Jupyter-PowerShell.cat
}

# Build the Chocolatey package from the prepared Output\Release layout.
C:\ProgramData\chocolatey\choco.exe pack --outputdirectory Output\Release
Release.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
"""Topic modelling for sampled suspended / "in between" Twitter accounts.

Refactored: the original notebook duplicated the whole
load -> clean -> count-vectorize -> LDA pipeline once per account group, and
`read_and_process` re-parsed every JSON line up to four times.  The pipeline
now lives in reusable helpers and each line is parsed exactly once.
Unused imports (requests, gensim, botometer, tokenizers, ...) were dropped;
restore them if later cells of the analysis are added back.
"""
import json
import pickle
import re
from string import punctuation

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
import preprocessor as p  # tweet-preprocessor: strips URLs / emojis
import lda


def read_and_process(file_name, user):
    """Return the English full-texts tweeted by `user` in `file_name`.

    Each line of the file is one tweet JSON object.  For retweets the
    original (untruncated) text of the retweeted status is used.
    """
    full_text = []
    with open(file_name) as f:
        for line in f:
            tweet = json.loads(line)  # PERF: parse once, not once per field
            if tweet['lang'] != 'en' or tweet['user']['screen_name'] != user:
                continue
            text = tweet['full_text']
            if text.startswith("RT") and 'retweeted_status' in tweet:
                full_text.append(tweet['retweeted_status']['full_text'])
            else:
                full_text.append(text)
    return full_text


# The four dump files that together hold the tweet archive.
TWEET_FILES = ["ae832c68a41b48b890a426e159076a9b_00%d.json" % i for i in range(1, 5)]


def collect_full_texts(account_indices):
    """Gather every matching tweet text, per account, across all dump files.

    NOTE(review): relies on `tweeter_sample_by_num_of_status`, which is
    defined in another notebook/session -- confirm it is in scope before running.
    """
    texts = []
    for index in account_indices:
        user = tweeter_sample_by_num_of_status[index]
        per_user = []
        for file_name in TWEET_FILES:
            per_user.extend(read_and_process(file_name, user))
        texts.append(per_user)
    return texts


def clean_tweets(raw_texts):
    """Return lower-cased texts with HTML tags, URLs/emojis and punctuation removed."""
    html_tag = re.compile('<.*?>')
    cleaned = []
    for full_text in raw_texts:
        text = re.sub(html_tag, '', full_text)
        # BUG FIX: the original called p.clean(full_text) on the *raw* text,
        # silently discarding the HTML-tag strip above.
        text = p.clean(text)
        text = re.sub('[,\.!?]', '', text)
        text = re.sub(r'[^a-zA-Z0-9\s]', ' ', text)
        text = text.lower()
        text = re.sub("#", "", text)  # kept for fidelity; '#' is already gone by here
        cleaned.append(text)
    return cleaned


def build_stop_words():
    """English stopwords + punctuation + campaign-specific hashtag words."""
    self_defined = {"chemtrail", "chemtrails ", "chemtrails", "Chemtrail", "Chemtrails",
                    "GeoEngineering", "geoengineering", "IDoNotConsent", "WeDoNotConsent",
                    "stopsprayingus", "amp", "idonotconsent", "us", "people", "like"}
    return set(stopwords.words("English")) | set(punctuation) | self_defined


def plot_30_most_common_ngrams(count_data, count_vectorizer):
    """Bar-plot the 30 most frequent terms in the document-term matrix."""
    words = count_vectorizer.get_feature_names()
    total_counts = np.zeros(len(words))
    for row in count_data:
        total_counts += row.toarray()[0]
    top = sorted(zip(words, total_counts), key=lambda x: x[1], reverse=True)[:30]
    labels = [w for w, _ in top]
    counts = [c for _, c in top]
    x_pos = np.arange(len(labels))
    plt.figure(2, figsize=(15, 15 / 1.6180))
    plt.subplot(title='30 most common words')
    sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
    sns.barplot(x_pos, counts, palette='husl')
    plt.xticks(x_pos, labels, rotation=90)
    plt.xlabel('words')
    plt.ylabel('counts')
    plt.show()


def print_topics(model, count_vectorizer, n_top_words):
    """Print the top-weighted words of every fitted LDA topic."""
    words = count_vectorizer.get_feature_names()
    for topic_idx, topic in enumerate(model.components_):
        print("\nTopic #%d:" % topic_idx)
        print(" ".join(words[i] for i in topic.argsort()[:-n_top_words - 1:-1]))


def run_topic_pipeline(flat_texts, number_topics=20, number_words=10):
    """Clean texts, plot the 30 most common unigrams, then fit & print LDA topics."""
    processed = clean_tweets(flat_texts)
    print(processed)
    stop_words = build_stop_words()
    sns.set_style('whitegrid')
    count_vectorizer = CountVectorizer(max_df=0.99, min_df=3, ngram_range=(1, 1),
                                       stop_words=stop_words)
    count_data = count_vectorizer.fit_transform(processed)
    plot_30_most_common_ngrams(count_data, count_vectorizer)
    # BUG FIX: the original rebound the *module* name `lda` to the fitted model
    # (lda = lda.LDA(...)), which broke any later use of the module.
    model = lda.LDA(n_topics=number_topics)
    model.fit(count_data)
    print("Topics found via LDA:")
    print_topics(model, count_vectorizer, number_words)


def load_index_sample(filename, sample_size=8):
    """Load a pickled account-index list and return its first `sample_size` entries."""
    with open(filename, 'rb') as infile:
        return pickle.load(infile)[:sample_size]


def dump_pickle(obj, filename):
    """Pickle `obj` to `filename` (overwriting)."""
    with open(filename, 'wb') as outfile:
        pickle.dump(obj, outfile)


# Configure tweet-preprocessor once: remove URLs and emojis only.
p.set_options(p.OPT.URL, p.OPT.EMOJI)

# --- suspended accounts --------------------------------------------------
suspended_idx = load_index_sample("suspended_account_index_list_for_sample")
print(suspended_idx)
suspended_texts = collect_full_texts(suspended_idx)
print(suspended_texts)
dump_pickle(suspended_texts, 'full_text_list_for_suspended_accounts')
suspended_flat = [item for sublist in suspended_texts for item in sublist]
print(suspended_flat)
run_topic_pipeline(suspended_flat)

# --- "in between" accounts ----------------------------------------------
in_between_idx = load_index_sample("in_between_account_index_list_for_sample")
print(in_between_idx)
in_between_texts = collect_full_texts(in_between_idx)
print(in_between_texts)
dump_pickle(in_between_texts, 'full_text_list_for_in_between_accounts')
in_between_flat = [item for sublist in in_between_texts for item in sublist]
print(in_between_flat)
run_topic_pipeline(in_between_flat)
# -
Paper_code_version_1/topic_model_for_sample_tweeter.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Permanganant/EHB-420E---Artificial-Neural-Networks-/blob/main/HW3_ANN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="oicJ6VG4lzp7"
# # EHB 420E Artificial Neural Networks
#
# Homework - 3
#
# <NAME>
#
# You can reach the homework with this https://colab.research.google.com/drive/19u0HQd1FYe9KvSRJvicRj9caQqLQEXAi#scrollTo=_Pr9-FT6ik-g link

# + [markdown] id="E4Hy-Djv1tKy"
# ## Problem - 1
#
# ---
#
# The purpose of this practice is first to be able to fill the gaps, because real-world
# solutions are not black and white -- there are always going to be unforeseen variables.
# In this exercise, you must create various variables and outputs for various scenarios.
# You should show an ability to choose the most convincing conclusion among them.

# The current touristic town has 505505 people
# The amount of people leave from the town daily
# P_leave = 56780
# decrease the amount of people by 10% to account for population
# add the people variable to the p_number variable
# increase p_number by 5% to account for next day tourist entrance into the town
# in the days following weekend
# decrease p_number by 5% to account for kids
# subtract 25000 people from p_number to account for town workers
# print the new value of the p_number variable

# + colab={"base_uri": "https://localhost:8080/"} id="iREmip_d1wsp" outputId="4b8d95c8-6d67-40dd-f542-1a167c073c67"
# NOTE(review): nesting below was reconstructed from a whitespace-mangled
# source; confirm which adjustments were meant to be weekend-only.  The outer
# loop runs exactly once (one week of simulation).
current_pop = 505505  # tourist + town workers + children + p_number
P_leave = 56780       # daily leaving
p_number = 5
for j in range (0,1):
    for i in range(1,8):
        p_number += current_pop - current_pop/10
        if(i == 6 or i == 7):           # weekend days
            p_number += p_number*(1/20)
        p_number -= 25000               # town workers
        p_number -= p_number*(1/20)     # kids
        print(current_pop)
        current_pop -= P_leave
p_number

# + [markdown] id="bCMsDZOvk1Ha"
# ## Problem - 2
#
# ---
#
# Please review the following data set, which is uploaded to the class documents as Data Set 1: AI_Dataset1.txt
# Data show I-V characteristics of array of LED devices.
# Experiments taken back to back for 21 identical devices. Every voltage sweep includes reverse sweep as well.
# Please try to clean, sort, the data and try to come up most meaningful I-V degradation graphics.
#
# ---
# Importing required libraries

# + id="BJBa4IHJnr6y"
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure

# + [markdown] id="MDlfFkTsn0v5"
# This function below reads the data set given in the question and returns the desired data as output. First input of the function takes the measurement number and the second input takes the indicator. Indicators can be 3,5,9,10,11,15,16,17.
# 3 represents the U2722 Channel settings, 5 represents the B2900 Channel 1 settings, 9 represents the Voltage_Sweep_Channel data, 10 represents the Current1_Sweep_Channel data, 11 represents the Current2_Sweep_Channel, 15 represents the Voltage_Sweep_Channel_Reversed data, 16 represents the Current1_Sweep_Channel reversed data and 17 represents the Current2_Sweep_Channel reversed data. To clean zeros at the end maximization index technique was used.

# + id="ZQGhDLpBMRKj"
def _parse_led_line(text_line, indicator):
    """Parse one raw dataset line according to `indicator`.

    Returns None for indicators this parser does not know about, mirroring
    the original implementation's implicit None.
    """
    if indicator == 3:
        # U2722 channel settings, e.g. "Current Range:<x>mA ... Output Leve (mA):<z>"
        # ('Output Leve' matches the dataset's own spelling).
        text = text_line.strip()
        current_range = text[text.find('Current Range:') + len('Current Range:'):text.find('mA')]
        # NOTE: slice end is the first capital 'O' anywhere in the line, as in
        # the original -- works because 'Output' is the first 'O' in the data.
        voltage_limit = text[text.find('Voltage Limit (V):') + len('Voltage Limit (V):'):text.find('O')]
        output_level = text[text.find('Output Leve (mA):') + len('Output Leve (mA):'):-1]
        return float(current_range), float(voltage_limit), float(output_level)
    if indicator == 5:
        # B2900 channel settings; values are returned as raw strings.
        text = text_line.strip()
        sweep_start = text[text.find('Ch.1Sweep Start (V):') + len('Ch.1Sweep Start (V):'):text.find('\tCh.1 Sweep Stop')]
        sweep_stop = text[text.find('Ch.1 Sweep Stop (V):') + len('Ch.1 Sweep Stop (V):'):text.find('\tCh.2 Sweep Number')]
        n_steps = text[text.find('Ch.2 Sweep Number of Steps:') + len('Ch.2 Sweep Number of Steps:'):text.find('\tCh.2 Current Range')]
        current_range = text[text.find('Ch.2 Current Range (A):') + len('Ch.2 Current Range (A):'):-1]
        return sweep_start, sweep_stop, n_steps, current_range
    tokens = text_line.split()
    if indicator == 9:
        vals = tokens[4:-1]
        # The sweep is padded with zeros; keep everything before the
        # (string-wise) maximum entry -- the original "maximize index" trick.
        vals = vals[0:vals.index(max(vals))]
        return [float(v) for v in vals]
    if indicator in (10, 11):
        # Forward-sweep currents; the original also trimmed the final token.
        vals = tokens[4:-1][0:-1]
        return [float(v) for v in vals]
    if indicator == 15:
        # Reversed voltage sweep starts one token later in the line.
        vals = tokens[5:-1][0:-1]
        return [float(v) for v in vals]
    if indicator in (16, 17):
        # Reversed-sweep currents (no extra trim, matching the original).
        vals = tokens[4:-1]
        return [float(v) for v in vals]
    return None


def led_data(Measurment_num, indicator,
             file_path='/content/drive/MyDrive/Classroom/ANN/AI_Dataset1.txt'):
    """Fetch one field of one measurement from the LED I-V dataset.

    Refactored from a single if/elif chain: the target line number is computed
    up-front and the file is scanned once.  `file_path` was added (defaulting
    to the original Colab path) so the function can be pointed at a local
    copy; existing call sites are unaffected.

    Returns None when the measurement/indicator is beyond the end of the file
    or the indicator is unknown, like the original.
    """
    zeta = 18  # each measurement block spans 18 lines in the file
    target = indicator + (Measurment_num - 1) * zeta  # 1-based line number
    with open(file_path, encoding='utf8') as f:
        for counter, line in enumerate(f, start=1):
            if counter == target:
                return _parse_led_line(line, indicator)
    return None
# + colab={"base_uri": "https://localhost:8080/"} id="914VkDfViEZp" outputId="37a5b986-f79b-4498-9210-cd201f0b9c99"
# Print the U2722 and B2900 channel settings for all 27 measurements.
for i in range(1,28):
    Current_Range, Voltage_Limit,Output_Level = led_data(i,3)
    print("U2722 Channel %s settings:" %i)
    print("Current Range (mA): ", Current_Range, "Voltage Limit (V): ", Voltage_Limit ,"Output Level (mA): ", Output_Level)
    print("\n")
    ChSweep_Start, ChSweep_Stop , ChSweep_Number_Steps, ChCurrent_Range = led_data(i,5)
    print("B2900 Channel %s settings:" %i)
    print("Ch.1Sweep Start (V):", ChSweep_Start, " Ch.1 Sweep Stop (V):", ChSweep_Stop ," Ch.2 Sweep Number of Steps:", ChSweep_Number_Steps," Ch.2 Current Range (A): ", ChCurrent_Range)
    print("\n")

# + [markdown] id="5ncN2-NMmDZg"
# This section below plots the V-I characteristic for all 27 measurements. The first two plots for each figure represent **Voltage Sweep Channel - Current1 Sweep Channel(red colored)** and **Voltage Sweep Channel - Current2 Sweep Channel(green colored)** plots respectively. The second two plots for each figure represent **Voltage Sweep Reversed Channel - Current1 Sweep Channel(orange colored)** and **Voltage Sweep Reversed Channel - Current2 Sweep Channel(blue colored)**

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_Pr9-FT6ik-g" outputId="608e3f67-7b95-43f7-97d0-92d70eacb2f5"
# Plot the forward and reversed V-I curves for each of the 27 measurements.
for i in range (1,28):
    V_s = led_data(i,9)
    C1 = led_data(i,10)
    C2 = led_data(i,11)
    V_sr = led_data(i,15)
    C1_r = led_data(i,16)
    C2_r = led_data(i,17)
    # BUG FIX: the original did `plt.figure(i)` followed by a bare
    # `figure(figsize=(20, 20), dpi=100)` — `figure` is not a defined name
    # here (only `plt` is bound), so that line raised NameError, and both
    # calls created throwaway figures separate from the one `plt.subplots()`
    # returned. Create a single correctly sized figure instead.
    fig, ax = plt.subplots(2, 2, figsize=(20, 20), dpi=100)
    # Currents are sliced to the (zero-trimmed) length of the voltage sweep
    # so both axes of each plot have matching lengths.
    ax[0, 0].plot(V_s,C1[0:len(V_s)],color = 'red')
    ax[0, 1].plot(V_s,C2[0:len(V_s)],color = 'green')
    ax[1, 0].plot(V_sr,C1_r[0:len(V_sr)],color = 'orange')
    ax[1, 1].plot(V_sr,C2_r[0:len(V_sr)],color = 'blue')
    ax[0, 0].set_title("Voltage Sweep Channel %s - Current1 Sweep Channel %s" %(i,i),fontsize= 7)
    ax[0, 1].set_title("Voltage Sweep Channel %s - Current2 Sweep Channel %s" %(i,i),fontsize= 7)
    ax[1, 0].set_title("Voltage Sweep Channel Reversed %s - Current1 Sweep Channel %s" %(i,i),fontsize= 7)
    ax[1, 1].set_title("Voltage Sweep Channel Reversed %s - Current2 Sweep Channel %s" %(i,i),fontsize= 7)
    fig.tight_layout()
    plt.show()
HW3_ANN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 493} id="FF1uVrQgUxvY" outputId="a13af83f-d734-4aa9-fde1-14cd5364293e" import numpy as np class Perceptron(object): def _init_(self, learning_rate=0.01, n_iter=100, random_state=1): self.learning_rate = learning_rate self.n_iter = n_iter self.random_state = random_state def fit(self, X, y): rand = np.random.RandomState(self.random_state) self.weights = rand.normal(loc=0.0, scale=0.01, size=1 + X.shape[1]) self.errors_ = [] for _ in range(self.n_iter): errors = 0 for x, target in zip(X, y): update = self.learning_rate * (target - self.predict(x)) self.weights[1:] += update * x self.weights[0] += update errors += int(update != 0.0) self.errors_.append(errors) return self def net_input(self, X): z = np.dot(X, self.weights[1:]) + self.weights[0] return z def predict(self, X): return np.where(self.net_input(X) >= 0, 1,-1) from sklearn.datasets import load_iris X,y = load_iris(return_X_y=True) import matplotlib.pyplot as plt import numpy as np # %matplotlib inline plt.scatter(X[:50, 0], X[:50, 1], color='green', marker='x', label='setosa') plt.scatter(X[50:100, 0], X[50:100, 1], color='red', marker='o', label='versicolor') plt.xlabel('sepal length') plt.ylabel('petal length') plt.legend(loc='upper right') plt.show() per = Perceptron(learning_rate=0.1, n_iter=100, random_state=1) per.fit(X, y) plt.plot(range(1, len(per.errors_) + 1), per.errors_, marker='o') plt.xlabel('Epochs') plt.ylabel('Number of updates') plt.show() # + [markdown] id="F7_p4z4kZwFR" # # New Section
ANN_Experiment1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #load packages import numpy as np import pandas as pd import scipy from PIL import Image import glob import os from sklearn.model_selection import train_test_split from sklearn.preprocessing import MultiLabelBinarizer import matplotlib.pyplot as plt from pandarallel import pandarallel from PIL import Image import requests from io import BytesIO from tqdm import tqdm # %matplotlib inline df_movie = pd.read_excel("../data/01_test_posters.xlsx") for i in tqdm(range(1, df_movie.shape[0])): try: url = df_movie['Poster'].iloc[i] name = df_movie['imdbId'].iloc[i] response = requests.get(url) img = Image.open(BytesIO(response.content)) size = 182, 268 img.thumbnail(size) img.save("../data/movie-genre-from-its-poster/TestPoster/"+str(name)+'.jpg') except: pass
notebook/image_download_sample_images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns

sns.set_style('white')
# -

# ## Selecting only closed loans

# 887,379 loans in total
loans = pd.read_csv('../data/loan.csv')
# FIX: `.astype('category', ordered=True)` is not valid in pandas >= 0.25;
# an ordered CategoricalDtype expresses the same intent portably.
loans['grade'] = loans['grade'].astype(pd.api.types.CategoricalDtype(ordered=True))
loans['last_pymnt_d'] = pd.to_datetime(loans['last_pymnt_d'])#.dt.strftime("%Y-%m-%d")
loans.shape

loans['loan_status'].unique()

# most loans are current
sns.countplot(loans['loan_status'], color='turquoise')
plt.xticks(rotation=90)
plt.savefig('../figures/barplot_loan_statusses.jpg', bbox_inches='tight')

# exclude current loans leaves 256,939 (about 30%)
closed_status = ['Fully Paid', 'Charged Off',
                 'Does not meet the credit policy. Status:Fully Paid',
                 'Does not meet the credit policy. Status:Charged Off']
# FIX: .copy() — closed_loans gets new columns ('paid', 'profit') below, and
# assigning into a slice of `loans` triggers SettingWithCopyWarning and may
# not write through.
closed_loans = loans[loans['loan_status'].isin(closed_status)].copy()
closed_loans.shape

sns.countplot(closed_loans['loan_status'], color='turquoise')
plt.xticks(rotation=90)
plt.savefig('../figures/barplot_loan_statusses_closed.jpg', bbox_inches='tight')

# two categories: paid/unpaid
paid_status = ['Fully Paid', 'Does not meet the credit policy. Status:Fully Paid']
# idiomatic equivalent of the original list comprehension
closed_loans['paid'] = closed_loans['loan_status'].isin(paid_status)
sns.countplot(closed_loans['paid'])
plt.xticks(rotation=90)

# ## Investigating closed loans
# #### features summary
#
# Total loans: 256,939
# Total features: 74
#
# Loan
# - id: loan
# - loan_amnt: 1914 times is loan amount bigger than funded amount
# - funded_amnt
# - funded_amnt_inv
# - term: 36 or 60 months
# - int_rate: interest rates
# - installment: height monthly pay
# - grade: A-G, A low risk, G high risk
# - sub_grade
# - issue_d: month-year loan was funded
# - loan_status
# - pymnt_plan: n/y
# - url
# - desc: description provided by borrower
# - purpose: 'credit_card', 'car', 'small_business', 'other', 'wedding', 'debt_consolidation', 'home_improvement', 'major_purchase', 'medical', 'moving', 'vacation', 'house', 'renewable_energy','educational'
# - title: provided by borrower
# - initial_list_status: w/f (what is this?)
# - out_prncp: outstanding prinicipal --> still >0 in fully paid?!
# - out_prncp_inv
# - total_pymnt
# - total_pymnt_inv
# - total_rec_prncp
# - total_rec_int: total recieved interest
# - total_rec_late_fee
# - recoveries: post charged off gross recovery
# - collection_recovery_fee: post charged off collection fee
# - last_pymnt_d
# - last_pymnt_amnt
# - next_pymnt_d
# - collections_12_mths_ex_med: almost all 0
# - policy_code: 1 publicly available, 2 not
# - application_type (only 1 JOINT, rest INDIVIDUAL)
#
#
# Borrower
# - emp_title
# - emp_length: 0-10 (10 stands for >=10)
# - home_ownership: 'RENT', 'OWN', 'MORTGAGE', 'OTHER', 'NONE', 'ANY'
# - member_id: person
# - annual_inc (stated by borrower)
# - verification_status: 'Verified', 'Source Verified', 'Not Verified' (income verified by LC?)
# - zip_code
# - addr_state
# - dti: debt to income (without mortgage)
# - delinq_2yrs: The number of 30+ days past-due incidences of delinquency in the borrower's credit file for the past 2 years
# - mths_since_last_delinq
# - mths_since_last_record
# - pub_rec
# - earliest_cr_line
# - inq_last_6mths
# - open_acc (nr of open credit lines)
# - total_acc (nr of total credit lines in credit file)
# - revol_bal
# - last_credit_pull_d
# - mths_since_last_major_derog: Months since most recent 90-day or worse rating
# - acc_now_delinq: The number of accounts on which the borrower is now delinquent.
# - tot_coll_amt: Total collection amounts ever owed
# - tot_cur_bal: Total current balance of all accounts
# - open_acc_6m: Number of open trades in last 6 months
# - open_il_6m: Number of currently active installment trades
# - open_il_12m: Number of installment accounts opened in past 12 months
# - open_il_24m
# - mths_since_rcnt_il: Months since most recent installment accounts opened
# - total_bal_il: Total current balance of all installment accounts
# - il_util: Ratio of total current balance to high credit/credit limit on all install acct
# - open_rv_12m: Number of revolving trades opened in past 12 months
# - open_rv_24m
# - max_bal_bc: Maximum current balance owed on all revolving accounts
# - all_util: Balance to credit limit on all trades
# - total_rev_hi_lim: Total revolving high credit/credit limit
# - inq_fi: Number of personal finance inquiries
# - total_cu_tl: Number of finance trades
# - inq_last_12m: Number of credit inquiries in past 12 months
#
# Two borrowers (only in 1 case)
# - annual_inc_joint
# - dti_joint
# - verification_status_joint
#
# #### Difference between default and charged off
#
# In general, a note goes into Default status when it is 121 or more days past due. When a note is in Default status, Charge Off occurs no later than 150 days past due (i.e. No later than 30 days after the Default status is reached) when there is no reasonable expectation of sufficient payment to prevent the charge off. However, bankruptcies may be charged off earlier based on date of bankruptcy notification.
#
# --> so default is not closed yet (so threw that one out).
#
# 1914 loans amounts bigger than funded amount

sum(closed_loans['loan_amnt'] != closed_loans['funded_amnt'])

# nr of null values per feature
nr_nulls = closed_loans.isnull().apply(sum, 0)
nr_nulls = nr_nulls[nr_nulls != 0]
# FIX: the denominator was hard-coded to 255720, which matches neither the
# stated 256,939 closed loans nor anything computed here; derive it from the
# data so the ratios stay correct if the selection changes.
ratio_missing = nr_nulls.sort_values(ascending=False) / len(closed_loans)
ratio_missing.to_csv('../data/missing_ratio.txt', sep='\t')
ratio_missing

sns.distplot(closed_loans['funded_amnt'], kde=False, bins=50)
plt.savefig('../figures/funded_amount.jpg')

# closed loans about 20% are 60 months
# all loans lot of missing data, rest 30% are 60 months
sns.countplot(closed_loans['term'], color='darkblue')
plt.title('closed')
plt.savefig('../figures/term_closed.jpg')
plt.show()
sns.countplot(loans['term'])
plt.title('all')

# ## TODO: interest questions
# higher interest rate more interesting for lenders
# higher grade gets higher interest rate (more risk)
# does it default more often?
# do you get richer from investing in grade A-C (less default?) or from D-G (more interest)?

fig = sns.distplot(closed_loans['int_rate'], kde=False, bins=50)
fig.set(xlim=(0, None))
plt.savefig('../figures/int_rates.jpg')

sns.boxplot(data=closed_loans, x='grade', y='int_rate', color='turquoise')
plt.savefig('../figures/boxplots_intrate_grade.jpg')
sns.stripplot(data=closed_loans, x='grade', y='int_rate', color='gray')

# closed_loans['collection_recovery_fee']
# profit = everything received on the loan minus what was funded
closed_loans['profit'] = (closed_loans['total_rec_int'] + closed_loans['total_rec_prncp']
                          + closed_loans['total_rec_late_fee'] + closed_loans['recoveries']) - closed_loans['funded_amnt']
profits = closed_loans.groupby('grade')['profit'].sum()
sns.barplot(data=profits.reset_index(), x='grade', y='profit', color='gray')
plt.savefig('../figures/profit_grades.jpg')
plt.show()
profits = closed_loans.groupby('paid')['profit'].sum()
sns.barplot(data=profits.reset_index(), x='paid', y='profit')
plt.show()
profits = closed_loans.groupby(['grade', 'paid'])['profit'].sum()
sns.barplot(data=profits.reset_index(), x='profit', y='grade', hue='paid', orient='h')
plt.savefig('../figures/profit_grades_paid.jpg')
plt.show()

# Sort off normally distributed --> statistically test whether means are different?
sns.distplot(closed_loans[closed_loans['paid']==True]['int_rate'])
sns.distplot(closed_loans[closed_loans['paid']==False]['int_rate'])
plt.savefig('../figures/int_rate_paid.jpg')

# fraction of unpaid loans per grade
grade_paid = closed_loans.groupby(['grade', 'paid'])['id'].count()
risk_grades = dict.fromkeys(closed_loans['grade'].unique())
for g in risk_grades.keys():
    risk_grades[g] = grade_paid.loc[(g, False)] / (grade_paid.loc[(g, False)] + grade_paid.loc[(g, True)])
risk_grades = pd.DataFrame(risk_grades, index=['proportion_unpaid_loans'])
sns.stripplot(data=risk_grades, color='darkgray', size=15)
plt.savefig('../figures/proportion_grades.jpg')

# does the purpose matter for the chance of charged off?
sns.countplot(closed_loans['purpose'], color='turquoise')
plt.xticks(rotation=90)
plt.show()
purpose_paid = closed_loans.groupby(['purpose', 'paid'])['id'].count()
sns.barplot(data=pd.DataFrame(purpose_paid).reset_index(), x='purpose', y='id', hue='paid')
plt.xticks(rotation=90)
plt.savefig('../figures/purposes.jpg', bbox_inches='tight')

# debt to income
sns.boxplot(data=closed_loans, x='paid', y='dti')
plt.savefig('../figures/dti.jpg')

# ## Investigate whether the two weird 'does not meet' categories should stay in there, are they really closed?
# Next payment day is not NAN in the 'does not meet' categories.
# Outstanding principle is all 0 (so not active anymore)
# Indeed seems like older loans
# --> seems they are in fact closed, so leave them in

sns.countplot(closed_loans[closed_loans['next_pymnt_d'].notnull()]['loan_status'])
plt.xticks(rotation=90)
plt.savefig('../figures/last_payment_day.jpg', bbox_inches='tight')
plt.show()
print(closed_loans['loan_status'].value_counts())

new_loans = ['Fully Paid', 'Charged Off']
sns.countplot(data=closed_loans[~closed_loans['loan_status'].isin(new_loans)], x='last_pymnt_d', hue='loan_status')
plt.xticks([])
plt.savefig('../figures/last_payment_day_old.jpg')
plt.show()
sns.countplot(data=closed_loans[closed_loans['loan_status'].isin(new_loans)], x='last_pymnt_d', hue='loan_status')
plt.xticks([])
plt.savefig('../figures/last_payment_day_new.jpg')
plt.show()

closed_loans['out_prncp'].value_counts()

# ## something weird with policy 2?
# http://www.lendacademy.com/forum/index.php?topic=2427.msg20813#msg20813
# Only policy 1 loans in this case, so no problem.
closed_loans['policy_code'].value_counts()
project/notebooks/lending-club-exploration.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# author=cxf
# date=2020-8-8
# file for test predicted cutoff
#
# Compares the model-predicted per-sample cutoff against fixed ("stiff")
# cutoffs: for each coverage level (1X-5X), precision level and error
# threshold it prints, as one CSV row, the number of samples passing, the
# mean±SEM of genotyped sites, and a paired t-test between the two schemes.
import numpy as np
import pandas as pd
import matplotlib.pyplot as mp
import warnings
warnings.filterwarnings("ignore")

# column layout of the run1/run2 files: a sample id followed by one value
# per candidate cutoff 0..10
index1=['sample']
index2=[i for i in range(0,11)]
index_list=index1+index2

# get result predicted
test_x=pd.read_csv('../3.model_training/test_feature.csv',index_col=0)
test_y=pd.read_csv('../3.model_training/test_cutoff.csv',index_col=0)

# merge sites information
# merge error rate
error_run1=pd.read_csv('../0.prepare_processing/run1/error.txt',names=index_list,index_col=0)
error_run2=pd.read_csv('../0.prepare_processing/run2/error.txt',names=index_list,index_col=0)
error_all=pd.concat([error_run1,error_run2],axis=0,sort=False)
# -

import math
from scipy.stats import ttest_rel

# precision level -> list of acceptable error-rate thresholds
precision_dict={90:[0.02,0.05],95:[0.01,0.025],99:[0.002,0.005]}
for i in range(1,6):  # i: coverage level (1X..5X); also indexes the per-cutoff columns
    for precision,error_rate in precision_dict.items():
        # merge sites information
        df_sites_run1=pd.read_csv(f'../0.prepare_processing/run1/a{precision}.txt',names=index_list,index_col=0)
        df_sites_run2=pd.read_csv(f'../0.prepare_processing/run2/a{precision}.txt',names=index_list,index_col=0)
        df_sites_all=pd.concat([df_sites_run1,df_sites_run2],axis=0,sort=False)
        # error rate and number of genotyped sites model vs different stiff cutoff with different criterion
        df_x=test_x[test_x['precise']==precision]
        df_y=pd.DataFrame(test_y[test_x['precise']==precision]['pred_cutoff'])
        df_error=pd.merge(df_y,error_all,on='sample')
        df_sites=pd.merge(df_y,df_sites_all,on='sample')
        for error in error_rate:
            # number of samples passed at different cutoffs
            cutoff_pass_num=df_error[df_error[i]<error].shape[0]
            # number of sites genotyped of samples passed at different cutoffs
            cutoff_pass_sites=df_sites[df_error[i]<error]
            # site and sample number at predicted cutoff
            total=df_error.shape[0]
            predict_pass_num=0
            predict_pass_sites=[]
            predict_error_rate=[]
            for item in df_error.iterrows():
                cutoff=item[1]['pred_cutoff']
                # NOTE(review): this REBINDS `error_rate` (the per-precision
                # threshold list of the enclosing loop) to a scalar. The
                # `for error in error_rate:` loop is unaffected because its
                # iterator was captured at loop start, but the shadowing is
                # fragile — consider renaming if this code is touched.
                error_rate=item[1][int(cutoff)]
                predict_error_rate.append(error_rate)
                if error_rate<error:
                    predict_pass_num+=1
            for item in df_sites.iterrows():
                cutoff=item[1]['pred_cutoff']
                sites=item[1][int(cutoff)]
                predict_pass_sites.append(sites)
            # paired T test
            # NOTE(review): df_x is a slice of test_x; these assignments rely
            # on chained-assignment behavior (warnings are suppressed above).
            df_x['sites']=predict_pass_sites
            df_x['error']=predict_error_rate
            df_predict_pass_sites=df_x[df_x['error']<error]
            total_num=df_y.shape[0]
            if cutoff_pass_sites.shape[0]>1:
                X_sum=cutoff_pass_sites.shape[0]
                df_predict_and_real=pd.merge(df_predict_pass_sites,cutoff_pass_sites,on='sample')
                # CSV row: coverage, precision, threshold, passed(pred), passed(fixed),
                # sites(pred) mean±SEM, sites(fixed) mean±SEM, t, p
                print(f"{i}X,{precision},{error},{df_predict_pass_sites.shape[0]}({round(df_predict_pass_sites.shape[0]/total_num*100,2)}%),{X_sum}({round(X_sum/total_num*100,2)}%),{round(df_predict_and_real['sites'].mean(),2)}±{round(df_predict_and_real['sites'].std()/math.sqrt(df_predict_and_real.shape[0]),2)},{round(df_predict_and_real[i].mean(),2)}±{round(df_predict_and_real[i].std()/math.sqrt(df_predict_and_real.shape[0]),2)},{round(ttest_rel(df_predict_and_real[i], df_predict_and_real['sites'])[0],2)},{'%.2e'%ttest_rel(df_predict_and_real[i], df_predict_and_real['sites'])[1]}")
            else:
                # too few samples pass the fixed cutoff for a paired test
                X_sum=0
                df_predict_and_real=df_predict_pass_sites
                print(f"{i}X,{precision},{error},{df_predict_pass_sites.shape[0]}({round(df_predict_pass_sites.shape[0]/total_num*100,2)}%),0,{round(df_predict_and_real['sites'].mean(),2)}±{round(df_predict_and_real['sites'].std()/math.sqrt(df_predict_and_real.shape[0]),2)},0,-,-")
            # earlier variant kept for reference (used fixed column 0 instead of i):
            #if X_sum>1:
                #print(f"{i}X,{precision},{error},{df_predict_pass_sites.shape[0]}({round(df_predict_pass_sites.shape[0]/total_num*100,2)}%),{X_sum}({round(X_sum/total_num*100,2)}%),{round(df_predict_and_real['sites'].mean(),2)}±{round(df_predict_and_real['sites'].std()/math.sqrt(df_predict_and_real.shape[0]),2)},{round(df_predict_and_real[0].mean(),2)}±{round(df_predict_and_real[0].std()/math.sqrt(df_predict_and_real.shape[0]),2)},{round(ttest_rel(df_predict_and_real[0], df_predict_and_real['sites'])[0],2)},{'%.2e'%ttest_rel(df_predict_and_real[0], df_predict_and_real['sites'])[1]}")
            #else:
                #print(f"{i}X,{precision},{error},{df_predict_pass_sites.shape[0]}({round(df_predict_pass_sites.shape[0]/total_num*100,2)}%),0,{round(df_predict_and_real['sites'].mean(),2)}±{round(df_predict_and_real['sites'].std()/math.sqrt(df_predict_and_real.shape[0]),2)},0,-,-")
model_training/4.test_cutoff/predicted cutoff vs indiscriminate cutoff.ipynb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="7238ae8d-73dc-474b-9d36-92647cdc6030" _execution_state="idle" _uuid="691c624259cf2737f7e29c2fdbbc7f255e842eb2" # # Stacked Regressions to predict House Prices # # # ## Serigne # # **July 2017** # # **If you use parts of this notebook in your scripts/notebooks, giving some kind of credit would be very much appreciated :) You can for instance link back to this notebook. Thanks!** # + [markdown] _cell_guid="735c5797-4457-4e16-b21f-7f0982f2f016" _execution_state="idle" _uuid="35143ae31bde76e140a55855b89e0d42b56160a3" # This competition is very important to me as it helped me to begin my journey on Kaggle few months ago. I've read some great notebooks here. To name a few: # # 1. [Comprehensive data exploration with Python][1] by **<NAME>** : Great and very motivational data analysis # # 2. [A study on Regression applied to the Ames dataset][2] by **<NAME>** : Thorough features engeneering and deep dive into linear regression analysis but really easy to follow for beginners. # # 3. [Regularized Linear Models][3] by **<NAME>** : Great Starter kernel on modelling and Cross-validation # # I can't recommend enough every beginner to go carefully through these kernels (and of course through many others great kernels) and get their first insights in data science and kaggle competitions. # # After that (and some basic pratices) you should be more confident to go through [this great script][7] by **Human Analog** who did an impressive work on features engeneering. # # As the dataset is particularly handy, I decided few days ago to get back in this competition and apply things I learnt so far, especially stacking models. For that purpose, we build two stacking classes ( the simplest approach and a less simple one). 
# # As these classes are written for general purpose, you can easily adapt them and/or extend them for your regression problems. # The overall approach is hopefully concise and easy to follow.. # # The features engeneering is rather parsimonious (at least compared to some others great scripts) . It is pretty much : # # - **Imputing missing values** by proceeding sequentially through the data # # - **Transforming** some numerical variables that seem really categorical # # - **Label Encoding** some categorical variables that may contain information in their ordering set # # - [**Box Cox Transformation**][4] of skewed features (instead of log-transformation) : This gave me a **slightly better result** both on leaderboard and cross-validation. # # - ** Getting dummy variables** for categorical features. # # Then we choose many base models (mostly sklearn based models + sklearn API of DMLC's [XGBoost][5] and Microsoft's [LightGBM][6]), cross-validate them on the data before stacking/ensembling them. The key here is to make the (linear) models robust to outliers. This improved the result both on LB and cross-validation. 
# # [1]: https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python # [2]:https://www.kaggle.com/juliencs/a-study-on-regression-applied-to-the-ames-dataset # [3]: https://www.kaggle.com/apapiu/regularized-linear-models # [4]: http://onlinestatbook.com/2/transformations/box-cox.html # [5]: https://github.com/dmlc/xgboost # [6]: https://github.com/Microsoft/LightGBM # [7]: https://www.kaggle.com/humananalog/xgboost-lasso # # To my surprise, this does well on LB ( 0.11420 and top 4% the last time I tested it : **July 2, 2017** ) # # # + [markdown] _cell_guid="d006e9e6-e191-4918-b8c1-5730f1d08e77" _uuid="ae5d947988d70f9ccbea3345f396c8394e7b1e4b" # **Hope that at the end of this notebook, stacking will be clear for those, like myself, who found the concept not so easy to grasp** # + _cell_guid="2dbccbd6-138b-4f1b-9b23-fd60c7525c14" _execution_state="idle" _uuid="c9b1d5dff21d39260eb47af6fe7aac4bd03be233" #import some necessary librairies import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # %matplotlib inline import matplotlib.pyplot as plt # Matlab-style plotting import seaborn as sns color = sns.color_palette() sns.set_style('darkgrid') import warnings def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn #ignore annoying warning (from sklearn and seaborn) from scipy import stats from scipy.stats import norm, skew #for some statistics pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) #Limiting floats output to 3 decimal points # + _cell_guid="59617b4b-d797-44ce-9142-05fbfd36aada" _execution_state="idle" _uuid="0e694d13459e3e200f6e2c6333c887cbad779ba9" #Now let's import and put the train and test datasets in pandas dataframe train = pd.read_csv('train.csv') test = pd.read_csv('test.csv') # + _cell_guid="3678529f-9d76-4853-88c5-4b2d230a85b6" _execution_state="idle" _uuid="3a32f51460a02fbe7a9122db55a740eb378dda97" ##display the first five rows of the train dataset. 
train.head(5) # + _cell_guid="ff37c1ba-8679-49e0-b3c8-9c53d01b1b04" _execution_state="idle" _uuid="816b1463b3dd0daf44949a1fa15ebfbc0e2f1235" ##display the first five rows of the test dataset. test.head(5) # + _cell_guid="b24451a1-fb8c-4094-ad0b-0940469d07fc" _execution_state="idle" _uuid="687813c270cbfdedccc7a9e4ec9fbb78a99d54ed" #check the numbers of samples and features print("The train data size before dropping Id feature is : {} ".format(train.shape)) print("The test data size before dropping Id feature is : {} ".format(test.shape)) #Save the 'Id' column train_ID = train['Id'] test_ID = test['Id'] #Now drop the 'Id' colum since it's unnecessary for the prediction process. train.drop("Id", axis = 1, inplace = True) test.drop("Id", axis = 1, inplace = True) #check again the data size after dropping the 'Id' variable print("\nThe train data size after dropping Id feature is : {} ".format(train.shape)) print("The test data size after dropping Id feature is : {} ".format(test.shape)) # + [markdown] _cell_guid="7d5829c4-b2f1-4ef3-8b02-11f02eb7aabf" _execution_state="idle" _uuid="228cb602f1c7a47d3c5250514cab57f7e7bc75e5" # # Data Processing # + [markdown] _cell_guid="993f8fc0-1f5a-4432-80bc-6024b7bbc855" _execution_state="idle" _uuid="21bab04b0e0a451c912695d9238bf8304fc009d4" # ## Outliers # + [markdown] _cell_guid="2b19b08e-b8bf-44b0-b83d-ee3efd0b833e" _execution_state="idle" _uuid="70fc7612c38957f7418d03a409ff8bae21d522a9" # [Documentation][1] for the Ames Housing Data indicates that there are outliers present in the training data # [1]: http://ww2.amstat.org/publications/jse/v19n3/Decock/DataDocumentation.txt # + [markdown] _cell_guid="465043f2-d687-4b1f-a6b4-1036859dfeb0" _execution_state="idle" _uuid="32b12bca723c5e867f7d7a7e179ff934a5fcdf30" # Let's explore these outliers # # + _cell_guid="637bd0fd-7508-41d1-b240-ea0e8598dddf" _execution_state="idle" _uuid="8903aa1a4a700aa2160edb3baf806f3800ae7d9a" fig, ax = plt.subplots() ax.scatter(x = train['GrLivArea'], y = 
train['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() # + [markdown] _cell_guid="30304b82-5846-4142-bc31-b629158fb040" _execution_state="idle" _uuid="edf186dc5169e450392ee8f809cc3de5d10d7dbd" # We can see at the bottom right two with extremely large GrLivArea that are of a low price. These values are huge oultliers. # Therefore, we can safely delete them. # + _cell_guid="6c5780b2-d4a8-42d9-b902-c6a23eef7d99" _execution_state="idle" _uuid="583bb417102d7bebb4aaf14bcb1aebcae86443bb" #Deleting outliers train = train.drop(train[(train['GrLivArea']>4000) & (train['SalePrice']<300000)].index) #Check the graphic again fig, ax = plt.subplots() ax.scatter(train['GrLivArea'], train['SalePrice']) plt.ylabel('SalePrice', fontsize=13) plt.xlabel('GrLivArea', fontsize=13) plt.show() # + [markdown] _cell_guid="e24be1ff-e186-4d0f-9ba1-64195c0eec4d" _execution_state="idle" _uuid="0f186c5806f14de1e9ea46ece78a4bed2a6830a7" # ### Note : # Outliers removal is note always safe. We decided to delete these two as they are very huge and really bad ( extremely large areas for very low prices). # # There are probably others outliers in the training data. However, removing all them may affect badly our models if ever there were also outliers in the test data. That's why , instead of removing them all, we will just manage to make some of our models robust on them. You can refer to the modelling part of this notebook for that. # + [markdown] _cell_guid="f4dcb348-634e-4010-b0a1-27976a1d8353" _execution_state="idle" _uuid="886ad7c816f4c1fd9afda53b10990baf987e86d8" # ## Target Variable # + [markdown] _cell_guid="658f5b56-5830-486c-81a0-8514fb95e274" _execution_state="idle" _uuid="4b96a6a35983d1c765c11c929bcd32effd105b43" # **SalePrice** is the variable we need to predict. So let's do some analysis on this variable first. 
# + _cell_guid="a17ad845-6fca-4d47-8e44-7c4c44f0427d" _execution_state="idle"
# Visualise the raw target: histogram with a fitted Gaussian, then a QQ-plot.
sns.distplot(train['SalePrice'], fit=norm)

# Parameters of the normal distribution fitted to SalePrice.
(mu, sigma) = norm.fit(train['SalePrice'])
print(f'\n mu = {mu:.2f} and sigma = {sigma:.2f}\n')

plt.legend([f'Normal dist. ($\mu=$ {mu:.2f} and $\sigma=$ {sigma:.2f} )'],
           loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')

# QQ-plot against the theoretical normal quantiles.
fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()

# + [markdown]
# The target variable is right skewed. As (linear) models love normally
# distributed data, we need to transform this variable and make it more
# normally distributed.
#
# **Log-transformation of the target variable**

# + _cell_guid="21b3a0ad-bd68-49aa-a3d7-40a30b3c59dc" _execution_state="idle"
# np.log1p applies log(1 + x) element-wise, taming the right skew of the target.
train["SalePrice"] = np.log1p(train["SalePrice"])

# Check the new distribution after the transform.
sns.distplot(train['SalePrice'], fit=norm)
(mu, sigma) = norm.fit(train['SalePrice'])
print(f'\n mu = {mu:.2f} and sigma = {sigma:.2f}\n')

plt.legend([f'Normal dist. ($\mu=$ {mu:.2f} and $\sigma=$ {sigma:.2f} )'],
           loc='best')
plt.ylabel('Frequency')
plt.title('SalePrice distribution')

fig = plt.figure()
res = stats.probplot(train['SalePrice'], plot=plt)
plt.show()

# + [markdown]
# The skew seems now corrected and the data appears more normally distributed.
#
# ## Features engineering
#
# Let's first concatenate the train and test data in the same dataframe.

# + _cell_guid="1bd3e9b9-2f42-4251-aadd-5ced84eb1a27" _execution_state="idle"
ntrain = train.shape[0]
ntest = test.shape[0]
y_train = train.SalePrice.values
# Stack train on top of test so every preprocessing step is applied to both.
all_data = pd.concat((train, test)).reset_index(drop=True)
all_data.drop(['SalePrice'], axis=1, inplace=True)
print(f"all_data size is : {all_data.shape}")

# + [markdown]
# ### Missing Data

# + _cell_guid="501b465f-8c80-4b93-81d0-a5d41e08d235" _execution_state="idle"
# Percentage of missing values per column, keeping only the 30 worst offenders.
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)[:30]
missing_data = pd.DataFrame({'Missing Ratio': all_data_na})
missing_data.head(20)

# + _cell_guid="1c80610d-8f19-43c8-bd54-7d786b0dca49" _execution_state="idle"
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='90')
sns.barplot(x=all_data_na.index, y=all_data_na)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)

# + [markdown]
# **Data Correlation**

# + _cell_guid="3fc115c7-33bb-456d-b3e5-4dd985bfbc9f" _execution_state="idle"
# Correlation map to see how features are correlated with SalePrice.
corrmat = train.corr()
plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=0.9, square=True)

# + [markdown]
# ### Imputing missing values
#
# We impute them by proceeding sequentially through features with missing values.
#
# - **PoolQC** : data description says NA means "No Pool". That makes sense,
#   given the huge ratio of missing values (+99%) and that the majority of
#   houses have no Pool at all in general.
# + _cell_guid="ca8a04eb-f42b-4c26-a690-bb98c95c6118" _execution_state="idle"
# For these categorical features the data description says NA simply means
# "feature absent" (no pool, no misc feature, no alley access, no fence,
# no fireplace), so we encode the absence explicitly as the string "None".
for col in ('PoolQC', 'MiscFeature', 'Alley', 'Fence', 'FireplaceQu'):
    all_data[col] = all_data[col].fillna("None")

# + [markdown]
# - **LotFrontage** : Since the area of each street connected to the house
#   property most likely has a similar area to other houses in its
#   neighborhood, we can **fill in missing values by the median LotFrontage
#   of the neighborhood**.

# + _cell_guid="b8cd4872-67c6-4c82-a1a3-807d08efb658" _execution_state="idle"
# Group by neighborhood and fill in missing values with the median
# LotFrontage of that neighborhood.
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
    lambda x: x.fillna(x.median()))

# + [markdown]
# - **GarageType, GarageFinish, GarageQual, GarageCond** : no garage ->
#   replace missing data with None.
# - **GarageYrBlt, GarageArea, GarageCars** : no garage = no cars in such a
#   garage -> replace missing data with 0.

# + _cell_guid="f264979e-f5e2-4aae-b313-7468570c0294" _execution_state="idle"
for col in ('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
    all_data[col] = all_data[col].fillna('None')
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
    all_data[col] = all_data[col].fillna(0)

# + [markdown]
# - **Basement features** : missing numeric values are likely zero for having
#   no basement; for the categorical basement features, NaN likewise means
#   that there is no basement.

# + _cell_guid="dd9e7ef7-3cee-437a-9a33-20987e238425" _execution_state="idle"
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',
            'BsmtFullBath', 'BsmtHalfBath'):
    all_data[col] = all_data[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    all_data[col] = all_data[col].fillna('None')

# + [markdown]
# - **MasVnrArea and MasVnrType** : NA most likely means no masonry veneer
#   for these houses - fill 0 for the area and None for the type.

# + _cell_guid="63a3a030-482f-4292-a887-16880f7a2882" _execution_state="idle"
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0)

# + [markdown]
# - **MSZoning, Electrical, KitchenQual, Exterior1st, Exterior2nd, SaleType** :
#   each of these has only one or a handful of missing values, so we
#   substitute the most common value (the mode) of the column.

# + _cell_guid="0e4f28b5-784c-4369-92f4-bda15fa55172" _execution_state="idle"
for col in ('MSZoning', 'Electrical', 'KitchenQual',
            'Exterior1st', 'Exterior2nd', 'SaleType'):
    all_data[col] = all_data[col].fillna(all_data[col].mode()[0])

# + [markdown]
# - **Utilities** : all records are "AllPub" except one "NoSeWa" and 2 NA.
#   Since the house with 'NoSeWa' is in the training set, **this feature
#   won't help in predictive modelling**. We can then safely remove it.

# + _cell_guid="facec65d-945f-4be1-86c8-1be011cc2bd0" _execution_state="idle"
all_data = all_data.drop(['Utilities'], axis=1)

# + [markdown]
# - **Functional** : data description says NA means typical.
# - **MSSubClass** : NA most likely means no building class -> "None".

# + _cell_guid="c968aa5e-c34f-442e-9388-69e074a2c84e" _execution_state="idle"
all_data["Functional"] = all_data["Functional"].fillna("Typ")
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None")

# + [markdown]
# Is there any remaining missing value ?

# + _cell_guid="0adf05cf-ce60-4169-805c-ca776e60e85a" _execution_state="idle"
# Check remaining missing values if any.
all_data_na = (all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio': all_data_na})
missing_data.head()

# + [markdown]
# It remains no missing value.
#
# ### More features engineering
#
# **Transforming some numerical variables that are really categorical**

# + _cell_guid="a52dc2f9-ca02-4024-987a-165ce630b356" _execution_state="idle"
# MSSubClass = the building class: numeric codes, but categorical in meaning.
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)

# Changing OverallCond into a categorical variable.
all_data['OverallCond'] = all_data['OverallCond'].astype(str)

# Year and month sold are transformed into categorical features.
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str)

# + [markdown]
# **Label Encoding some categorical variables that may contain information
# in their ordering set**

# + _cell_guid="81c97efb-4f76-4e87-861a-10a60ab5c84b" _execution_state="idle"
from sklearn.preprocessing import LabelEncoder
cols = ('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
        'ExterQual', 'ExterCond', 'HeatingQC', 'PoolQC', 'KitchenQual',
        'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure',
        'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street',
        'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold')
# Process columns, apply LabelEncoder to categorical features.
for c in cols:
    lbl = LabelEncoder()
    lbl.fit(list(all_data[c].values))
    all_data[c] = lbl.transform(list(all_data[c].values))

# shape
print('Shape all_data: {}'.format(all_data.shape))

# + [markdown]
# **Adding one more important feature**
#
# Since area related features are very important to determine house prices,
# we add one more feature which is the total area of basement, first and
# second floor areas of each house.

# + _cell_guid="fc1a8f1a-f003-4538-8e60-d819f46362a3" _execution_state="idle"
# Adding total sqfootage feature.
all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']

# + [markdown]
# **Skewed features**

# + _cell_guid="c5972a73-7e86-4164-a9d6-58432dae1933" _execution_state="idle"
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index

# Check the skew of all numerical features.
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew': skewed_feats})
skewness.head(10)

# + [markdown]
# **Box Cox Transformation of (highly) skewed features**
#
# We use the scipy function boxcox1p which computes the Box-Cox
# transformation of **\\(1 + x\\)**. Note that setting \\( \lambda = 0 \\)
# is equivalent to the log1p used above for the target variable.

# + _cell_guid="d8ebce87-c55d-46c6-8f06-8b34116d7370" _execution_state="idle"
# BUGFIX: the original `skewness[abs(skewness) > 0.75]` indexes a DataFrame
# with a same-shaped boolean DataFrame, which masks values to NaN but keeps
# EVERY row, so `skewness.index` still listed all numeric features and the
# Box Cox transform was silently applied to every one of them. Filtering on
# the 'Skew' column (a Series mask) keeps only the genuinely skewed features,
# matching the intent stated by the print below.
skewness = skewness[abs(skewness['Skew']) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))

from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15  # fixed Box Cox lambda; lam = 0 would reduce to log1p
for feat in skewed_features:
    all_data[feat] = boxcox1p(all_data[feat], lam)

# + [markdown]
# **Getting dummy categorical features**

# + _cell_guid="c8e63516-e4e2-4f36-a60e-1c8316392c60" _execution_state="idle"
all_data = pd.get_dummies(all_data)
print(all_data.shape)

# + [markdown]
# Getting the new train and test sets.

# + _cell_guid="0a75646f-1974-40ad-a085-ff7bc08454a5" _execution_state="idle"
train = all_data[:ntrain]
test = all_data[ntrain:]

# + [markdown]
# # Modelling
#
# **Import librairies**

# + _cell_guid="135e8ac5-ce46-4a5f-b205-13f827ef33b8" _execution_state="idle"
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb

# + [markdown]
# **Define a cross validation strategy**
#
# We use the **cross_val_score** function of Sklearn.
# However cross_val_score has no shuffle argument, so we construct the
# shuffled KFold splitter ourselves and hand it to cross_val_score.

# + _cell_guid="f396260b-e182-4a87-9a2a-b92b9375ea6f" _execution_state="idle"
# Validation function
n_folds = 5


def rmsle_cv(model):
    """Return the per-fold RMSE of `model` on the (log-transformed) target.

    Since y_train is already log1p(SalePrice), this RMSE is the RMSLE.
    Uses 5-fold cross-validation on the module-level `train` / `y_train`,
    shuffling the rows before splitting (random_state=42 for reproducibility).
    """
    # BUGFIX: the original did `KFold(...).get_n_splits(train.values)`, which
    # returns the bare integer 5; cross_val_score then built its own
    # *unshuffled* KFold from that int, silently discarding the shuffle this
    # cell promises. Passing the KFold object itself keeps the shuffled splits.
    kf = KFold(n_folds, shuffle=True, random_state=42)
    rmse = np.sqrt(-cross_val_score(model, train.values, y_train,
                                    scoring="neg_mean_squared_error", cv=kf))
    return rmse


# + [markdown]
# ## Base models
#
# - **LASSO Regression** :
#
# This model may be very sensitive to outliers. So we need to make it more
# robust on them. For that we use sklearn's **RobustScaler()** method on
# pipeline.

# + _cell_guid="03f45cb7-0a40-45ea-94e8-64fd7ff1e8f6" _execution_state="idle"
lasso = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))

# + [markdown]
# - **Elastic Net Regression** :
#
# again made robust to outliers

# + _cell_guid="e635cc7e-caeb-4f8b-ae78-c41f8eb0be59" _execution_state="idle"
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))

# + [markdown]
# - **Kernel Ridge Regression** :

# + _cell_guid="805343d9-0af6-43a2-a351-c0b25c62fcf0" _execution_state="idle"
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)

# +
# - **Gradient Boosting Regression** :
#
# With **huber** loss that makes it robust to outliers.

# + _cell_guid="af13332c-fd37-40bb-a078-6bad6caaa2ab" _execution_state="idle"
GBoost = GradientBoostingRegressor(n_estimators=3000,
                                   learning_rate=0.05,
                                   max_depth=4,
                                   max_features='sqrt',
                                   min_samples_leaf=15,
                                   min_samples_split=10,
                                   loss='huber',
                                   random_state=5)

# + [markdown]
# - **XGBoost** :

# + _cell_guid="ed738a4c-c246-443c-a3c1-39df25f988b7" _execution_state="idle"
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603,
                             gamma=0.0468,
                             learning_rate=0.05,
                             max_depth=3,
                             min_child_weight=1.7817,
                             n_estimators=2200,
                             reg_alpha=0.4640,
                             reg_lambda=0.8571,
                             subsample=0.5213,
                             silent=1,
                             nthread=-1)

# + [markdown]
# - **LightGBM** :

# + _cell_guid="dd84d7db-3f83-4e4e-b02f-7632ca5ee4ac" _execution_state="idle"
model_lgb = lgb.LGBMRegressor(objective='regression',
                              num_leaves=5,
                              learning_rate=0.05,
                              n_estimators=720,
                              max_bin=55,
                              bagging_fraction=0.8,
                              bagging_freq=5,
                              feature_fraction=0.2319,
                              feature_fraction_seed=9,
                              bagging_seed=9,
                              min_data_in_leaf=6,
                              min_sum_hessian_in_leaf=11)

# + [markdown]
# ### Base models scores
#
# Let's see how these base models perform on the data by evaluating the
# cross-validation rmsle error.

# + _cell_guid="2d0cc958-1654-425c-90ed-1ceb9edd7186" _execution_state="idle"
score = rmsle_cv(lasso)
print(f"\nLasso score: {score.mean():.4f} ({score.std():.4f})\n")

# + _cell_guid="7cf6faaf-d69a-4268-b192-a9e60d207c28" _execution_state="idle"
score = rmsle_cv(ENet)
print(f"ElasticNet score: {score.mean():.4f} ({score.std():.4f})\n")

# + _cell_guid="a1195106-2170-47f2-86a7-c4f3be683aa8" _execution_state="idle"
score = rmsle_cv(KRR)
print(f"Kernel Ridge score: {score.mean():.4f} ({score.std():.4f})\n")

# + _cell_guid="43dd152f-7c49-41b6-8f8e-a5864b1e2a71" _execution_state="idle"
score = rmsle_cv(GBoost)
print(f"Gradient Boosting score: {score.mean():.4f} ({score.std():.4f})\n")

# + _cell_guid="30738ecc-39f8-44ed-9f42-68518beb7e6a" _execution_state="idle"
score = rmsle_cv(model_xgb)
print(f"Xgboost score: {score.mean():.4f} ({score.std():.4f})\n")

# + _cell_guid="41e0eab9-630d-48d3-905b-e4663aad2262" _execution_state="idle"
score = rmsle_cv(model_lgb)
print(f"LGBM score: {score.mean():.4f} ({score.std():.4f})\n")

# + [markdown]
# ## Stacking models
#
# ### Simplest Stacking approach : Averaging base models
#
# We begin with this simple approach of averaging base models.
# We build a new **class** to extend scikit-learn with our model and also to
# leverage encapsulation and code reuse
# ([inheritance](https://en.wikipedia.org/wiki/Inheritance_(object-oriented_programming))).
#
# **Averaged base models class**

# + _cell_guid="49e44ad6-8dc4-4a67-8079-adbac934fec4" _execution_state="idle"
class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    """Ensemble that predicts the plain mean of several base regressors."""

    def __init__(self, models):
        self.models = models

    def fit(self, X, y):
        """Fit a fresh clone of every base model on (X, y).

        Clones keep the originals untouched so the ensemble can be
        re-fitted (e.g. inside cross-validation) without side effects.
        """
        self.models_ = [clone(estimator) for estimator in self.models]
        for estimator in self.models_:
            estimator.fit(X, y)
        return self

    def predict(self, X):
        """Return the column-wise mean of the fitted clones' predictions."""
        stacked = np.column_stack([estimator.predict(X)
                                   for estimator in self.models_])
        return stacked.mean(axis=1)


# + [markdown]
# **Averaged base models score**
#
# We just average four models here **ENet, GBoost, KRR and lasso**.
# Of course we could easily add more models in the mix.

# + _cell_guid="d480916f-89e7-4bcc-9b9d-b54492591654" _execution_state="idle"
averaged_models = AveragingModels(models=(ENet, GBoost, KRR, lasso))

score = rmsle_cv(averaged_models)
print(" Averaged base models score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))

# + [markdown]
# Wow !
# It seems even the simplest stacking approach really improves the score.
# This encourages us to go further and explore a less simple stacking approach.

# + [markdown] _cell_guid="387761c7-9dc5-41aa-8cda-5315b6a72fbf" _execution_state="idle"
# ### Less simple Stacking : Adding a Meta-model

# + [markdown]
# In this approach, we add a meta-model on averaged base models and use the
# out-of-fold predictions of these base models to train our meta-model.
#
# The procedure, for the training part, may be described as follows:
#
# 1. Split the total training set into two disjoint sets (here **train** and **holdout**).
# 2. Train several base models on the first part (**train**).
# 3. Test these base models on the second part (**holdout**).
# 4. Use the predictions from 3) (called out-of-fold predictions) as the inputs,
#    and the correct responses (target variable) as the outputs, to train a
#    higher-level learner called the **meta-model**.
#
# The first three steps are done iteratively. If we take for example a 5-fold
# stacking, we first split the training data into 5 folds. Then we will do 5
# iterations. In each iteration, we train every base model on 4 folds and
# predict on the remaining fold (the holdout fold).
#
# So we can be sure that, after 5 iterations, the entire data has been used to
# get out-of-fold predictions, which we then use as new features to train our
# meta-model in step 4.
#
# For the prediction part, we average the predictions of all base models on the
# test data and use them as **meta-features**, on which the final prediction is
# done with the meta-model.
# # + [markdown] _cell_guid="7a1d4b95-ad19-4522-8459-99f0839a49bb" _uuid="5e232cc0edea67af1fc672ee07a93c435e7e2f98" # ![Faron](http://i.imgur.com/QBuDOjs.jpg) # # (Image taken from [Faron](https://www.kaggle.com/getting-started/18153#post103381)) # + [markdown] _cell_guid="9e3f01cb-cd76-4861-81c0-775c613f7d7f" _uuid="7c4d68ee6e6dffdf0b816bee48fd55b29bd66386" # ![kaz](http://5047-presscdn.pagely.netdna-cdn.com/wp-content/uploads/2017/06/image5.gif) # # Gif taken from [KazAnova's interview](http://blog.kaggle.com/2017/06/15/stacking-made-easy-an-introduction-to-stacknet-by-competitions-grandmaster-marios-michailidis-kazanova/) # + [markdown] _cell_guid="b9ca03ee-0377-4313-a236-59d3d972cac3" _uuid="a46c5bab3855a4728c52765379c1674de3123a25" # On this gif, the base models are algorithms 0, 1, 2 and the meta-model is algorithm 3. The entire training dataset is # A+B (target variable y known) that we can split into train part (A) and holdout part (B). And the test dataset is C. # # B1 (which is the prediction from the holdout part) is the new feature used to train the meta-model 3 and C1 (which # is the prediction from the test dataset) is the meta-feature on which the final prediction is done. 
# + [markdown] _cell_guid="bd10661e-6eec-4789-83fa-d55b77619252" _execution_state="idle" _uuid="5374a729325ac38423ff82891f1cc887f14ba317" # **Stacking averaged Models Class** # + _cell_guid="03326750-2442-4e14-8774-6e2ce9330173" _execution_state="idle" _uuid="9115cf7180ba9491bd0a2c5bd566e18238c9de80" class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, base_models, meta_model, n_folds=5): self.base_models = base_models self.meta_model = meta_model self.n_folds = n_folds # We again fit the data on clones of the original models def fit(self, X, y): self.base_models_ = [list() for x in self.base_models] self.meta_model_ = clone(self.meta_model) kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156) # Train cloned base models then create out-of-fold predictions # that are needed to train the cloned meta-model out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models))) for i, model in enumerate(self.base_models): for train_index, holdout_index in kfold.split(X, y): instance = clone(model) self.base_models_[i].append(instance) instance.fit(X[train_index], y[train_index]) y_pred = instance.predict(X[holdout_index]) out_of_fold_predictions[holdout_index, i] = y_pred # Now train the cloned meta-model using the out-of-fold predictions as new feature self.meta_model_.fit(out_of_fold_predictions, y) return self #Do the predictions of all base models on the test data and use the averaged predictions as #meta-features for the final prediction which is done by the meta-model def predict(self, X): meta_features = np.column_stack([ np.column_stack([model.predict(X) for model in base_models]).mean(axis=1) for base_models in self.base_models_ ]) return self.meta_model_.predict(meta_features) # + [markdown] _cell_guid="da4c9354-b5c2-4994-8ffd-550416a5c4db" _execution_state="idle" _uuid="5fdbcf5c678b260adf80cf39b0f3bb63a26213e1" # **Stacking Averaged models Score** # + [markdown] 
# To make the two approaches comparable (by using the same number of models),
# we just average **ENet, KRR and GBoost**, then we add **lasso as meta-model**.

# + _cell_guid="4db03a27-e9fb-484d-bbfe-2058f16dce77" _execution_state="idle"
stacked_averaged_models = StackingAveragedModels(base_models=(ENet, GBoost, KRR),
                                                 meta_model=lasso)

score = rmsle_cv(stacked_averaged_models)
print("Stacking Averaged models score: {:.4f} ({:.4f})".format(score.mean(), score.std()))

# + [markdown]
# We get again a better score by adding a meta learner.
#
# ## Ensembling StackedRegressor, XGBoost and LightGBM
#
# We add **XGBoost and LightGBM** to the **StackedRegressor** defined previously.
#
# We first define a rmsle evaluation function.

# + _cell_guid="232c3959-c6e1-4535-8ad4-62892edc3f06" _execution_state="idle"
def rmsle(y, y_pred):
    """RMSE between y and y_pred; equals RMSLE here because the target
    was log1p-transformed earlier in the notebook."""
    return np.sqrt(mean_squared_error(y, y_pred))

# + [markdown]
# ### Final Training and Prediction
#
# **StackedRegressor:**

# + _cell_guid="e64b2750-1e32-4e91-affb-e583d6ca8722" _execution_state="busy"
stacked_averaged_models.fit(train.values, y_train)
stacked_train_pred = stacked_averaged_models.predict(train.values)
# expm1 undoes the log1p applied to SalePrice, giving prices in dollars.
stacked_pred = np.expm1(stacked_averaged_models.predict(test.values))
print(rmsle(y_train, stacked_train_pred))

# + [markdown]
# **XGBoost:**

# + _cell_guid="2af45055-47aa-4e26-84df-ba5726bdff54" _execution_state="idle"
model_xgb.fit(train, y_train)
xgb_train_pred = model_xgb.predict(train)
xgb_pred = np.expm1(model_xgb.predict(test))
print(rmsle(y_train, xgb_train_pred))

# + [markdown]
# **LightGBM:**

# + _cell_guid="995d4c8e-db72-4370-a1ec-50e0c761f09a" _execution_state="idle"
model_lgb.fit(train, y_train)
lgb_train_pred = model_lgb.predict(train)
lgb_pred = np.expm1(model_lgb.predict(test.values))
print(rmsle(y_train, lgb_train_pred))

# + _cell_guid="619452b2-c395-48fe-81ab-d6b1d355236b" _execution_state="idle"
'''RMSE on the entire Train data when averaging'''
# BUGFIX: a stray '' after xgb_train_pred*0.15 made this line a SyntaxError.
print('RMSLE score on train data:')
print(rmsle(y_train, stacked_train_pred*0.70 + xgb_train_pred*0.15 + lgb_train_pred*0.15))

# + [markdown]
# **Ensemble prediction:**

# + _cell_guid="3ec2c58f-6bee-46a6-a263-1fe2cf3569cb" _execution_state="idle"
# BUGFIX: the LightGBM term had been commented out, leaving weights that sum
# to only 0.85 and an ensemble inconsistent with the 0.70/0.15/0.15 blend
# whose train RMSLE is reported above. Restore it so the weights sum to 1
# and match the evaluated blend.
ensemble = stacked_pred*0.70 + xgb_pred*0.15 + lgb_pred*0.15

# + [markdown]
# **Submission**

# + _cell_guid="3db46af9-e18a-43bb-9699-45b851f835e5" _execution_state="idle"
sub = pd.DataFrame()
sub['Id'] = test_ID
sub['SalePrice'] = ensemble
sub.to_csv('submission.csv', index=False)

# + [markdown]
# **If you found this notebook helpful or you just liked it, some upvotes
# would be very much appreciated - that will keep me motivated to update it
# on a regular basis** :-)
predictprice/Stacked Regressions Top 0.04 on LeaderBoard.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# <h1> Importando Bibliotecas </h1>

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
import numpy as np
import ipywidgets as widgets
import plotly.express as px
from ipywidgets import fixed

# ---
# <h1> 📖 Leitura dos Dados </h1>
# <p>Nesta parte iremos realizar a leitura, ver as estatísticas descritivas destes assim como verificar a existência de dados nulos.</p>

# +
# Raw string avoids the invalid escape sequences (\p, \k) the original
# non-raw Windows path produced.
url = r'C:\projects\kc_house_eda\kc_house_data.csv'

pd.set_option('display.float_format', lambda x: '%.2f' % x)


def get_data(url):
    """Read the King County housing dataset from *url* (a CSV path)."""
    data = pd.read_csv(url)
    return data
# -

data = get_data(url)
data

data.describe()

data.dtypes

plt.figure(figsize=(8,6))
sns.heatmap(data.isnull());

# ---
# <h1> 👩‍💻 Alteração dos Dados </h1>
# <p>Ao verificarmos superficialmente os dados iremos realizar algumas alterações destes.</p>

def data_alteration(data):
    """Return a cleaned copy of *data* with derived columns.

    Adds:
      - 'date' parsed to datetime
      - 'yr_built' as a datetime (0 mapped to 1970-01-01)
      - 'yr_life': building age in years relative to the current year
      - 'level': price quartile label ('level_0'/'level_1'/'level_2'/'level_4')
      - 'month' and 'season' derived from 'date'
    Drops the 'sqft_living15' and 'sqft_lot15' columns.
    """
    # Work on a copy of the argument. The original re-assigned
    # data = get_data(url) here, silently ignoring the DataFrame passed in.
    data = data.copy()

    # Parse the sale date.
    data['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d')

    # A year built of 0 means "unknown"; map it to 1970-01-01.
    data['yr_built'] = data['yr_built'].apply(
        lambda x: pd.to_datetime(str(x), format='%Y') if x != 0
        else pd.to_datetime('1970-01-01'))

    # Building age in years.
    data['yr_life'] = int(datetime.now().strftime('%Y')) - data['yr_built'].dt.year

    # Price quartile labels. Quantiles are computed once up front; the
    # original recomputed each quantile inside the lambda for every row.
    # NOTE(review): the original labels skip 'level_3' and jump to 'level_4';
    # kept unchanged for compatibility with any downstream use of the labels.
    q25, q50, q75 = data['price'].quantile([0.25, 0.50, 0.75])
    data['level'] = data['price'].apply(
        lambda x: 'level_0' if x <= q25
        else 'level_1' if x <= q50
        else 'level_2' if x <= q75
        else 'level_4')

    # Seasons from the sale month. Bug fix: the original summer test was
    # (x > 6) & (x <= 8), which left June (month 6) mislabelled as 'fall'.
    data['month'] = data['date'].dt.month
    data['season'] = data['month'].apply(
        lambda m: 'winter' if m <= 2
        else 'spring' if m <= 5
        else 'summer' if m <= 8
        else 'fall')

    # Drop the neighbour-average columns. Bug fix: the original called
    # data.drop(...) without keeping the result, so nothing was removed.
    # (It also renamed price_x/price_y, columns that never exist here —
    # that no-op rename has been removed.)
    data = data.drop(['sqft_living15', 'sqft_lot15'], axis=1)
    return data

new_data = data_alteration(data)
new_data

# ___
# <h1>🕵️ Análise Exploratória </h1>
#
# Nesta seção iremos realizar uma análise exploratória dos dados direcionado esta para a resolução de hipóteses de negócios chegada até nós.
#
# <ul>
#     <li><strong>Hipótese 1:</strong> Imóveis com vista para água são mais caros, se sim quantos % ?</li>
#     <li><strong>Hipótese 2:</strong> Imóveis antigos são mais baratos que os recentes, se sim quantos % ?</li>
#     <li><strong>Hipótese 3:</strong> Quais são os atributos que mais contribuem para o aumento do preço?</li>
# </ul>

# ___
# <h3> Hipótese de negócio 1</h3>
# <ul>
#     <li>Imóveis com vista para água são mais caros, caso sim quantos %?</li>
# </ul>

plt.figure(figsize = (8,6))
data_group = new_data[['price', 'waterfront']].groupby('waterfront').mean().reset_index()
sns.barplot(x = data_group['waterfront'], y = data_group['price']);

# NOTE(review): this computes (a - b) / (a + b), a normalised difference,
# not the usual percent increase (a - b) / b — the "50%" quoted below
# reflects this formula; confirm which definition is intended.
valor = (data_group['price'].values[1] - data_group['price'].values[0]) / (data_group['price'].values[1] + data_group['price'].values[0])
valor.round(2)* 100

# ***Resposta:*** Verificamos que os locais com vista para água possui um valor de 50% maior que os locais que não tem vista para água

# ___
# <h3> Hipótese de negócio 2</h3>
# <ul>
#     <li>Imóveis antigos são mais baratos que os recentes, caso sim quantos %?</li>
# </ul>

group_data = new_data[['price', 'yr_life']].groupby('yr_life').mean().reset_index()
plt.figure(figsize = (18,7));
plt.xticks(rotation = 60)
sns.barplot(x = group_data['yr_life'], y = group_data['price']);

# ***Resposta:*** Verificamos a idade do imóvel não é um fator determinante para a valorização ou desvalorização do preço do imóvel.
# ___ # <h3> Hipótese de negócio 3</h3> # <ul> # <li>Quais os atributos que mais contribuem para o aumento do preço?</li> # </ul> # Para responder essa pergunta, iremos inicialmente analisar quais atributos terão uma maior correlação com o preço, tendo este que ser uma correlação maior que 50 %. df_corr = pd.DataFrame(new_data.corr()['price']).T plt.figure(figsize = (12,2)); sns.heatmap(df_corr[df_corr >= .5], annot=True); # Verificamos que existe uma correlação entre o design do edifício e a área com o preço do móvel, mas apesar disso correlação não significa necessariamente uma casualidade, para verificarmos se esta existe, realizaremos os gráficos abaixo: fig, axis = plt.subplots(2,2, figsize = (14,10)) sns.boxplot(x = new_data['bathrooms'], y = new_data['price'], ax = axis[0,0]); sns.scatterplot(x = new_data['sqft_living'], y= new_data['price'], ax = axis[0,1]); sns.boxplot(x = new_data['grade'], y= new_data['price'], ax = axis[1,0]); sns.scatterplot(x = new_data['sqft_above'], y= new_data['price'], ax = axis[1,1]); # ***Resposta:*** Podemos ver que existe uma relação # <ul> # <li><strong>Bathrooms x Price:</strong> verificamos que existe uma certa relação entre o a quantidade de banheiros do edifício e o preço deste, isso ocorre que a presença de áreas molhadas favorecem o encarecimento do edifício.</li> # <li><strong>Grade x Price:</strong> verificamos que existe uma forte relação entre o deseign do edifício e o preço deste, o que pode ser entendido que o gasto com acabamentos durante a construção é maior.</li> # <li><strong>Sqft x Price:</strong> como é de se esperar, existe uma relação entre a área e o preço de venda.</li> # </ul> # --- # <h1> 💼 Pergunta de Negócios </h1> # <h2>Quais imóves que deveríamos comprar e por qual preço?</h2> # # Para responder essa pergunta, iremos realizar as seguintes etapas: # <ul> # <li>Agrupar os dados por cep</li> # <li>Calcular a média dos preços por cep.</li> # <li>Realizar as condições para compra, neste caso 
utilizaremos três condições diferentes e verificar a quantidade resultante de imóveis em cada um deles.</li> # </ul> # Iremos explorar os outros cenários no intuito de verificar quais outros atributos que podem impactar em nossa análise. # ___ # **Primeiro cenário:** nesta iremos realizar uma análise contendo a **condição** do imóvel e o **preço médio** e verificar quantos imóveis para compra restaram desta seleção. data_select = new_data[['zipcode', 'price']].groupby('zipcode').mean().reset_index() df_select = pd.merge(new_data, data_select, how='inner', on = 'zipcode') df_select.rename(columns = {'price_x': 'price', 'price_y': 'price_mean'}, inplace = True) for i in range(len(df_select)): if (df_select.loc[i, 'price'] < df_select.loc[i, 'price_mean']) & (df_select.loc[i, 'condition'] >= 3): df_select.loc[i, 'Proposition'] = 'Buy' else: df_select.loc[i, 'Proposition'] = 'Not Buy' df_select plt.figure(figsize = (8,6)) sns.countplot(data = df_select, x = 'Proposition') # ___ # **Segundo cenário:** realizaremos uma análise contendo o **design** do edifício e o **preço médio**, retirada esta idéia do item anterior da qual descobrimos uma alta correlação entre o preço e o design do imóvel, verificando por fim quantos imóveis para compra restaram desta seleção. 
# + df_select_test = pd.merge(new_data, data_select, how='inner', on = 'zipcode') df_select_test.rename(columns = {'price_x': 'price', 'price_y': 'price_mean'}, inplace = True) for i in range(len(df_select_test)): if (df_select_test.loc[i, 'price'] < df_select_test.loc[i, 'price_mean']) & (df_select_test.loc[i, 'grade'] > 6): df_select_test.loc[i, 'Proposition'] = 'Buy' else: df_select_test.loc[i, 'Proposition'] = 'Not Buy' # - df_select_test plt.figure(figsize = (8,6)); sns.countplot(data = df_select_test, x = df_select_test.Proposition); # ___ # **Terceiro cenário:** aplicaremos tanto as variáveis das **condições** do imóvel assim como o **design** do edifício pelo **preço médio**, verificando quantos imóveis para compra resultaram desta seleção. # + data_select_test = new_data[['price', 'zipcode']].groupby('zipcode').mean().reset_index() df_select_test_2 = pd.merge(new_data, data_select_test, how = 'inner', on = 'zipcode'); df_select_test_2.rename(columns = {'price_x': 'price', 'price_y': 'price_mean'}, inplace = True) for i in range(len(df_select_test_2)): if (df_select_test_2.loc[i, 'price'] < df_select_test_2.loc[i, 'price_mean']) & (df_select_test_2.loc[i, 'condition'] > 3) & (df_select_test_2.loc[i, 'grade'] > 7): df_select_test_2.loc[i, 'Proposition'] = 'Buy' else: df_select_test_2.loc[i, 'Proposition'] = 'Not Buy' # - df_select_test_2 plt.figure(figsize=(8,6)) sns.countplot(data = df_select_test_2, x = df_select_test_2.Proposition); # ***Conclusão dos cenários e Escolha*** <br> # # Podemos ver que os três cenários são possíveis, isso depende de qual direção o negócio irá se deslocar assim como a quantidade de dinheiro disponível para a compra destes, pois quanto mais atributos inserimos para filtrar a quantidade de imóveis, menor a quantidade de imóveis como resultado. # # # Com isso iremos escolher o primeiro cenário (condições do imóvel x preço) pois achamos que este é um parâmentro que tem relação com o preço do imóvel. 
# ___ # <h2>Quais imóves que deveríamos vender e por qual preço?</h2> # Para respondermos essa pergunta iremos primeiro verificar o sobre os preços dos imóveis df_profit = df_select[df_select['Proposition'] == 'Buy'] plt.figure(figsize = (16,7)) plt.xticks(rotation = 60) sns.barplot(data = df_profit, x = df_profit['zipcode'], y = df_profit['price']); # Vemos com isso que alguns imóveis contém um preço bem elevado e outros não tanto assim. # # Outro aspecto que iremos verificar é se a distribuição de preço se mantém a mesma ou se ela sofre alterações conforme o tempo, com isso realizaremos o seguinte gráfico abaixo: plt.figure(figsize = (14,7)) sns.lineplot(data = df_profit, x = 'season', y = 'price'); # Vimos que durante o tempo os preços dos imóveis sofrem flutuações significativas, em relação a isso, iremos tomar duas decisões: # <ul> # <li>Caso os <strong>preços</strong> sejam menor que a <strong>mediana</strong>, iremos <strong>acrescentar</strong> um lucro em <strong>30%</strong> em relação ao preço de compra.</li> # Preço venda < Mediana = Preço * 1,30 <br><br> # <li>Caso os <strong>preços</strong> sejam maiior que a <strong>mediana</strong>, iremos <strong>acrescentar</strong> um lucro de <strong>10%</strong> em relação ao preço de compra.</li> # Preço venda < Mediana = Preço * 1,10 <br><br> # </ul> # + # Agrupando os dados de acordo com o cep, época e o preço df2 = df_profit[['zipcode','season','price']].groupby(['zipcode','season']).median().reset_index() df2 = df2.rename(columns={'price': 'price_median_season'}) # unir df2 com df_profit df_final = pd.merge(df_profit, df2, how ='inner', on=['zipcode','season']) df_final # - for i in range(len(df_final)): if df_final.loc[i, 'price'] < df_final.loc[i, 'price_median_season']: df_final.loc[i, 'sale_price'] = df_final.loc[i, 'price'] * 1.3 else: df_final.loc[i, 'sale_price'] = df_final.loc[i, 'price'] * 1.1 df_final['profit'] = df_final['sale_price'] - df_final['price'] df_final # O lucro final junto com os 
custos finais podem ser resumidos com a seguinte tabela: # + profit = df_final.profit.sum() sell = df_final.sale_price.sum() cost = df_final.price.sum() cost_profit = pd.DataFrame({'Sell': [sell], 'Cost': [cost], 'Profit': [profit]}) cost_profit # - # Nisso vemos que o temos um preço de vendas de ***6.322.951.472.30***, um custo de ***5.345.126.211.00*** gerando um lucro final de ***977.825.261.30***
EDA_house_price.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:miniconda3-analysis] # language: python # name: conda-env-miniconda3-analysis-py # --- import numpy as np import warnings warnings.filterwarnings('ignore') import xarray as xr from matplotlib import pyplot as plt import cartopy.crs as ccrs import cartopy.feature as cfeature from mpl_toolkits.basemap import Basemap #import sysz import importlib #sys.path.append("/glade/u/home/dlawren/python_programs") #from helpers import * #importlib.reload(helpers) # + sim1 = "b.e21.BSSP126cmip6.f09_g17.CMIP6-SSP1-2.6.001" sim2 = "b.e21.BSSP370cmip6.f09_g17.CMIP6-SSP3-7.0.001" #sim2 = "b.e21.BSSP585_BPRPcmip6.f09_g17.CMIP6-esm-ssp585.001" year2 = '2100' year1 = '2015' # - sim1 = "b.e21.BHIST.f09_g17.CMIP6-historical.010" sim2 = "b.e21.BHIST.f09_g17.CMIP6-hist-noLu.001" year2 = '2014' year1 = '1850' path = "/gpfs/fs1/collections/cdg/timeseries-cmip6/" path2 = "/lnd/proc/tseries/day_365/" sim1path = path+sim1+path2 sim2path = path+sim2+path2 var1 = "PCT_LANDUNIT" var2 = "PCT_NAT_PFT" ds1 = xr.open_mfdataset(sim1path+"*h3."+var1+".*") ds2 = xr.open_mfdataset(sim2path+"*h3."+var1+".*") da1 = ds1[var1] da2 = ds2[var1] ds1 = xr.open_mfdataset(sim1path+"*h3."+var2+".*") ds2 = xr.open_mfdataset(sim2path+"*h3."+var2+".*") da3 = ds1[var2] da4 = ds2[var2] landfrac=ds1.landfrac area=ds1.area lon=ds1.lon lat=ds1.lat type(lat) cropfrac1=da1.sel(time=year2).sel(ltype=1) cropfrac2=da2.sel(time=year2).sel(ltype=1) cropfrac1i=da1.sel(time=year1).sel(ltype=1) cropfrac2i=da2.sel(time=year1).sel(ltype=1) # + treefrac1 = da3.sel(time=year2).sel(natpft=range(1,11,1)).sum(dim='natpft')/100 grassfrac1 = da3.sel(time=year2).sel(natpft=range(9,14,1)).sum(dim='natpft')/100 natpft1 = da1.sel(time=year2).sel(ltype=0)/100 treefrac1 = treefrac1*natpft1*100 grassfrac1 = grassfrac1*natpft1*100 treefrac1i = 
da3.sel(time=year1).sel(natpft=range(1,11,1)).sum(dim='natpft')/100 grassfrac1i = da3.sel(time=year1).sel(natpft=range(9,14,1)).sum(dim='natpft')/100 natpft1i = da1.sel(time=year1).sel(ltype=0)/100 treefrac1i = treefrac1i*natpft1i*100 grassfrac1i = grassfrac1i*natpft1i*100 treefrac2 = da4.sel(time=year2).sel(natpft=range(1,11,1)).sum(dim='natpft')/100 grassfrac2 = da4.sel(time=year2).sel(natpft=range(9,14,1)).sum(dim='natpft')/100 natpft2 = da2.sel(time=year2).sel(ltype=0)/100 treefrac2 = treefrac2*natpft2*100 grassfrac2 = grassfrac2*natpft2*100 treefrac2i = da4.sel(time=year1).sel(natpft=range(1,11,1)).sum(dim='natpft')/100 grassfrac2i = da4.sel(time=year1).sel(natpft=range(9,14,1)).sum(dim='natpft')/100 natpft2i = da2.sel(time=year1).sel(ltype=0)/100 treefrac2i = treefrac2i*natpft2i*100 grassfrac2i = grassfrac2i*natpft2i*100 temp = cropfrac1.values[0] temp.shape #treefrac2 = da4.sel(time=year) # + vmax = 30 vmin = -vmax fig = plt.figure(figsize=(16,20)) ax1 = fig.add_subplot(1,3,1,projection=ccrs.Robinson()) im1 = ax1.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(cropfrac1[0, :, :]-cropfrac1i[0,:,:]).values,transform=ccrs.PlateCarree(),vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im1,ax=ax1,shrink=0.07,label='%') ax1.coastlines() ax1.stock_img() ax1.set_title('$\Delta$ Crop Percentage') ax1.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) fig.tight_layout() ax2 = fig.add_subplot(1,3,2,projection=ccrs.Robinson()) im2 = ax2.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(cropfrac2[0,:,:]-cropfrac2i[0,:,:]).values,transform=ccrs.PlateCarree(), vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im2,ax=ax2,shrink=0.07,label='%') ax2.coastlines() ax2.stock_img() ax2.set_title('$\Delta$ Crop Percentage') ax2.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) ax3 = fig.add_subplot(1,3,3,projection=ccrs.Robinson()) im3 = ax3.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(cropfrac1-cropfrac2).values[0],transform=ccrs.PlateCarree(), 
vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im3,ax=ax3,shrink=0.07,label='%') ax3.coastlines() ax3.stock_img() ax3.set_title('$\Delta$ Crop Percentage') ax3.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) #ax1.add_feature(cfeature.OCEAN, zorder=0) # + vmax = 30 vmin = -vmax fig = plt.figure(figsize=(16,20)) ax1 = fig.add_subplot(1,3,1,projection=ccrs.Robinson()) im1 = ax1.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(treefrac1[0, :, :]-treefrac1i[0,:,:]).values,transform=ccrs.PlateCarree(),vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im1,ax=ax1,shrink=0.07,label='%') ax1.coastlines() ax1.stock_img() ax1.set_title('$\Delta$ Tree Percentage') ax1.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) fig.tight_layout() ax2 = fig.add_subplot(1,3,2,projection=ccrs.Robinson()) im2 = ax2.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(treefrac2[0,:,:]-treefrac2i[0,:,:]).values,transform=ccrs.PlateCarree(), vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im2,ax=ax2,shrink=0.07,label='%') ax2.coastlines() ax2.stock_img() ax2.set_title('$\Delta$ Tree Percentage') ax2.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) ax3 = fig.add_subplot(1,3,3,projection=ccrs.Robinson()) im3 = ax3.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(treefrac1-treefrac2).values[0],transform=ccrs.PlateCarree(), vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im3,ax=ax3,shrink=0.07,label='%') ax3.coastlines() ax3.stock_img() ax3.set_title('$\Delta$ Tree Percentage') ax3.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) # + vmax = 40 vmin = -vmax fig = plt.figure(figsize=(16,20)) ax1 = fig.add_subplot(1,3,1,projection=ccrs.Robinson()) im1 = ax1.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(cropfrac1-cropfrac2).values[0],transform=ccrs.PlateCarree(), vmax=vmax,vmin=vmin,cmap='BrBG') fig.colorbar(im1,ax=ax1,shrink=0.07,label='%') ax1.coastlines() ax1.stock_img() ax1.set_title('$\Delta$ Crop Percentage') ax1.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree()) 
fig.tight_layout()

ax2 = fig.add_subplot(1,3,2,projection=ccrs.Robinson())
im2 = ax2.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(treefrac1-treefrac2).values[0],transform=ccrs.PlateCarree(),
                     vmax=vmax,vmin=vmin,cmap='BrBG')
fig.colorbar(im2,ax=ax2,shrink=0.07,label='%')
ax2.coastlines()
ax2.stock_img()
ax2.set_title('$\Delta$ Tree Percentage')
ax2.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree())

ax3 = fig.add_subplot(1,3,3,projection=ccrs.Robinson())
im3 = ax3.pcolormesh(cropfrac1.lon.values,cropfrac1.lat.values,(grassfrac1-grassfrac2).values[0],transform=ccrs.PlateCarree(),
                     vmax=vmax,vmin=vmin,cmap='BrBG')
fig.colorbar(im3,ax=ax3,shrink=0.07,label='%')
ax3.coastlines()
ax3.stock_img()
ax3.set_title('$\Delta$ Grass/Shrub Percentage')
ax3.set_extent([-180,180,-65,80],crs=ccrs.PlateCarree())
# -

# Time series plots

# Global crop area (millions km^2): crop fraction * land fraction * cell area,
# summed over lon/lat for each simulation.
cropfracts=da1.sel(ltype=1)/100
cropareats=landfrac*area*cropfracts
cropareats=cropareats.sum(dim='lon').sum(dim='lat')/1.e6

cropfracts2=da2.sel(ltype=1)/100
cropareats2=landfrac*area*cropfracts2
cropareats2=cropareats2.sum(dim='lon').sum(dim='lat')/1.e6

cropareatsanom = cropareats-cropareats2

# +
# Tree and grass/shrub fractions of the natural-vegetation landunit.
# NOTE(review): natpft ranges 1-10 (tree) and 9-13 (grass/shrub) overlap at
# PFTs 9-10, so those PFTs are counted in both categories — confirm the
# intended PFT grouping.
treefracts=da3.sel(natpft=range(1,11,1)).sum(dim='natpft')/100
grassfracts=da3.sel(natpft=range(9,14,1)).sum(dim='natpft')/100
natvegfracts=da1.sel(ltype=0)/100

treefracts2=da4.sel(natpft=range(1,11,1)).sum(dim='natpft')/100
grassfracts2=da4.sel(natpft=range(9,14,1)).sum(dim='natpft')/100
natvegfracts2=da2.sel(ltype=0)/100

treeareats=landfrac*area*treefracts*natvegfracts
treeareats=treeareats.sum(dim='lon').sum(dim='lat')/1.e6
treeareats2=landfrac*area*treefracts2*natvegfracts2
treeareats2=treeareats2.sum(dim='lon').sum(dim='lat')/1.e6
treeareatsanom = treeareats - treeareats2

grassareats=landfrac*area*grassfracts*natvegfracts
grassareats=grassareats.sum(dim='lon').sum(dim='lat')/1.e6
# Bug fix: the second-simulation grass area must use natvegfracts2 (from da2),
# matching treeareats2 above; the original reused natvegfracts from da1.
grassareats2=landfrac*area*grassfracts2*natvegfracts2
grassareats2=grassareats2.sum(dim='lon').sum(dim='lat')/1.e6
grassareatsanom = grassareats - grassareats2
# +
ax=plt.axes() cropareats.plot.line(color='purple',linewidth=5,label='Crop') cropareats2.plot.line(color='purple',linewidth=5, linestyle='dashed') treeareats.plot.line(color='green',linewidth=5,label='Tree') treeareats2.plot.line(color='green',linewidth=5, linestyle = 'dashed') grassareats.plot.line(color='orange',linewidth=5,label='Grass/Shrub') grassareats2.plot.line(color='orange',linewidth=5,linestyle = 'dashed') ax.set_title('') ax.set_ylabel('millions km$^2$') ax.set_xlabel('Year') ax.legend() # + ax=plt.axes() (cropareats-cropareats[0]).plot.line(color='orange',linewidth=5,label='Crop (SSP1-2.6)') (cropareats2-cropareats2[0]).plot.line(color='orange',linewidth=5, linestyle='dotted',label='Crop (SSP3-7)') (treeareats-treeareats[0]).plot.line(color='green',linewidth=5,label='Tree (SSP1-2.6)') (treeareats2-treeareats2[0]).plot.line(color='green',linewidth=5, linestyle = 'dotted', label='Tree (SSP3-7)') #grassareats.plot.line(color='orange',linewidth=5,label='Grass/Shrub') #grassareats2.plot.line(color='orange',linewidth=5,linestyle = 'dashed') ax.set_title('') ax.set_ylabel('millions km$^2$') ax.set_xlabel('Year') ax.legend() # + ax=plt.axes() (cropareats-cropareats[0]).plot.line(color='orange',linewidth=5,label='Crop') (treeareats-treeareats[0]).plot.line(color='green',linewidth=5,label='Tree') #grassareats.plot.line(color='orange',linewidth=5,label='Grass/Shrub') #grassareats2.plot.line(color='orange',linewidth=5,linestyle = 'dashed') ax.set_title('') ax.set_ylabel('millions km$^2$') ax.set_xlabel('Year') ax.legend() # - ax=plt.axes() cropareatsanom.plot.line(color='purple',linewidth=5,label='Crop') treeareatsanom.plot.line(color='green',linewidth=5,label='Tree') grassareatsanom.plot.line(color='orange',linewidth=5,label='Grass/Shrub') ax.set_title('Land use change') ax.set_ylabel('millions km$^2$') ax.set_xlabel('Year') ax.grid() ax.legend() ax = plt.subplot(2,1,1) cropareats.plot.line(color='purple',linewidth=5,label='Crop') ax2 = plt.subplot(2,1,2) 
# Bottom panel of the two-row figure started in the previous cell.
treeareats.plot.line(color='green',linewidth=5,label='Tree')
# NOTE(review): the title/labels/legend below are applied to `ax` (the top
# subplot) even though the tree series was just drawn on `ax2` — confirm
# whether `ax2` was intended here.
ax.set_title('')
ax.set_ylabel('millions km$^2$')
ax.set_xlabel('Year')
ax.legend()

# Area-box selection (lon 300-340, lat 40-50) averaged over lon then lat,
# then plot the difference between the two regional means.
# NOTE(review): dclim1/dclim2 are not defined anywhere in this notebook, so
# this cell raises NameError unless they are created elsewhere first.
dclim1reg = dclim1.sel(lon=slice(300,340),lat=slice(40,50)).mean(dim='lon').mean(dim='lat')
dclim2reg = dclim2.sel(lon=slice(300,340),lat=slice(40,50)).mean(dim='lon').mean(dim='lat')

(dclim2reg-dclim1reg).plot()
notebooks/LandUseChange_maps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Metadata # # ``` # Course: DS 5001 # Module: 01 Lab # Topic: First Foray # Author: <NAME> # # Purpose: We take a raw text file from Project Gutenberg and convert it into a dataframe of tokens. We then explore some properties of the data. The main idea is to get acquainted with the primary process of convert text into analytical form. # ``` # # Set Up import pandas as pd import seaborn as sns sns.set() # # Import File lines = open('pg105.txt', 'r').readlines() lines[:5] lines[-5:] # # Convert to Dataframe text = pd.DataFrame(lines) text.sample(10) text.columns = ['line_str'] text.head() text.index.name = 'line_num' text.head() # # Extract Simple Features text['len'] = text.line_str.str.len() text.len.describe() text.len.hist(); # **Why two humps?** What might this bimodal distribution indicate? # Let's look at the first hump for characters. text[text['len'] < 5].sample(10) # # Import Again # # Now that we know what line breaks mean, we can use this information to import the file with a more accurate structure. Note also that we could have inferred this from visual inspection, too. But the principle that statistical features can provide evidence for structure remains -- we will use this throughout the course. 
# ## Interpret line breaks `\n` chunk_pat = '\n\n' chunks = open('pg105.txt', 'r').read().split(chunk_pat) text = pd.DataFrame(chunks, columns=['chunk_str']) text.index.name = 'chunk_id' text.head() text.shape # ## Remove remaining breaks text.chunk_str = text.chunk_str.str.replace('\n+', ' ', regex=True).str.strip() text.head() # + [markdown] toc-hr-collapsed=false # # Convert Lines to Tokens # # `K: A dataframe of tokens` # - K = text.chunk_str.str.split(' ', expand=True).stack().to_frame('token_str') K.index.names = ['chunk_num','token_num'] K.shape K.head() # + # Broken down into steps # + # text.chunk_str.str.split(' ') # + # text.chunk_str.str.split(' ', expand=True) # + # text.chunk_str.str.split(' ', expand=True).stack() # + # text.chunk_str.str.split(' ', expand=True).stack().to_frame('token_str') # - K.iloc[100:120, :] # # Do Some Cleaning K['term_str'] = K.token_str.str.replace(r'\W+', '', regex=True).str.lower() K.sample(10) # # Extract a Vocabulary # # `v: A table of terms` (As opposed to tokens, which are term _instances_.) V = K.term_str.value_counts().to_frame('n') V.index.name = 'term_str' V.head(10) # # Visualize Frequest Words V.plot(figsize=(10,5), fontsize=14, rot=45, legend=False); V.n.head(20).sort_values().plot.barh(figsize=(10,10)); # # The The # # Why is "the" the most frequent word? # # Consider that "the" is "[The Most Powerful Word in the English Language](http://www.bbc.com/culture/story/20200109-is-this-the-most-powerful-word-in-the-english-language)." # # > ... ‘the’ lies at the heart of English grammar, having a function rather than a meaning. Words are split into two categories: expressions with a semantic meaning and functional words like ‘the’, ‘to’, ‘for’, with a job to do. ‘The’ can function in multiple ways. This is typical, explains <NAME>, assistant professor in linguistics at New York University: “a super high-usage word will often develop a real flexibility”, with different subtle uses that make it hard to define. 
Helping us understand what is being referred to, ‘the’ makes sense of nouns as a subject or an object. So even someone with a rudimentary grasp of English can tell the difference between ‘I ate an apple’ and ‘I ate the apple’. # # Note: function vs. meaning ... # # > **Function words are very specific to each language.**<br/><br/> # So, someone who is a native Hindi or Russian speaker is going to have to think very differently when constructing a sentence in English. Murphy says that she has noticed, for instance, that sometimes her Chinese students hedge their bets and include ‘the’ where it is not required. Conversely, Smith describes Russian friends who are so unsure when to use ‘the’ that they sometimes leave a little pause: ‘I went into... bank. I picked up... pen.’ English speakers learning a language with no equivalent of ‘the’ also struggle and might overcompensate by using words like ‘this’ and ‘that’ instead. # # Word Dispersion Plots def word_plot(term_str): global K term_str = term_str.lower() (K.term_str == term_str)\ .reset_index(drop=True).astype('int')\ .to_frame(term_str)\ .plot(figsize=(20, .5), legend=False, title=term_str) word_plot('the') word_plot('superior') word_plot('delicacy') word_plot('anne') word_plot('walter') word_plot('wentworth')
M01_Intro/M01_03_first-foray.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Redcard Exploratory Data Analysis # # This dataset is taken from a fantastic paper that looks to see how analytical choices made by different data science teams on the same dataset in an attempt to answer the same research question affect the final outcome. # # [Many analysts, one dataset: Making transparent how variations in analytical choices affect results](https://osf.io/gvm2z/) # # The data can be found [here](https://osf.io/47tnc/). # # # ## The Task # # Do an Exploratory Data Analysis on the redcard dataset. Keeping in mind the question is the following: **Are soccer referees more likely to give red cards to dark-skin-toned players than light-skin-toned players?** # # - Before plotting/joining/doing something, have a question or hypothesis that you want to investigate # - Draw a plot of what you want to see on paper to sketch the idea # - Write it down, then make the plan on how to get there # - How do you know you aren't fooling yourself # - What else can I check if this is actually true? # - What evidence could there be that it's wrong? 
# # + # %matplotlib inline # %config InlineBackend.figure_format='retina' from __future__ import absolute_import, division, print_function import matplotlib as mpl from matplotlib import pyplot as plt from matplotlib.pyplot import GridSpec import seaborn as sns import numpy as np import pandas as pd import os, sys from tqdm import tqdm import warnings warnings.filterwarnings('ignore') sns.set_context("poster", font_scale=1.3) import missingno as msno import pandas_profiling from sklearn.datasets import make_blobs import time # - # ## About the Data # # > The dataset is available as a list with 146,028 dyads of players and referees and includes details from players, details from referees and details regarding the interactions of player-referees. A summary of the variables of interest can be seen below. A detailed description of all variables included can be seen in the README file on the project website. # # > From a company for sports statistics, we obtained data and profile photos from all soccer players (N = 2,053) playing in the first male divisions of England, Germany, France and Spain in the 2012-2013 season and all referees (N = 3,147) that these players played under in their professional career (see Figure 1). We created a dataset of player–referee dyads including the number of matches players and referees encountered each other and our dependent variable, the number of red cards given to a player by a particular referee throughout all matches the two encountered each other. 
#
# > -- https://docs.google.com/document/d/1uCF5wmbcL90qvrk_J27fWAvDcDNrO9o_APkicwRkOKc/edit
#
#
# | Variable Name: | Variable Description: |
# | -- | -- |
# | playerShort | short player ID |
# | player | player name |
# | club | player club |
# | leagueCountry | country of player club (England, Germany, France, and Spain) |
# | height | player height (in cm) |
# | weight | player weight (in kg) |
# | position | player position |
# | games | number of games in the player-referee dyad |
# | goals | number of goals in the player-referee dyad |
# | yellowCards | number of yellow cards player received from the referee |
# | yellowReds | number of yellow-red cards player received from the referee |
# | redCards | number of red cards player received from the referee |
# | photoID | ID of player photo (if available) |
# | rater1 | skin rating of photo by rater 1 |
# | rater2 | skin rating of photo by rater 2 |
# | refNum | unique referee ID number (referee name removed for anonymizing purposes) |
# | refCountry | unique referee country ID number |
# | meanIAT | mean implicit bias score (using the race IAT) for referee country |
# | nIAT | sample size for race IAT in that particular country |
# | seIAT | standard error for mean estimate of race IAT |
# | meanExp | mean explicit bias score (using a racial thermometer task) for referee country |
# | nExp | sample size for explicit bias in that particular country |
# | seExp | standard error for mean estimate of explicit bias measure |
#
#

# +
# Uncomment one of the following lines and run the cell:

# df = pd.read_csv("redcard.csv.gz", compression='gzip')
# df = pd.read_csv("https://github.com/cmawer/pycon-2017-eda-tutorial/raw/master/data/redcard/redcard.csv.gz", compression='gzip')
# -

def save_subgroup(dataframe, g_index, subgroup_name, prefix='raw_'):
    """Write *dataframe* to "<prefix><subgroup_name>.csv.gz" and verify the round trip.

    The frame is written as a gzipped CSV, immediately read back using
    *g_index* as the index column(s), and compared against the original.
    A message is printed indicating whether the round trip was lossless.
    """
    save_subgroup_filename = "".join([prefix, subgroup_name, ".csv.gz"])
    dataframe.to_csv(save_subgroup_filename, compression='gzip', encoding='UTF-8')
    test_df = pd.read_csv(save_subgroup_filename, compression='gzip',
                          index_col=g_index, encoding='UTF-8')
    # Test that we recover what we send in
    if dataframe.equals(test_df):
        print("Test-passed: we recover the equivalent subgroup dataframe.")
    else:
        print("Warning -- equivalence test!!! Double-check.")


def load_subgroup(filename, index_col=None):
    """Load a gzipped CSV subgroup written by save_subgroup.

    *index_col* defaults to the first column ([0]); a None sentinel is
    used instead of a mutable default argument.
    """
    if index_col is None:
        index_col = [0]
    return pd.read_csv(filename, compression='gzip', index_col=index_col)


clean_players = load_subgroup("cleaned_players.csv.gz")
players = load_subgroup("raw_players.csv.gz")
countries = load_subgroup("raw_countries.csv.gz")
referees = load_subgroup("raw_referees.csv.gz")
agg_dyads = pd.read_csv("raw_dyads.csv.gz", compression='gzip', index_col=[0, 1])
# tidy_dyads = load_subgroup("cleaned_dyads.csv.gz")
tidy_dyads = pd.read_csv("cleaned_dyads.csv.gz", compression='gzip', index_col=[0, 1])

# ## Joining and further considerations

# !conda install pivottablejs -y

from pivottablejs import pivot_ui

clean_players = load_subgroup("cleaned_players.csv.gz")

temp = tidy_dyads.reset_index().set_index('playerShort').merge(clean_players, left_index=True, right_index=True)

temp.shape

# +
# This does not work on Azure notebooks out of the box
# pivot_ui(temp[['skintoneclass', 'position_agg', 'redcard']], )
# -

# How many games has each player played in?
games = tidy_dyads.groupby(level=1).count() sns.distplot(games); (tidy_dyads.groupby(level=0) .count() .sort_values('redcard', ascending=False) .rename(columns={'redcard':'total games refereed'})).head() (tidy_dyads.groupby(level=0) .sum() .sort_values('redcard', ascending=False) .rename(columns={'redcard':'total redcards given'})).head() (tidy_dyads.groupby(level=1) .sum() .sort_values('redcard', ascending=False) .rename(columns={'redcard':'total redcards received'})).head() tidy_dyads.head() tidy_dyads.groupby(level=0).size().sort_values(ascending=False) total_ref_games = tidy_dyads.groupby(level=0).size().sort_values(ascending=False) total_player_games = tidy_dyads.groupby(level=1).size().sort_values(ascending=False) total_ref_given = tidy_dyads.groupby(level=0).sum().sort_values(ascending=False,by='redcard') total_player_received = tidy_dyads.groupby(level=1).sum().sort_values(ascending=False, by='redcard') sns.distplot(total_player_received, kde=False); sns.distplot(total_ref_given, kde=False); tidy_dyads.groupby(level=1).sum().sort_values(ascending=False, by='redcard').head() tidy_dyads.sum(), tidy_dyads.count(), tidy_dyads.sum()/tidy_dyads.count() player_ref_game = (tidy_dyads.reset_index() .set_index('playerShort') .merge(clean_players, left_index=True, right_index=True) ) player_ref_game.head() player_ref_game.shape bootstrap = pd.concat([player_ref_game.sample(replace=True, n=10000).groupby('skintone').mean() for _ in range(100)]) ax = sns.regplot(bootstrap.index.values, y='redcard', data=bootstrap, lowess=True, scatter_kws={'alpha':0.4,}, x_jitter=(0.125 / 4.0)) ax.set_xlabel("Skintone");
notebooks/1-RedCard-EDA/4-Redcard-final-joins.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="42k3bWsR0uNa"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from keras.datasets import boston_housing
from keras.models import Sequential
from keras.layers import Dense

# + id="jXxDyEYm6vof"
# Load the Boston housing data
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()

# + colab={"base_uri": "https://localhost:8080/"} id="nY_oM_lA7_aQ" outputId="be7b7e74-1db6-4365-e46d-456d6e40aa7d"
x_train.shape

# + colab={"base_uri": "https://localhost:8080/"} id="cECJoj-g8JiP" outputId="9dcda4b9-077f-4ffa-deb6-e090b17c4fcd"
x_test.shape

# + id="IBU_naTo6_IQ"
# Standardise the features using the *training* set statistics
mean = x_train.mean(axis=0)
std = x_train.std(axis=0)
x_train -= mean
x_train /= std
x_test -= mean
x_test /= std

# + id="1Ogt1jH48b84"
# Build the network: one hidden layer, linear output for regression
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(x_train.shape[1],)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

# + colab={"base_uri": "https://localhost:8080/"} id="YEcd2AMG_DJa" outputId="6b620246-f8da-493a-c2f6-9e0799bd232e"
model.summary()

# + colab={"base_uri": "https://localhost:8080/"} id="4gQ7_2zY8jAA" outputId="b1ab6bb6-f389-4161-c40e-4077bde11d88"
# Train the network, holding out 20% of the training data for validation
history = model.fit(x_train, y_train, epochs=100, verbose=1, validation_split=0.2)

# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="Gkev8Lmk_fe4" outputId="d4062f81-0778-4461-f241-3f83a2465e29"
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()

# + id="fUGNr0x8_znm"
def plot_loss(history):
    """Plot training vs. validation loss (MSE) over the training epochs."""
    plt.plot(history.history['loss'], label='loss')
    plt.plot(history.history['val_loss'], label='val_loss')
    plt.ylim([0, 600])
    plt.xlabel('Epoch')
    plt.ylabel('Error')
    plt.legend()
    plt.grid(True)

# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="llUFChsD_5mv" outputId="4d949437-60c8-4546-c966-543f9b640cd3"
plot_loss(history)

# + id="UWtUaR3U9KHI"
# Evaluate model accuracy on the test set (loss is MSE, metric is MAE)
mse, mae = model.evaluate(x_test, y_test, verbose=0)

# + colab={"base_uri": "https://localhost:8080/"} id="z8KMHWAp9R2_" outputId="6ab89f96-d689-4358-c126-509c51c70c3c"
print("Средняя абсолютная ошибка (тысяч долларов):", mae)

# + id="2gtudltn9XtI"
# Predict prices for the test set
pred = model.predict(x_test)

# + colab={"base_uri": "https://localhost:8080/"} id="Y2BxyC-_9dKv" outputId="1af2b5d5-05b3-4e83-e051-4b89c019d126"
print("Предсказанная стоимость:", pred[1][0], ", правильная стоимость:", y_test[1])
keras/google_colab_keras_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #from Protocole_complet import search_wiki_ #from ipynb.fs.full.Protocole_complet import search_wiki_ import glob from ipynb.fs.full.scrap_on_google import advanced_scraper from ipynb.fs.full.similarity_measure import compute_best_terms, compute_best_doc # from ipynb.fs.full.applyBiotex import biotex_terms_extractor # ### Build thematic corpora # * Build final thematic corpora by specifying an Spatial extent, and the thematic vocabulary concepts file ### Automate for all voc_concepts files # files_list = glob.glob("./voc_concept/agriculture.txt") voc_concept_file = "./voc_concept/agriculture.txt" spatial_extent = 'montpellier' # mgdb,mgcol = 'inventaire_medo', 'agriculture' # parameters to be set initially # for voc_concept in files_list: advanced_scraper(spatial_extent,voc_concept_file,voc_concept_file)
example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="bzOQPCl06PvI" colab={"base_uri": "https://localhost:8080/"} outputId="094a18c5-bbae-4683-cfc1-a3cca6c0b8dd"
# !git clone https://github.com/phan-le-phu/Plate_detect_and_recognize

# + id="KP7OzZkj6UNj" colab={"base_uri": "https://localhost:8080/"} outputId="2d417390-24cb-42de-e762-4cef070060aa"
# cd Plate_detect_and_recognize/

# + id="eaXOzyLB6MQd"
# remove TensorFlow warning messages
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# required libraries
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from local_utils import detect_lp
from os.path import splitext,basename
from keras.models import model_from_json
import glob
import math

# + [markdown] id="-LY12xfI6MQo"
# #### Extract license plate from sample _Plate_examples/germany_car_plate.jpg_

# + id="c5P7VU3U6MQp"
def load_model(path):
    """Load a Keras model from <path>.json (architecture) + <path>.h5 (weights)."""
    try:
        path = splitext(path)[0]
        with open('%s.json' % path, 'r') as json_file:
            model_json = json_file.read()
        model = model_from_json(model_json, custom_objects={})
        model.load_weights('%s.h5' % path)
        print("Loading model successfully...")
        return model
    except Exception as e:
        # NOTE(review): on failure the error is printed and the function
        # implicitly returns None — downstream code will then fail on a
        # None model. Consider re-raising instead of swallowing.
        print(e)

# + id="Wrh6g-Fc6MQq" colab={"base_uri": "https://localhost:8080/"} outputId="30db37d3-0ae4-49da-bb9e-e739f44f62eb"
wpod_net_path = "wpod-net.json"
wpod_net = load_model(wpod_net_path)

# + id="M7mqkdca6MQs" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="b6f0e9b7-2055-4316-bd31-950b63535c06"
def preprocess_image(image_path,resize=False):
    """Read an image, convert BGR->RGB and scale pixel values to [0, 1]."""
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255
    if resize:
        img = cv2.resize(img, (224,224))
    return img

def get_plate(image_path, Dmax=608, Dmin=256):
    """Run WPOD-Net on an image; return (vehicle image, plate crops, plate corners)."""
    vehicle = preprocess_image(image_path)
    # Scale the network's input side by the image aspect ratio, capped at Dmax.
    ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])
    side = int(ratio * Dmin)
    bound_dim = min(side, Dmax)
    _ , LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold=0.5)
    return vehicle, LpImg, cor

#test_image_path = "test_2.jpg"
test_image_path = "/content/xe-may-bien-ngu-quy-1602131030738310515689.jpg"
vehicle, LpImg,cor = get_plate(test_image_path)

# + id="W6rsSHEZBlcI" colab={"base_uri": "https://localhost:8080/", "height": 536} outputId="64fa3b73-e0f2-49fc-c85d-a940ddb628e9"
# Show the input image plus each detected plate crop.
nrows = math.ceil(len(LpImg) / 3) + 1

fig3 = plt.figure(figsize=(20, 10))
ax = fig3.add_subplot(nrows, 3, 1)
ax.axis("off")
ax.set_title("Hình ảnh cần detect")
implot = plt.imshow(vehicle)

for i in range(len(LpImg)):
    ax = fig3.add_subplot(nrows, 3, 4 + i)
    ax.axis("off")
    ax.set_title(f"Biển số {i + 1}")
    plt.imshow(LpImg[i])

# + id="7as0wre86MQt" colab={"base_uri": "https://localhost:8080/", "height": 433} outputId="44c9dc22-8651-4a13-cfd5-c15189ec9f55"
if (len(LpImg)): #check if there is at least one license image
    # Scales, calculates absolute values, and converts the result to 8-bit.
    plate_image = cv2.convertScaleAbs(LpImg[0], alpha=(255.0))

    # convert to grayscale and blur the image
    gray = cv2.cvtColor(plate_image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray,(7,7),0)

    # Apply inverted binary threshold (Otsu picks the threshold level)
    binary = cv2.threshold(blur, 180, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)

    # visualize results
    fig = plt.figure(figsize=(12,7))
    plt.rcParams.update({"font.size":18})
    grid = gridspec.GridSpec(ncols=2,nrows=3,figure = fig)
    plot_image = [plate_image, gray, blur, binary,thre_mor]
    plot_name = ["plate_image","gray","blur","binary","dilation"]

    for i in range(len(plot_image)):
        fig.add_subplot(grid[i])
        plt.axis(False)
        plt.title(plot_name[i])
        if i ==0:
            plt.imshow(plot_image[i])
        else:
            plt.imshow(plot_image[i],cmap="gray")

    # plt.savefig("threshding.png", dpi=300)

# + id="Ln18UR8o6MQt" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="10b544ba-a6ff-4089-c265-27455525bab0"
# sort_contours() grabs the contour of each digit from left to right
def sort_contours(cnts,reverse = False):
    """Sort contours by the x coordinate of their bounding boxes."""
    i = 0  # index 0 of the bounding box = x coordinate
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    return cnts

cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# create a copy "test_roi" of plate_image to draw bounding boxes on
test_roi = plate_image.copy()

# Initialize a list which will be used to append character images
crop_characters = []

# define standard width and height of a character
digit_w, digit_h = 30, 60

for c in sort_contours(cont):
    (x, y, w, h) = cv2.boundingRect(c)
    ratio = h/w
    if 1<=ratio<=3.5: # Only select contours with a digit-like aspect ratio
        if h/plate_image.shape[0]>=0.5: # Select contours taller than 50% of the plate
            # Draw bounding box around the digit
            cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255,0), 2)

            # Separate the digit and prepare it for prediction
            curr_num = thre_mor[y:y+h,x:x+w]
            curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
            _, curr_num = cv2.threshold(curr_num, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            crop_characters.append(curr_num)

print("Detect {} letters...".format(len(crop_characters)))

fig = plt.figure(figsize=(10,6))
plt.axis(False)
plt.imshow(test_roi)
#plt.savefig('grab_digit_contour.png',dpi=300)

# + id="A0S_w4zb6MQu" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="c27e038a-09ed-4ca5-d74e-458222e147bc"
# Show each cropped character image.
fig = plt.figure(figsize=(14,4))
grid = gridspec.GridSpec(ncols=len(crop_characters),nrows=1,figure=fig)

for i in range(len(crop_characters)):
    fig.add_subplot(grid[i])
    plt.axis(False)
    plt.imshow(crop_characters[i],cmap="gray")
#plt.savefig("segmented_leter.png",dpi=300)

# + [markdown] id="gNL9L__v6MQ2"
# # The end!
[Part 2]Plate_character_segmentation_with_OpenCV.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import json
import re

import pandas as pd
# -

# ### Import ESIF data

wales = pd.read_excel('input/wales-approved-projects-2015.xlsx')
wales.head()

# remove title lines
wales = wales[~wales['EU Programme'].isnull()]

# ### Manually put in postcodes
#
# Initially I tried to merge with GRID data, but this proved inefficient

wales.shape

wales[['Lead Organisation']].sort_values(axis=0, by='Lead Organisation').tail(40)

# Hand-built postcode lookup; tuples may carry up to four postcodes per
# organisation (shorter tuples are padded with NaN by the DataFrame constructor).
wales_locations_lookup = pd.DataFrame([
    ('Aberystwyth Innovation and Enterprise Campus Ltd', 'SY233EE'),
    ('Aberystwyth University', 'SY23 3FL'),
    ('Bangor University', 'LL572DG'),
    ('Blaenau Gwent County Borough Council', 'NP236XB'),
    ('Bridgend County Borough Council', 'CF314WB'),
    ('Caerphilly County Borough Council', 'CF827PG', 'CF827WF'),
    ('Cardiff University', 'CF103AT'),
    ('Cardiff Metropolitan University', 'CF52YB'),
    ('Carmarthenshire County Council', 'SA183EL'),
    ('Ceredigion County Council', 'SY233UE'),
    ('Chwarae Teg', 'CF245JW'),
    ('Coleg Cambria', 'LL137UH', 'CH5 4BR', 'LL152LB', 'CH76AA'),
    ('Coleg Gwent', 'NP19 4TS'),
    ('Coleg Y Cymoedd', 'CF818RD', 'CF448EN', 'CF157QY', 'CF827XR'),
    ('Conwy County Borough Council', 'LL328DU'),
    ('Denbighshire County Borough Council', 'LL159AZ'),
    ('Engineering Education Scheme Wales', 'CF313WT'),
    ('Finance Wales Plc', 'CF104BZ', 'SA148LQ', 'SY161RB', 'LL170JD'),
    ('Gower College Swansea', 'SA2 9EB', 'SA4 6RD', 'SA1 4QA'),
    ('Grwp Llandrillo-Menai', 'LL284HZ', 'LL572TP', 'LL402SW'),
    ('Higher Education Funding Council for Wales', 'CF83 8WT'),
    ('Isle of Anglesey County Council', 'LL777TW'),
    ('Marine Power Systems Limited', 'SA18AS'),
    ('Menter Mon', 'LL777LR'),
    ('Minesto UK LTD', 'LL651UN', 'BT221NZ'),
    ('Neath Port Talbot County Borough Council', 'SA131PJ'),
    ('Newport City Council', 'NP204UR'),
    ('Pembrokeshire Coastal Forum', 'SA726UL'),
    ('Pembrokeshire County Council', 'SA611TP'),
    ('Powys County Council', 'LD15LG'),
    ('Rhondda Cynon Taff Council', 'CF402XX'),
    ('SOVA', 'CF101FE'),
    ('Swansea Council', 'SA13SN'),
    ('Swansea University', 'SA2 8PP'),
    ('TWI Ltd', 'SA131SB'),
    ('Torfaen County Borough Council', 'NP46YB'),
    ('University of South Wales', 'CF37 1DL'),
    ('University of Wales Trinity Saint David', 'SA487ED'),
    ('WG - Department for Education and Skills', 'CF103NQ'),
    ('WG - Department for Health and Social Services', 'CF103NQ'),
    ('WG - Department of Local Government and Communities', 'CF103NQ'),
    ('WG - Department for Economy, Science and Transport', 'CF103NQ'),
    ('Wales Co operative Centre', 'CF832AX'),
    ('Wales Council for Voluntary Action', 'CF832AX', 'CF105FH', 'SY233AH'),
    ('Wave Hub Limited', 'TR274DD'),
    ('Wave-Tricity Limited', 'SA726YH'),
    ('Welsh Contact Centre Forum Ltd', 'CF642AG'),
], columns=['Lead Organisation', 'postcode', 'postcode2', 'postcode3', 'postcode4'])
wales_locations_lookup.head()

# Not found: 'WAG The Economic Development Group within DEandT', WAG DEandT WEFO.
# More locations exist for Gower College Swansea.

# ### Merge back together and rename

wales_locations = pd.merge(
    wales, wales_locations_lookup, how='left', on='Lead Organisation')
wales_locations.shape

wales_locations.columns

wales_locations[wales_locations.postcode.isnull()][['Lead Organisation']].sort_values('Lead Organisation').head(40)

wales_locations = wales_locations.rename(index=str, columns={
    'Project Title': 'project',
    'Lead Organisation': 'beneficiary',
    'EU funds awarded': 'eu_investment',
    'Total Project cost': 'project_cost',
    'EU Programme': 'funds',
    'Project Description': 'summary',
    'Project start date': 'start_date',
    'Project end date': 'end_date',
    'Union co-financing rate, as per Priority Axis': 'prop_eu_financed',
    'Case ID': 'case_id',
    'postcode': 'raw_postcode',
})

wales_locations.raw_postcode.isna().sum()

esif = wales_locations
esif.shape

esif.columns

# ### Beneficiary

esif.beneficiary[esif.beneficiary.str.strip() != esif.beneficiary]

# ### Case ID
#
# Appears to be unique.

[esif.shape[0], esif.case_id.nunique(), esif.case_id.isna().sum()]

esif.case_id = esif.case_id.astype('int32')

esif.case_id.nunique()

# ### Project
#
# Do we have any duplicate project names? Does not look like it.

esif.project[esif.project.str.contains('\n')]

esif.project[esif.project.str.strip() != esif.project]

esif.project = esif.project.str.strip()

esif.project.unique().shape

esif[esif.duplicated(['beneficiary', 'project', 'funds'], keep=False)].sort_values('project')

# ### Summary

esif.summary.isna().sum()

(esif.summary != esif.summary.str.strip()).sum()

esif.summary = esif.summary.str.strip()

# ### Funds

esif.funds.isna().sum()

esif.funds.unique()

# +
# Collapse the regional programme names down to the bare fund name (ERDF / ESF).
esif.funds = esif.funds.\
    str.strip().str.replace('East Wales ERDF', 'ERDF').\
    str.replace('West Wales and the Valleys ERDF', 'ERDF').\
    str.replace('East Wales ESF', 'ESF').\
    str.replace('West Wales and the Valleys ESF', 'ESF')
esif.funds.unique()
# -

# ### Project Cost

esif.project_cost.isna().sum()

esif.project_cost = esif.project_cost.map(str).str.strip()

# Flag any cost values containing non-numeric characters.
project_cost_bad = esif.project_cost.str.match(re.compile(r'.*[^0-9.].*'))
esif.project_cost[project_cost_bad]

# ### EU Investment
#

esif.eu_investment.isna().sum()

esif.eu_investment = esif.eu_investment.map(str).str.strip()

eu_investment_bad = esif.eu_investment.str.match(re.compile(r'.*[^0-9.].*'))
esif.eu_investment[eu_investment_bad]

# ### Overfunding
#

esif.project_cost = esif.project_cost.astype('float')
esif.eu_investment = esif.eu_investment.astype('float')

overfunded = esif.eu_investment > esif.project_cost
esif[overfunded]

# ### Prop EU Financed
#
# This provides a useful check. The [ESF guidance for 2014-2020](https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/710305/ESF_Guidance_for_2014_2020_v2.pdf) says that contributions over 50% are unlikely in the UK.
#
# Most are not the stated value, but they don't look obviously wrong.

esif.prop_eu_financed.isna().sum()

esif.prop_eu_financed.describe()

esif['actual_prop'] = esif.eu_investment / esif.project_cost
esif.actual_prop.describe()

esif[(esif.actual_prop - esif.prop_eu_financed).abs() > 0.05]

# ### Postcode
#

[esif.shape, esif.raw_postcode.isna().sum()]

esif = esif[~esif.raw_postcode.isna()].copy()
esif.shape

ukpostcodes = pd.read_csv('../postcodes/input/ukpostcodes.csv.gz')
ukpostcodes.shape

esif.raw_postcode.isin(ukpostcodes.postcode).sum()

# Normalise postcodes: uppercase, strip non-alphanumerics, then re-insert the
# single space before the final "digit + two letters" inward code.
esif['postcode'] = esif.raw_postcode.\
    str.upper().\
    str.strip().\
    str.replace(r'[^A-Z0-9]', '').\
    str.replace(r'^(\S+)([0-9][A-Z]{2})$', r'\1 \2')

esif.postcode.isin(ukpostcodes.postcode).sum()

esif.postcode[~esif.postcode.isin(ukpostcodes.postcode)].unique()

# ### Start and End Dates
#

[esif.start_date.isna().sum(), esif.start_date.dtype]

[esif.end_date.isna().sum(), esif.end_date.dtype]

(esif.start_date >= esif.end_date).sum()

esif[['start_date', 'end_date']].describe()

# We have one end date with an hour; keep only the day.

esif[esif.end_date.dt.ceil('d') != esif.end_date.dt.floor('d')]['end_date']

# Truncate all end dates to day precision.
esif.end_date = esif.end_date.values.astype('<M8[D]')

# ## Save Data

clean_esif = esif.drop([
    'Priority', 'prop_eu_financed', 'raw_postcode',
    'postcode2', 'postcode3', 'postcode4',
    'Source: WEFO, 30/04/2018',
    'Welsh Government Targeted Match Funding, Yes / No’',
    'Sector ', 'Joint Sponsors', 'Regional area(s)',
    'Category of intervention', 'actual_prop'
], axis=1)
clean_esif.head()

# Stable identifier: "<fund>_wales_<case id>".
clean_esif['my_eu_id'] = clean_esif.funds.str.lower() + '_wales_' + clean_esif.case_id.astype('str')
clean_esif.my_eu_id.head()

clean_esif.drop('case_id', axis=1, inplace=True)

clean_esif.to_pickle('output/esif_wales.pkl.gz')

# ## Save Map Data

clean_esif_locations = pd.merge(clean_esif, ukpostcodes, validate='m:1')
clean_esif_locations.head()

def make_esif_data_geo_json(data):
    """Convert a located ESIF DataFrame into a GeoJSON FeatureCollection dict.

    Each row becomes a Point feature at (longitude, latitude) carrying the
    beneficiary, project, project_cost and eu_investment properties.
    """
    def make_feature(row):
        properties = {
            property: row[property]
            for property in ['beneficiary', 'project', 'project_cost', 'eu_investment']
        }
        return {
            'type': 'Feature',
            'geometry': {
                "type": "Point",
                "coordinates": [row['longitude'], row['latitude']]
            },
            'properties': properties
        }
    features = list(data.apply(make_feature, axis=1))
    return {
        'type': 'FeatureCollection',
        'features': features
    }

with open('output/wales_data.geo.json', 'w') as file:
    json.dump(make_esif_data_geo_json(clean_esif_locations), file, sort_keys=True)
data/esif/wales_esif_clean.ipynb