code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3-azureml # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Erkennen und Analysieren von Gesichtern # # Lösungen für maschinelles Sehen erfordern häufig eine Lösung mit Künstlicher Intelligenz (KI), um menschliche Gesichter erkennen, analysieren oder identifizieren zu können. Angenommen, das Einzelhandelsunternehmen Northwind Traders möchte einen „Smart Store“ implementieren, der von KI überwacht wird, um zu identifizieren, welche Kunden Unterstützung benötigen, und Mitarbeiter zu diesen Kunden zu schicken. Dieses Ziel kann mit der Erkennung und Analyse von Gesichtern erreicht werden. Dazu müssen Gesichter in den Bildern erkannt und ihre Merkmale analysiert werden. # # ![Ein Roboter, der ein Gesicht analysiert](./images/face_analysis.jpg) # # ## Verwenden des Cognitive Service „Gesichtserkennung“, um Gesichter zu erkennen # # Angenommen, das Smart-Store-System für Northwind Traders soll Kunden erkennen und ihre Gesichtsmerkmale analysieren können. In Microsoft Azure können Sie zu diesem Zweck die **Gesichtserkennung** verwenden, die zu den Azure Cognitive Services gehört. # # ### Erstellen einer Cognitive Services-Ressource # # Erstellen Sie zunächst eine **Cognitive Services**-Ressource in Ihrem Azure-Abonnement. # # > **Hinweis**: Falls Sie bereits eine Cognitive Services-Ressource haben, können Sie die entsprechende **Schnellstart**-Seite im Azure-Portal öffnen und den Schlüssel und den Endpunkt der Ressource unten in die Zelle kopieren. Führen Sie andernfalls die folgenden Schritte aus, um eine Ressource zu erstellen. # # 1. Öffnen Sie das Azure-Portal unter „https://portal.azure.com“ in einer neuen Browserregisterkarte, und melden Sie sich mit Ihrem Microsoft-Konto an. # 2. 
Klicken Sie auf die Schaltfläche **&#65291;Ressource erstellen**, suchen Sie nach *Cognitive Services*, und erstellen Sie eine **Cognitive Services**-Ressource mit den folgenden Einstellungen: # - **Abonnement**: *Ihr Azure-Abonnement* # - **Ressourcengruppe**: *Wählen Sie eine Ressourcengruppe aus, oder erstellen Sie eine Ressourcengruppe mit einem eindeutigen Namen.* # - **Region**: *Wählen Sie eine verfügbare Region aus*: # - **Name**: *Geben Sie einen eindeutigen Namen ein.* # - **Tarif**: S0 # - **Ich bestätige, dass ich die Hinweise gelesen und verstanden habe**: Ausgewählt # 3. Warten Sie, bis die Bereitstellung abgeschlossen ist. Öffnen Sie anschließend Ihre Cognitive Services-Ressource, und klicken Sie auf der Seite **Übersicht** auf den Link zur Schlüsselverwaltung für den Dienst. Sie benötigen den Endpunkt und Schlüssel, um sich aus Clientanwendungen heraus mit Ihrer Cognitive Services-Ressource zu verbinden. # # ### Abrufen des Schlüssels und Endpunkts für Ihre Cognitive Services-Ressource # # Um Ihre Cognitive Services-Ressource verwenden zu können, benötigen Clientanwendungen deren Endpunkt und Authentifizierungsschlüssel: # # 1. Kopieren Sie im Azure-Portal auf der Seite **Schlüssel und Endpunkt** für Ihre Cognitive Service-Ressource den **Schlüssel1** für Ihre Ressource, und fügen Sie ihn im unten stehenden Code anstelle von **YOUR_COG_KEY** ein. # # 2. Kopieren Sie den **Endpunkt** für Ihre Ressource, und fügen Sie ihn unten im Code anstelle von **YOUR_COG_ENDPOINT** ein. # # 3. Führen Sie die folgende Codezelle aus, indem Sie oben links in der Zelle auf die Schaltfläche „Zelle ausführen“ <span>&#9655;</span> klicken. 
# + gather={"logged": 1599693964655}
cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'

print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))
# -

# Now you have a Cognitive Services resource you can use with the Face service
# to detect human faces in the store.
#
# Run the code cell below to see an example.

# + gather={"logged": 1599693970079}
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from python_code import faces
import os
# %matplotlib inline

# Create a face detection client.
face_client = FaceClient(cog_endpoint, CognitiveServicesCredentials(cog_key))

# Open an image and detect faces. The `with` block closes the file handle,
# which the original code leaked.
image_path = os.path.join('data', 'face', 'store_cam2.jpg')
with open(image_path, "rb") as image_stream:
    detected_faces = face_client.face.detect_with_stream(image=image_stream)

# Display the faces (code in python_code/faces.py)
faces.show_faces(image_path, detected_faces)
# -

# Each detected face is assigned a unique ID, so your application can refer to
# each individual face that was detected.
#
# Run the cell below to see the IDs for some more shopper faces.

# + gather={"logged": 1599693970447}
# Open an image and detect faces (file handle closed via `with`)
image_path = os.path.join('data', 'face', 'store_cam3.jpg')
with open(image_path, "rb") as image_stream:
    detected_faces = face_client.face.detect_with_stream(image=image_stream)

# Display the faces (code in python_code/faces.py)
faces.show_faces(image_path, detected_faces, show_id=True)
# -

# ## Analyzing facial attributes
#
# The Face service can do much more than detect faces. It can also analyze
# facial features and expressions to suggest age and emotional state. Run the
# code below to analyze the facial attributes of a shopper.
# + gather={"logged": 1599693971321}
# Open an image and detect faces together with the requested facial attributes.
# The `with` block closes the file handle, which the original code leaked.
image_path = os.path.join('data', 'face', 'store_cam1.jpg')
attributes = ['age', 'emotion']
with open(image_path, "rb") as image_stream:
    detected_faces = face_client.face.detect_with_stream(image=image_stream,
                                                         return_face_attributes=attributes)

# Display the faces and attributes (code in python_code/faces.py)
faces.show_face_attributes(image_path, detected_faces)
# -

# Based on the emotion scores for the customer shown in the image, they seem
# quite happy with the shopping experience.
#
# ## Finding similar faces
#
# The face IDs of the detected faces let you refer to each detected face
# individually. You can use these IDs to compare a detected face with
# previously detected faces and find faces with similar features.
#
# For example, run the cell below to compare the shopper in one image with
# shoppers in another image and find a similar face.
# + gather={"logged": 1599693972555}
# Get the ID of the first face in image 1.
# `with` blocks close the file handles, which the original code leaked.
image_1_path = os.path.join('data', 'face', 'store_cam3.jpg')
with open(image_1_path, "rb") as image_1_stream:
    image_1_faces = face_client.face.detect_with_stream(image=image_1_stream)
face_1 = image_1_faces[0]

# Get the face IDs in a second image
image_2_path = os.path.join('data', 'face', 'store_cam2.jpg')
with open(image_2_path, "rb") as image_2_stream:
    image_2_faces = face_client.face.detect_with_stream(image=image_2_stream)
# A list comprehension is the idiomatic form of map(lambda ...)
image_2_face_ids = [face.face_id for face in image_2_faces]

# Find faces in image 2 that are similar to the one in image 1
similar_faces = face_client.face.find_similar(face_id=face_1.face_id, face_ids=image_2_face_ids)

# Show the face in image 1, and similar faces in image 2 (code in python_code/faces.py)
faces.show_similar_faces(image_1_path, face_1, image_2_path, image_2_faces, similar_faces)
# -

# ## Recognizing faces
#
# So far we have seen how to detect faces and their attributes, and how to find
# faces that look alike. The next step is a *face recognition* solution: train
# the Face service to recognize a specific person's face. This is useful in
# scenarios such as automatically tagging photos of friends in a social media
# application, or using facial recognition for biometric identity verification.
#
# Suppose Northwind Traders wants to use face recognition to ensure that only
# authorized employees from the IT department can access secured systems.
#
# We start by creating a *person group* for the authorized employees.
# + gather={"logged": 1599693973492}
group_id = 'employee_group_id'
try:
    # Delete group if it already exists
    face_client.person_group.delete(group_id)
except Exception as ex:
    # Python 3 exceptions have no `.message` attribute; printing the exception
    # itself avoids the AttributeError the original code raised here.
    print(ex)
finally:
    face_client.person_group.create(group_id, 'employees')
    print('Group created!')
# -

# With the *person group* created, we can add a *person* for each employee we
# want to include in the group and register multiple photos of each person, so
# the Face service can learn each person's facial characteristics. Ideally the
# images show the person in different poses and with different expressions.
#
# We add an employee called Wendell and register three photos of him.

# + gather={"logged": 1599693976898}
import matplotlib.pyplot as plt
from PIL import Image
import os
# %matplotlib inline

# Add a person (Wendell) to the group
wendell = face_client.person_group_person.create(group_id, 'Wendell')

# Get photos of Wendell
folder = os.path.join('data', 'face', 'wendell')
wendell_pics = os.listdir(folder)

# Register the photos
fig = plt.figure(figsize=(8, 8))
# enumerate(..., 1) replaces the hand-maintained counter; subplot indices are 1-based.
for i, pic in enumerate(wendell_pics, 1):
    # Add each photo to the person in the person group.
    # `with` closes the file handle, which the original code leaked.
    img_path = os.path.join(folder, pic)
    with open(img_path, "rb") as img_stream:
        face_client.person_group_person.add_face_from_stream(group_id, wendell.person_id, img_stream)

    # Display each image
    img = Image.open(img_path)
    a = fig.add_subplot(1, len(wendell_pics), i)
    a.axis('off')
    imgplot = plt.imshow(img)
plt.show()
# -

# With the person added and the photos registered, we can now train the Face
# service to recognize each person.

# + gather={"logged": 1599693977046}
face_client.person_group.train(group_id)
print('Trained!')
# -

# Now that the model is trained, you can use it to identify recognized faces
# in an image.
# + gather={"logged": 1599693994820}
# Detect the face IDs in a new image.
# `with` closes the file handle, which the original code leaked.
image_path = os.path.join('data', 'face', 'employees.jpg')
with open(image_path, "rb") as image_stream:
    image_faces = face_client.face.detect_with_stream(image=image_stream)
image_face_ids = [face.face_id for face in image_faces]

# Map each face ID to the name of the best-matching trained person.
face_names = {}
recognized_faces = face_client.face.identify(image_face_ids, group_id)
for face in recognized_faces:
    # A face may come back with no candidates; guard instead of raising
    # IndexError on candidates[0] as the original code did.
    if face.candidates:
        person = face_client.person_group_person.get(group_id, face.candidates[0].person_id)
        face_names[face.face_id] = person.name

# show recognized faces
faces.show_recognized_faces(image_path, image_faces, face_names)
# -

# ## Learn more
#
# To learn more about the Face cognitive service, see the
# [Face documentation](https://docs.microsoft.com/azure/cognitive-services/face/).
#
04 - Face Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # WeatherPy # ---- # # #### Note # * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps. # Observations/Analysis: # # 1) Obviously the only variable that is decently correlated with latitude is maximum temperature: as cities move away from the equator their maximum temperatures decrease. No other variable was correlated strongly enough with latitude to suggest a meaningful relationship. Temperature is not simply determined by solar radiation and angles, but it can be inferred that it is much less complex than how humidity, cloud cover, and wind speed are determined. # # 2) Interestingly, for every failed correlation, it was much weaker in the northern hemisphere than in the southern hemisphere. Even though there was no correlation between latitude and humidity/cloudiness/wind speed in either hemisphere, it was noticeably "stronger" in the south. The southern hemisphere has much less land than the northern hemisphere, and while this no doubt affects the climate in myriad ways, the explanation might be simpler. There is less land, therefore fewer cities, therefore less data, therefore any regression will inherently be stronger because it is a smaller sample. # # 3) There are some minor things that are noticeable, even if other plots weren't correlated. For example, when it comes to humidity and latitude, in both the nothern and southern hemispheres, cities appear (but it's not suggested by regression) to have a lower humidity around |30| degrees. This tracks with the fact that this is typically where most of the Earth's deserts appear, thanks to being on convergent zones between the Ferrell and Hadley Cells. 
But for the other two variables, no clear patterns (even if weak) are discernable. # # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from scipy.stats import linregress # Import API key from api_keys import weather_api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # - # ## Generate Cities List # + # List for holding lat_lngs and cities and countries lat_lngs = [] cities = [] countries = [] # Create a set of random lat and lng combinations lats = np.random.uniform(lat_range[0], lat_range[1], size=1500) lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination, and the country code for that same lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name country = citipy.nearest_city(lat_lng[0], lat_lng[1]).country_code # If the city is unique, then add it to a our cities list # add the country code (made uppercase) into the countries list if city not in cities: cities.append(city) countries.append(country.upper()) # Print the city count to confirm sufficient count len(cities) # - # create a dataframe with empty columns of all the data we'll want weather_df = pd.DataFrame({"City": cities, "Lat": "", "Lng": "", "Max Temp": "", "Humidity": "", "Cloudiness": "", "Wind Speed": "", "Country": countries, "Date": ""}) weather_df.head() # ### Perform API Calls # * Perform a weather check on each city using a series of successive API calls. # * Include a print log of each city as it'sbeing processed (with the city number and city name). # # + #base url url = "http://api.openweathermap.org/data/2.5/weather?" 
blurb = "-" * 29

# Counters: record number within the current set, and the set number
recordnum = 1
setnum = 1

print("Beginning Data Retrieval")
print(f"{blurb}")

# Iterate through the dataframe; for each row, query the API by city and country.
# Passing the query as `params` lets requests URL-encode city names that contain
# spaces or non-ASCII characters — the original string concatenation produced
# invalid URLs for such cities, so they were silently skipped.
for index, row in weather_df.iterrows():
    params = {"appid": weather_api_key,
              "q": f"{row['City']},{row['Country']}",
              "units": "imperial"}

    # Create a response using requests.get (and make sure it is converted into json)
    response = requests.get(url, params=params).json()

    # For each api call, attempt to retrieve the coordinates, temperature, humidity, etc.
    try:
        print(f"Processing Record {recordnum} of Set {setnum} | {response['name']}")
        weather_df.loc[index, "Lat"] = response["coord"]["lat"]
        weather_df.loc[index, "Lng"] = response["coord"]["lon"]
        weather_df.loc[index, "Max Temp"] = response["main"]["temp_max"]
        weather_df.loc[index, "Humidity"] = response["main"]["humidity"]
        weather_df.loc[index, "Cloudiness"] = response["clouds"]["all"]
        weather_df.loc[index, "Wind Speed"] = response["wind"]["speed"]
        weather_df.loc[index, "Date"] = response["dt"]

        # Increment the counter
        recordnum += 1

    # If the city is not in the database, or the data cannot be found, skip it
    except (KeyError, IndexError):
        print("City not found. Skipping...")

    # Conditions for the counters, so that no more than 50 cities can be in each set
    if recordnum == 51:
        setnum += 1
        recordnum = 1

print(f"{blurb}")
print("Data Retrieval Complete")
print(f"{blurb}")
# -

# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame

# Clean up the dataframe by dropping any rows that are missing data
weather_df["Lat"] = weather_df["Lat"].replace("", np.nan)
weather_df = weather_df.dropna(how="any")

# Name the index column and save the dataframe as a csv file
weather_df.index.name = "City_ID"
weather_df.to_csv("../Output/WeatherDatabyCity.csv")

# Make sure no more data is missing
weather_df.count()

weather_df.head()

# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.

# +
# There are no cities with a humidity above 100;
# still convert the object data of these columns into numeric
for col in ["Lat", "Lng", "Max Temp", "Humidity", "Cloudiness", "Wind Speed", "Date"]:
    weather_df[col] = weather_df[col].apply(pd.to_numeric, errors="coerce")

# So that we can check the max value of the humidity column
weather_df.describe()
# -

# Get the indices of cities that have humidity over 100%.
index_label = weather_df[weather_df["Humidity"] > 100].index.tolist()
index_label

# +
# Make a new dataframe and drop all humidity outliers (if there are any) by index.
# BUG FIX: DataFrame.drop returns a new frame; the original loop discarded the
# result, so outliers were never actually removed. Dropping all labels in one
# call also returns a copy, so weather_df is left untouched.
city_data = weather_df.drop(index_label)

# Save the new dataframe to a csv file
city_data.to_csv("../Output/city_data.csv")
city_data.head()
# -

# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.

# ## Latitude vs.
Temperature Plot # + #use matplotlib to create scatter plots, the code is basically the same each time, only the variables change #facecolor="4C92C3" plt.scatter(city_data["Lat"], city_data["Max Temp"], marker="o") #make sure to create a title, and label the axes, and create a grid plt.title(f"City Latitude vs. Max Temperature (7/20/2020)") plt.ylabel("Max Temperature (F)") plt.xlabel("Latitude") plt.grid(True) #save the scatter plot as an image plt.savefig("../Output/Lat_vs_Temp.png") #repeat this process for the next 4 scatter plots # - # A scatter plot, using matplotlib, testing the correlation between a city's latitude and its maximum temperature (F) # Shows that cities closer to the equator do tend to have a higher maxmimum temperature # ## Latitude vs. Humidity Plot # + plt.scatter(city_data["Lat"], city_data["Humidity"], marker="o") plt.title(f"City Latitude vs. Humidity (7/20/2020)") plt.ylabel("Humidity (%)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Output/Lat_vs_Humid.png") # - # A scatter plot, using matplotlib, testing the correlation between a city's latitude and its humidity # Does not show a clear correlation between a city's latitude and its humidity # Although there is a slight shape that suggests that cities around 30-50 N and (slightly less so for S) have lower humidities than other cities # This is generally where the deserts of Earth exist (and deserts are bigger in the Northern Hemisphere, which explains the uneven distribution of this particular phenomenon between hemispheres) # ## Latitude vs. Cloudiness Plot # + plt.scatter(city_data["Lat"], city_data["Cloudiness"], marker="o") plt.title(f"City Latitude vs. 
Cloudiness (7/20/2020)") plt.ylabel("Cloudiness (%)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Output/Lat_vs_Cloud.png") # - # A scatter plot, using matplotlib, testing the correlation between a city's latitude and its cloud cover percentage # Despite there being a clear shape, its difficult to determine what it suggests, as otherwise it appears totally random how much cloud cover a city will get depending on its latitude # ## Latitude vs. Wind Speed Plot # + plt.scatter(city_data["Lat"], city_data["Wind Speed"], marker="o") plt.title(f"City Latitude vs. Wind Speed (7/20/2020)") plt.ylabel("Wind Speed (mph)") plt.xlabel("Latitude") plt.grid(True) plt.savefig("../Output/Lat_vs_Wind.png") # - # A scatter plot, using matplotlib, testing the correlation between a city's latitude and its wind speed (mph) # Does not show a strong correlation, small suggestion that cities closer to the equator might have slightly higher windspeeds, which can be explained by the Corriolis Effect, but other factors (such as ocean currents, landmass, and if a city is on an island) have to be taken into consideration # ## Linear Regression # OPTIONAL: Create a function to create Linear Regression plots # using linregress and scipy.stats # determine the correlation coefficient using pearsonr, and use that to get the r-squared value # use the linregress variables to create an equation for the regression line # display both the r squared value and the regression line equation # plot the regression line onto the scatter plot that calls this function def regress(x,y): import scipy.stats as st corr = st.pearsonr(x, y) corr1 = corr[0] rsquare = round(corr1**2,6) print(f"The r-squared is: {rsquare}") (slope, intercept, rvalue, pvalue, stderr) = linregress(x, y) regress_values = x * slope + intercept line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2)) plt.plot(x,regress_values,"r-") print(f"The regression line is: {line_eq}") # Create Northern and Southern Hemisphere 
DataFrames # search only for northern hemisphere cities, and then only southern hemisphere cities # both include the equator northern_df = city_data.loc[(city_data["Lat"] >= 0)] southern_df = city_data.loc[(city_data["Lat"] <= 0)] # #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression # + # create a scatter plot using the relevant columns from the data frames, and labeling the axes lat = northern_df["Lat"] temp = northern_df["Max Temp"] plt.scatter(lat, temp, marker="o") plt.xlabel("Latitude") plt.ylabel("Max Temp") # call the regress function to output the numbers we want and to plot the regression line regress(lat,temp) # the rest of the regression lines will use the same basic code template # - # Using a python function to run a linear regression analysis on the scatter plot for Northern Hemisphere cities against their maximum temperatures # The line shows a moderate correlation: as cities appear farther north their maximum temperature decreases # #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression # + lat = southern_df["Lat"] temp = southern_df["Max Temp"] plt.scatter(lat, temp, marker="o") plt.xlabel("Latitude") plt.ylabel("Max Temp") regress(lat,temp) # - # Using a python function to run a linear regression analysis on the scatter plot for Southern Hemisphere cities against their maximum temperatures # The line shows a moderate correlation: as cities appear farther south their maximum temperature decreases # #### Northern Hemisphere - Humidity (%) vs. 
Latitude Linear Regression

# +
lat = northern_df["Lat"]
hum = northern_df["Humidity"]
plt.scatter(lat, hum, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Humidity")
regress(lat, hum)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Northern Hemisphere cities against their humidity
# The line shows practically no correlation; humidity cannot be predicted just by changing temperatures along latitudes — it appears to be a more complex phenomenon

# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression

# +
lat = southern_df["Lat"]
hum = southern_df["Humidity"]
plt.scatter(lat, hum, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Humidity")
regress(lat, hum)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Southern Hemisphere cities against their humidity
# The line shows a very slightly stronger correlation than its northern counterpart, but still not enough to suggest any kind of meaningful relationship between humidity and latitude

# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression

# +
lat = northern_df["Lat"]
cloud = northern_df["Cloudiness"]
plt.scatter(lat, cloud, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness")
regress(lat, cloud)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Northern Hemisphere cities against their cloud cover percentage
# The line shows absolutely no correlation at all; clouds are not dependent on latitude to form

# #### Southern Hemisphere - Cloudiness (%) vs.
Latitude Linear Regression

# +
lat = southern_df["Lat"]
cloud = southern_df["Cloudiness"]
plt.scatter(lat, cloud, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness")
regress(lat, cloud)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Southern Hemisphere cities against their cloud cover percentage
# The line again (coincidentally) shows a slightly stronger correlation than its northern counterpart, but it still does not suggest any kind of relationship

# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression

# +
lat = northern_df["Lat"]
wind = northern_df["Wind Speed"]
plt.scatter(lat, wind, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
regress(lat, wind)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Northern Hemisphere cities against their wind speed
# The line shows no correlation between latitude and wind speed

# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression

# +
lat = southern_df["Lat"]
wind = southern_df["Wind Speed"]
plt.scatter(lat, wind, marker="o")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed")
regress(lat, wind)
# -

# Using a python function to run a linear regression analysis on the scatter plot for Southern Hemisphere cities against their wind speed
# The line shows no correlation between latitude and wind speed, but it is stronger than the northern one
WeatherPy/WeatherPy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.8.11 64-bit (''base'': conda)' # name: python3 # --- # # Running Your First Models # # A Jupyter notebook in the `Python/` demonstrates how to use the Python UCLCHEM module. This page was automatically generated from that notebook. # # In this example, we'll run three UCLCHEM models using the cloud model and the python wrapper. We'll also use the python module to plot the results. # # Note, we ran this from the `Python/` directory meaning `Python/uclchem/` was in our PATH. You can run scripts elsewhere but `Python/uclchem/` must either be copied to that location or added to your PYTHON_PATH environmental variable. import uclchem import pandas as pd import matplotlib.pyplot as plt # # Phase 1 # # It's typical when running UCLCHEM to run the model in two phases. If, for example, one wishes to model a hot core, the gas should be well processed by the time the protostar "turns on". This could be achieved by initializing the abundances to some known values for a prestellar core but instead we do the following: # # - Run a model starting from some very diffuse, atomic gas # - Allow it to collapse in freefall to the density of the object we wish to model # - Store the abundances as the initial abundances for the hot core model # # UCLCHEM is set up to do this through the ```phase``` parameter. 
All physics modules will model a homogenous cloud that can collapse under freefall when ```phase=1``` and then they do their specific physical model when ```phase=2```

# +
# Set a parameter dictionary for the phase 1 collapse model.
# The cloud starts diffuse (initialDens=1e2) and collapses (collapse=1) to the
# density of the object we want to model (finalDens=1e5); the final abundances
# are written to abundSaveFile for phase 2 to read.
outSpecies = "SO CO"
param_dict = {"phase": 1,
              "switch": 1,
              "collapse": 1,
              "readAbunds": 0,
              "writeStep": 1,
              "outSpecies": outSpecies,
              "initialDens": 1e2,
              "initialTemp": 10.0,
              "finalDens": 1e5,
              "finalTime": 5.0e6,
              "outputFile": "../examples/test-output/phase1-full.dat",
              "abundSaveFile": "../examples/test-output/startcollapse.dat"}
uclchem.run_model(param_dict)
# -

# We can look at the output for that model by using pandas to read the file
# (skipping 2 rows to miss the header) and matplotlib to view abundances.

phase1_df = uclchem.read_output_file("../examples/test-output/phase1-full.dat")
phase1_df.head()

species = ["CO", "#CO", "HCN", "#HCN"]
fig, ax = uclchem.create_abundance_plot(phase1_df, species)
ax = ax.set(xscale="log")

# # Phase 2
#
# Note the ```abundFile``` and ```readAbunds``` parameters in our phase 1 model. If ```readAbunds``` is set to 0 then the abundances at the end of the model are written to ```abundFile```. If, instead, ```readAbunds``` is set to 1, the initial abundances are read from that file. Thus by switching ```readAbunds``` over, we can run phase 2 starting from the final abundances of the previous model.

# +
# Read old abundances and do hot core behaviour
param_dict["readAbunds"] = 1
param_dict["phase"] = 2

# Change other bits of input to set up phase 2
param_dict["initialDens"] = 1e5
param_dict["tempindx"] = 3  # selects mass of protostar (see cloud.f90)
param_dict["finalTime"] = 1e6
param_dict["switch"] = 0
param_dict.pop("abundSaveFile")  # this is still set to startcollapse.dat from phase 1 so remove it or change it.
param_dict["abundLoadFile"] = "../examples/test-output/startcollapse.dat"
# BUG FIX: phase 2 must write its own output file. The original set outputFile
# to phase1-full.dat (overwriting the phase 1 results) but then tried to read
# phase2-full.dat below, which was never written.
param_dict["outputFile"] = "../examples/test-output/phase2-full.dat"
uclchem.run_model(param_dict)
# -

phase2_df = uclchem.read_output_file("../examples/test-output/phase2-full.dat")
phase2_df.tail()

# +
# Plot abundances of selected gas-phase and grain-surface (#) species on the
# left, and the density/temperature evolution on the right.
species = ["CO", "H2O", "CH3OH", "#CO", "#H2O", "#CH3OH"]
fig, [ax, ax2] = plt.subplots(1, 2, figsize=(16, 9))
ax = uclchem.plot_species(ax, phase2_df, species)
settings = ax.set(yscale="log", xlim=(1, 1e6), ylim=(1e-10, 1e-2),
                  xlabel="Time / years", ylabel="Fractional Abundance", xscale="log")

ax2.plot(phase2_df["Time"], phase2_df["Density"], color="black")
# Temperature goes on a twin axis (red) sharing the time axis
ax3 = ax2.twinx()
ax3.plot(phase2_df["Time"], phase2_df["gasTemp"], color="red")
ax2.set(xlabel="Time / year", ylabel="Density")
ax3.set(ylabel="Temperature", facecolor="red", xlim=(0, 1e6))
ax3.tick_params(axis='y', colors='red')
Python/first_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Spirals # # This Jupyter notebook should allow you to experiment with the plotting of logarithmic spirals using [Matplotlib](https://matplotlib.org/). Have a read through everything before you start running things. # # --- # # We begin by importing useful libraries: import numpy as np import matplotlib.pyplot as plt # ### Plotting a spiral # # We can now plot a spiral. We'll show how to plot a simple arithmetic (or Archimedean) spiral, and leave the logarithmic case to you. For this spiral, we'll plot a slightly scaled version of the simplest possible spiral: $r(\theta) = \dfrac{\theta}{\pi}$. In words, the radius of this spiral at any point is the angle (in radians) counted in units of $\pi$. # # In Python if we want to use the constant $\pi = 3.14159265..$ we can either define it ourselves or, better, use a constant from a library like NumPy; in this case since we've imported NumPy as `np`, we can get the constant $\pi$ using `np.pi`. # # We name our variables as the name of the Greek letter so $\theta$ becomes `theta`. $r$ stays as `r`. # # First off we define some limits for our variables. We'll start with $0 \le \theta \le 6\pi$, which should give us three complete turns. We also define `points_per_radian`. Mathematical functions are defined for every point in space, but computers cannot cope with an infinite number of values; instead we sample our function at this fixed number of points for each radian we plot. Too few, and the plot will look rough and unlike the real function; too many and our code will never be able to work out all of the values required and may crash. If you're curious, try altering this value once you've got the hang of things. 
# +
# Define some useful parameters:
theta_min = 0                   # start angle in radians
theta_max = 6 * np.pi           # end angle: 6*pi radians = three full turns
points_per_radian = 20          # sampling density along the curve

# Set up the array of points we're going to sample at to create a plot:
theta_range = theta_max - theta_min
theta_array = np.linspace(theta_min, theta_max, int(theta_range * points_per_radian))

# Equation of the spiral:
r_array = theta_array / np.pi  # r(theta) = theta/pi

# Initialise a new figure and then plot the spiral:
plt.figure(figsize=(6, 6))  # 'figsize' is the width and height
plt.polar(theta_array, r_array, linewidth=3, color='blue')  # the first two arguments are the angle
                                                            # and the radius at that angle: r(theta)

# Label the axes:
plt.thetagrids(np.arange(0, 360, 90))  # set the positions of the theta labels
plt.gca().set_rlabel_position(0)  # set the angle of the radius labels
plt.gca().grid(linestyle='--', linewidth=0.5)  # make the grid thin and dashed

# Display the result:
plt.tight_layout()
plt.show()
# -

# ### Exploring the parameters
#
# We started off plotting $0 \le \theta \le 6\pi$ (i.e. `theta_min = 0`, `theta_max = 6*np.pi`), which produced a simple spiral that made three complete turns. It started at the origin and the radius increased linearly with the angle. The polar plot means that we see angles larger than $2\pi$ (or $360^{\circ}$) back in the range $0$ to $2\pi$, and so see a continuous spiral. (Imagine rotating a pencil by $450^{\circ}$; we cannot see the difference in the final position between this and a rotation of just $90^{\circ}$, except that here the spiral radius increases with angle unlike a pencil of fixed length. The spiral is more like unwinding a coil of string whilst pulling it taut).
#
# When you're running this notebook yourself for the first time, go to the menu at the top and click "Cell" -> "Run All". Look at the plot this produces above. To explore, you can change the parameters in the cell above and press `Ctrl-Enter` to re-run that cell and update the plot. You may want to try some or all of the suggestions below, to get a feel for logarithmic spirals.
#
# ---
#
# #### To try:
#
# We plotted the arithmetic spiral in the range $0 \le \theta \le 6\pi$; what happens if you change `theta_min` and `theta_max` to plot $6\pi \le \theta \le 12\pi$ ? Change the values back after trying this out, but look at the shape it makes.
#
# What are the distances between the origin and the points where the spiral crosses the line $\theta = 0^{\circ}$, and how do they increase for subsequent terms? What type of series is this?
#
#
# ##### Logarithmic spirals:
#
# The spiral we started with is not a log spiral. If, for log spirals, $\dfrac{\textrm{d} r}{\textrm{d} \theta} = br$; what is $r(\theta)$?
#
# Change the line above that sets the equation of the spiral to be this new function of theta (`r_array = ...`), defining the new variable $b$ on a line above as well. Note that when multiplying and dividing NumPy arrays, the operations are element by element, unlike matrices.
# Use $b = 0.11$ to start out with, since this is a nice example case. You may find it useful to know that the NumPy module supports the natural logarithm as `np.log(...)`, the exponential function as `np.exp(...)` and also trigonometric functions as e.g. `np.sin(...)`.
#
# What are the distances between the origin and the points where the logarithmic spiral crosses the line $\theta = 0^{\circ}$, and how do these increase for subsequent turns? What type of series do these distances form? (This is easiest to see when `b = 0.11`, `theta_min = 0` and `theta_max = 6 * np.pi`).
#
# What happens to a log spiral if instead we plot it for the range $6\pi \le \theta \le 12\pi$ but leave $b$ the same? Keep an eye on both the shape and the numbers on the axis when you change the range. Why should we expect this behaviour, and why it is so different from the arithmetic spiral? Change the range back when you're done.
#
# What is the value of $b$ for the golden spiral? Plot a golden spiral. Note that you don't have to set $b$ to be a numeric value worked out using a calculator, but can define intermediate variables and enter the algebraic value by making use of the NumPy functions. Then try exploring other values of $b$, to make the three turns as clear as possible.
#
# It is possible to plot both an arithmetic and logarithmic spiral on the same plot using Matplotlib. Create two arrays for the radius this time: `r_array_arith` and `r_array_log`, using the two functions of $\theta$ we've met. Repeat the `plt.polar(...)` line, for both these $r$ arrays; choose a different colour for one of them (you can use names like `'red'` and `'blue'`), and then run the code. The `b = 0.11` case is a good one to contrast to the arithmetic spiral $ r = \frac{\theta}{\pi}$.
#
# You can also save images of your plots, if you want, by adding "`plt.savefig('spiral.png')`" to the end of the code above, modifying the name of the file to something sensible.
spirals.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
#Please Click on the Voila button above to start the web application.

# +
import warnings
warnings.filterwarnings('ignore')

import ipywidgets as widgets
from IPython.display import display, clear_output

#importing Libraries for data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer

# +
# #!jupyter nbextension install --sys-prefix --symlink --overwrite --py voila-gridstack
# #!jupyter serverextension enable voila --sys-prefix
# -

# Load the dataset that drives the recommender and the dashboard charts.
df = pd.read_csv("top_1000_movies.csv")
columns = ['Movie_id', 'Series_Title', 'Genre', 'IMDB_Rating', 'Director',
           'Star1', 'Star2', 'Star3', 'Star4']
movie_list = df['Series_Title']

# Replace blank / whitespace-only cells with NaN so they are treated as
# missing values.  BUG FIX: the original called df.replace(...) without
# assigning the result, so the cleaning silently had no effect
# (DataFrame.replace returns a new frame unless inplace=True).
if df.isnull().values.any():
    df = df.replace(r'^\s*$', np.nan, regex=True)


def get_important_features(data):
    """Build one space-joined text feature per movie (title, genre, rating,
    director and the four stars) for the bag-of-words similarity model."""
    important_features = []
    for i in range(0, data.shape[0]):
        important_features.append(
            data['Series_Title'][i] + ' ' + data['Genre'][i] + ' '
            + str(data['IMDB_Rating'][i]) + ' ' + data['Director'][i] + ' '
            + data['Star1'][i] + ' ' + data['Star2'][i] + ' '
            + data['Star3'][i] + ' ' + data['Star4'][i])
    return important_features


def movie_rec(movie_name):
    """Print the 10 movies most similar to *movie_name*.

    Similarity is cosine similarity between CountVectorizer bag-of-words
    vectors of the concatenated feature strings.  The dataset is re-read on
    every call so the model always reflects the CSV on disk.
    """
    df = pd.read_csv("top_1000_movies.csv")
    df['important_features'] = get_important_features(df)
    cm = CountVectorizer().fit_transform(df['important_features'])
    cs = cosine_similarity(cm)
    movie_id = df[df.Series_Title == movie_name]['Movie_id'].values[0]
    scores = list(enumerate(cs[movie_id]))
    sorted_scores = sorted(scores, key=lambda x: x[1], reverse=True)
    sorted_scores = sorted_scores[1:]  # drop the movie itself (similarity 1.0)
    j = 0
    print('If you liked', movie_name, ' you might also be interested in these 10 movies:\n')
    for items in sorted_scores:
        movie_title = df[df.Movie_id == items[0]]['Series_Title'].values[0]
        print(j + 1, movie_title)
        j = j + 1
        if j > 9:
            break


def rating_calc(rating):
    """Print the running average of the collected user ratings (2 dp)."""
    # Simplified from a manual accumulation loop to the builtin sum().
    rounded = round(sum(rating) / len(rating), 2)
    print('The current rating of the movie recommendation system based on user feedback is ' + str(rounded))


# +
label_headline = widgets.HTML(value="<h1>Movie Recommendation System(Predictive Model)</h1>")
# TEXT FIX: the recommender prints the top TEN matches (see movie_rec), and
# the original sentence was missing its verb ("you will the top five movies").
label_explaination = widgets.HTML(value="<p>To use the movie recommendation below, please select a movie from the list. After your selection, you will see the top ten movies similar to your selection. </p>")
jump = widgets.HTML(value="<a href='#GenreDist' color:Blue>Genre Distribution</a> | <a href='#TopDirector' color:Blue>Top 10 Directors</a> | <a href='#IMDB' color:Blue>IMDB Rating Vs. Meta Score</a>")

choose_movie = widgets.Dropdown(layout={'width': '500px'},
                                options=df['Series_Title'],
                                value=None,
                                description='Pick a Movie: ',
                                )
# Collected user feedback scores; appended to by the rating widget below.
rating = []

display(label_headline)
display(jump)
display(label_explaination)
display(choose_movie)

output = widgets.Output()
display(output)


@output.capture()
def on_change(change):
    # Re-run the recommender whenever a different movie is selected.
    if change['name'] == 'value' and (change['new'] != change['old']):
        clear_output()
        movie_rec(change['new'])


choose_movie.observe(on_change)

# +
rating_headline = widgets.HTML(value="<h1>Rating System (Machine Learning Algorithm Accuracy Tracker)</h1>")
rating_explaination = widgets.HTML(value="<p>The dropbox below is an example of the accuracy tracking function for the movie recommendation system. It records user feedback from their recommendation ratings, and then calculate the average. </p>")

feedback = widgets.Dropdown(layout={'width': '200px'},
                            options=[1, 2, 3, 4, 5],
                            value=None,
                            description='Rate Us: ',
                            )

display(rating_headline)
display(rating_explaination)
display(feedback)

output = widgets.Output()
display(output)


@output.capture()
def on_change(change):
    # Record the new rating and show the updated running average.
    if change['name'] == 'value' and (change['new'] != change['old']):
        clear_output()
        rating.append(change['new'])
        rating_calc(rating)


feedback.observe(on_change)

# +
label_headline2 = widgets.HTML(value="<a id='GenreDist'></a><h2>Genre Distribution</h2>")
label_explaination2 = widgets.HTML(value="<p>The graph below contains the genre distribution of the movies currently on the database. In the future it could be used to determine what genres needed to be added to our streaming services so the distribution is not skewed. So that our services can appeal to a wider audience. </p>")
display(label_headline2)
display(label_explaination2)

# One indicator column per genre (a movie can belong to several).
dummy = df['Genre'].str.get_dummies(sep=', ')
genre_distribution = dummy.sum().sort_values(ascending=False).index.values
genre_count = dummy.sum().sort_values(ascending=False).values

# BUG FIX: the pie labels were a hard-coded list, which silently falls out of
# sync with the data if the CSV changes; use the genre names computed above,
# which are already sorted to match genre_count.
plt.pie(genre_count, labels=genre_distribution, radius=4, autopct='%1.1f%%')
plt.show()

# +
label_headline3 = widgets.HTML(value="<a id='TopDirector'></a><h2>Top 10 Directors by Gross Earnings</h2>")
label_explaination3 = widgets.HTML(value="<p>The graph below contains the top 10 directors based on their movies gross earnings. </p>")
display(label_headline3)
display(label_explaination3)

# Total gross per director, top ten.
gross_by_director = df['Gross'].groupby(df['Director']).sum().sort_values(ascending=False).head(10)

gross_by_director.plot.bar(figsize=(15, 12))
plt.xlabel("Directors")
plt.ylabel("Movie gross earning in billions")
plt.title('Top 10 Directors Gross Earnings')
plt.show()

# +
label_headline4 = widgets.HTML(value="<a id='IMDB'></a><h2>IMDB Rating Vs. Meta Score by Gross Earnings </h2>")
label_explaination4 = widgets.HTML(value="<p>The graph below compares the top 10 movies based on Meta scores and IMDB rating to their gross earnings. </p>")
display(label_headline4)
display(label_explaination4)

gross_by_imdb_key = df['Gross'].groupby(df['IMDB_Rating']).sum().sort_values(ascending=False).head(10).index.values
gross_by_meta_key = df['Gross'].groupby(df['Meta_score']).sum().sort_values(ascending=False).head(10).index.values
gross_by_imdb = df['Gross'].groupby(df['IMDB_Rating']).sum().sort_values(ascending=False).head(10).values
gross_by_meta = df['Gross'].groupby(df['Meta_score']).sum().sort_values(ascending=False).head(10).values

bar_width = 0.2
fig, ax = plt.subplots(figsize=(15, 12))
summer = ax.bar(gross_by_imdb_key, gross_by_imdb, bar_width, label="IMDB Score")
# Meta scores run 0-100; dividing by 10 puts them on the same 0-10 axis as
# the IMDB ratings.
Winter = ax.bar(gross_by_meta_key / 10, gross_by_meta, bar_width, label="Meta Score")
ax.set_xlabel('Normalized Score')
ax.set_ylabel('Gross Earnings in Billions')
ax.set_title('Gross Earnings: Meta Score vs IMDB Score')
ax.legend()
# -
Movie-Recommendation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.optimize import fsolve


def f(x):
    """First test polynomial: x^3 - 5x^2 - 2x + 10 = (x - 5)(x^2 - 2)."""
    return x**3 - 5*x**2 - 2*x + 10


def bisection(a, b, tol):
    """Bisection root search for the module-level f() on [a, b].

    f must change sign on [a, b].  BUG FIX: the original compared the sign
    product against `tol` instead of 0, which can move the WRONG endpoint
    whenever 0 <= f(xl)*f(c) < tol; the test must be against zero.
    """
    xl = a
    xr = b
    c = (xl + xr) / 2.0  # robustness: defined even if |a-b| < tol initially
    while np.abs(xl - xr) >= tol:
        c = (xl + xr) / 2.0
        prod = f(xl) * f(c)
        if prod > 0:
            xl = c  # f(xl) and f(c) share a sign: root lies in [c, xr]
        else:
            xr = c  # sign change between xl and c: root lies in [xl, c]
    return c


answer = bisection(-2, 2, 1e-10)
print("answer of this question ", answer)
# +
def f(x):
    """Second test function: x^3 + 1 (real root at x = -1).

    NOTE: redefining the module-level f() also changes what the bisection
    defined above would use if that cell were re-run.
    """
    return x**3 + 1


def bisection(a, b, tol):
    """Same bisection as above (sign test fixed to compare against 0)."""
    xl = a
    xr = b
    c = (xl + xr) / 2.0
    while np.abs(xl - xr) >= tol:
        c = (xl + xr) / 2.0
        prod = f(xl) * f(c)
        if prod > 0:
            xl = c
        else:
            xr = c
    return c


answer = bisection(-5, 5, 1e-10)
print("answer of this question ", answer)
# +
######NRNRNRNRNRNRNNRNRRNNR###
# +
# Newton-Raphson for g(x) = 2x^3 - 9.5x + 7.5 (derivative 6x^2 - 9.5),
# started at x = 20.
x = 20
for i in range(100):
    xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
    if abs(xnr_new - x) < 0.001:
        break
    x = xnr_new
# BUG FIX: the original format string contained "f%" (instead of "%f"),
# which raises ValueError at run time.
print("the root is %f at %d iterations" % (xnr_new, i))
# +
# Same iteration with a tighter tolerance, started at x = 5.
x = 5
for i in range(100):
    xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
    if abs(xnr_new - x) < 0.0000001:
        break
    x = xnr_new
print("the root is %.6f at %d iterations" % (xnr_new, i))
# +
def newton_rapsonmtd(x):
    """Newton-Raphson for 2x^3 - 9.5x + 7.5 from start value x; prints the root."""
    for i in range(100):
        xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
        if abs(xnr_new - x) < 0.0000001:
            break
        x = xnr_new
    print("the root is %.6f at %d iterations" % (xnr_new, i))
# +
def newton_rapsonmtd(fn, dfn, x, tol, maxiter):
    """General Newton-Raphson (redefines the one-argument version above).

    fn / dfn: function and its derivative; x: start value.
    Returns (root, iteration_index).
    """
    for i in range(maxiter):
        xnr_new = x - fn(x) / dfn(x)
        if abs(xnr_new - x) < tol:
            break
        x = xnr_new
    return xnr_new, i


y = lambda x: 2*x**3 - 9.5*x + 7.5
dy = lambda x: 6*x**2 - 9.5
# BUG FIX: the original discarded the return value and then printed the
# undefined names xnr_new / i (NameError: they are locals of the function);
# capture the returned pair instead.
root, iters = newton_rapsonmtd(y, dy, 5, 0.00001, 100)
print("the root is %.6f at %d iterations" % (root, iters))
# +
####
# +
x = -10
for i in range(100):
    xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
    if abs(xnr_new - x) < 0.0000001:
        break
    x = xnr_new
print("the root is %.6f at %d iterations" % (xnr_new, i))
# +
x = -10
for i in range(100):
    xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
    if abs(xnr_new - x) < 0.0000001:
        break
    x = xnr_new
print("the root is %.3f at %d iterations" % (xnr_new, i))
# +
x = -10
for i in range(100):
    xnr_new = x - (2*x**3 - 9.5*x + 7.5)/(6*x**2 - 9.5)
    if abs(xnr_new - x) < 0.0000001:
        break
    x = xnr_new
print("the root is %.3f at %d iterations" % (x, i))
# +
def nrapson2(y, x, tol, maxiter):
    """Newton-Raphson where y = [function, derivative]; prints the root.

    BUG FIX: the original ended the loop with `xnew2 = x`, so x was never
    updated and the iteration made no progress (it either stopped on the
    first step or ran to maxiter and printed the start value); the update
    must be `x = xnew2`.
    """
    for i in range(maxiter):
        xnew2 = x - y[0](x)/y[1](x)
        if abs(xnew2 - x) < tol:
            break
        x = xnew2
    print("the root of the problems is %.3f at %.3d iterations" % (x, i))


y = [lambda x: 2*x**3 - 9.5*x + 7.5,
     lambda x: 6*x**2 - 9.5]
nrapson2(y, 20, 0.00001, 100)
# +
###### Building the solver function from Zekeriya's book #####
# PAGE 130 #
# +
def nrapsonz(x, nmaxiter, tol):
    """Newton-Raphson for the module-level func/funcx pair; prints the root.

    BUG FIXES: `for i in (nmaxiter)` iterated over a plain int (TypeError) ->
    range(nmaxiter); the update `xnew = x` never advanced x -> `x = xnew`.
    """
    for i in range(nmaxiter):
        xnew = x - func(x)/funcx(x)
        if abs(xnew - x) < tol:
            break
        x = xnew
    print("problemin çözümü %.6f in %.6d iterasyonda" % (x, i))


# BUG FIX: the original lambdas used undefined names (exp, sin, cos, pi, pix)
# and an inconsistent exponential term.  Reconstructed as
# f(x) = e^(-x) - sin(pi*x/4), which is the function whose derivative matches
# the funcx line the author wrote -- TODO confirm against the textbook (p.130).
func = lambda x: np.exp(-x) - np.sin(np.pi*x/4)
funcx = lambda x: -np.exp(-x) - np.pi/4*np.cos(np.pi*x/4)
nrapsonz(0, 4, 0.5*10**(-4))
# -
def newton rapson.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Advanced Topics: Additional Filtering
#
# The filtering examples we've shown to this point have been pretty simple, either comparisons between columns or fixed values, or set filter functions like `isin` and `notin`.
#
# Ibis supports a number of richer analytical filters that can involve one or more of:
#
# - Aggregates computed from the same or other tables
# - Conditional aggregates (in SQL-speak these are similar to "correlated subqueries")
# - "Existence" set filters (equivalent to the SQL `EXISTS` and `NOT EXISTS` keywords)

# ## Setup

import ibis
import os

# Connect to the Impala test database; the WebHDFS port can be overridden via
# the environment.  NOTE(review): os.environ.get returns a string when the
# variable is set but an int (50070) otherwise -- confirm both are accepted
# by hdfs_connect.
hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)
hdfs = ibis.impala.hdfs_connect(host='impala', port=hdfs_port)
con = ibis.impala.connect(host='impala', database='ibis_testing', hdfs_client=hdfs)
# Interactive mode: expressions execute against Impala and print their result
# when evaluated at the end of a cell.
ibis.options.interactive = True

# ## Using scalar aggregates in filters

table = con.table('functional_alltypes')
table.limit(5)

# We could always compute some aggregate value from the table and use that in another expression, or we can use a data-derived aggregate in the filter. Take the average of a column for example:

table.double_col.mean()

# You can use this expression as a substitute for a scalar value in a filter, and the execution engine will combine everything into a single query rather than having to access Impala multiple times:

# Rows where bigint_col exceeds the column-wide mean AND bool_col is true.
cond = table.bigint_col > table.double_col.mean()
expr = table[cond & table.bool_col].limit(5)
expr

# ## Conditional aggregates
#
#
# Suppose that we wish to filter using an aggregate computed conditional on some other expressions holding true. Using the TPC-H datasets, suppose that we want to filter customers based on the following criteria: Orders such that their amount exceeds the average amount for their sales region over the whole dataset. This can be computed any number of ways (such as joining auxiliary tables and filtering post-join)
#
# Again, from prior examples, here are the joined up tables with all the customer data:

# +
region = con.table('tpch_region')
nation = con.table('tpch_nation')
customer = con.table('tpch_customer')
orders = con.table('tpch_orders')

fields_of_interest = [customer,
                      region.r_name.name('region'),
                      orders.o_totalprice,
                      orders.o_orderdate.cast('timestamp').name('odate')]

# Join chain region -> nation -> customer -> orders, projecting only the
# fields selected above.
tpch = (region.join(nation, region.r_regionkey == nation.n_regionkey)
        .join(customer, customer.c_nationkey == nation.n_nationkey)
        .join(orders, orders.o_custkey == customer.c_custkey)
        [fields_of_interest])

tpch.limit(5)
# -

# In this particular case, filtering based on the conditional average `o_totalprice` by region requires creating a table view (similar to the self-join examples from earlier) that can be treated as a distinct table entity in the expression. This would **not** be required if we were computing a conditional statistic from some other table. So this is a little more complicated than some other cases would be:

# t2 is a self-view of tpch; restricting it to rows with the same region as
# the outer tpch row makes the mean a per-region (correlated) average.
t2 = tpch.view()
conditional_avg = t2[(t2.region == tpch.region)].o_totalprice.mean()

# Once you've done this, you can use the conditional average in a filter expression

amount_filter = tpch.o_totalprice > conditional_avg
tpch[amount_filter].limit(10)

# By looking at the table sizes before and after applying the filter you can see the relative size of the subset taken.

tpch.count()

tpch[amount_filter].count()

# Or even group by year and compare before and after:

tpch.schema()

# +
year = tpch.odate.year().name('year')

# Row counts per year before and after the filter; the filtered side is a
# view so the two aggregates can be joined against each other.
pre_sizes = tpch.group_by(year).size()
post_sizes = tpch[amount_filter].group_by(year).size().view()

percent = ((post_sizes['count'] / pre_sizes['count'].cast('double'))
           .name('fraction'))

expr = (pre_sizes.join(post_sizes, pre_sizes.year == post_sizes.year)
        [pre_sizes.year, pre_sizes['count'].name('pre_count'),
         post_sizes['count'].name('post_count'),
         percent])
expr
# -

# ## "Existence" filters
#
#
# Some filtering involves checking for the existence of a particular value in a column of another table, or among the results of some value expression. This is common in many-to-many relationships, and can be performed in numerous different ways, but it's nice to be able to express it with a single concise statement and let Ibis compute it optimally.
#
# Here's some examples:
#
# - Filter down to customers having at least one open order
# - Find customers having no open orders with 1-URGENT status
# - Find stores (in the stores table) having the same name as a vendor (in the vendors table).
#
# We'll go ahead and solve the first couple of these problems using the TPC-H tables to illustrate the API:

customer = con.table('tpch_customer')
orders = con.table('tpch_orders')

orders.limit(5)

# We introduce the `any` reduction:

# True for a customer row if at least one order with status 'O' references
# that customer's key.
has_open_orders = ((orders.o_orderstatus == 'O') &
                   (customer.c_custkey == orders.o_custkey)).any()

# This is now a valid filter:

customer[has_open_orders].limit(10)

# For the second example, in which we want to find customers not having any open urgent orders, we write down the condition that they _do_ have some first:

has_open_urgent_orders = ((orders.o_orderstatus == 'O') &
                          (orders.o_orderpriority == '1-URGENT') &
                          (customer.c_custkey == orders.o_custkey)).any()

# Now, we can negate this condition and use it as a filter:

customer[-has_open_urgent_orders].count()

# In this case, it is true that `customer.c_custkey` has no duplicate values, but that need not be the case. There could be multiple copies of any given value in either table column being compared, and the behavior will be the same (existence or non-existence is verified).
docs/source/tutorial/07-Advanced-Topics-ComplexFiltering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: dev # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Import libraries and dependencies import numpy as np import pandas as pd import os from datetime import datetime, timedelta import matplotlib.pyplot as plt import alpaca_trade_api as tradeapi # %matplotlib inline np.random.seed(42) # # Portfolio Planner # # In this activity, you will use the Alpaca api to grab historical data for a 60/40 portfolio using `SPY` to represent the stock portion and `AGG` to represent the bonds. # + # Load .env enviroment variables from dotenv import load_dotenv load_dotenv() # Set Alpaca API key and secret alpaca_api_key = os.getenv("ALPACA_API_KEY") alpaca_secret_key = os.getenv("ALPACA_SECRET_KEY") # api = tradeapi.REST() api = tradeapi.REST(alpaca_api_key, alpaca_secret_key, api_version='v2') # - # # Data Collection # # In this step, you will need to use the Alpaca api to fetch closing prices for the `SPY` and `AGG` tickers. Save the results as a pandas DataFrame # + # Get all Asstes assets = api.list_assets() # Keep only tradeable assets tradeable = [asset for asset in assets if asset.tradable ] # Create a new empty DataFrame asset_info_df = pd.DataFrame() asset_info_df['symbol'] = pd.Series([asset.symbol for asset in assets]) # Display the first 10 asset tickers # Set the ticker myportfolio=["AGG","SPY"] timeframe = '1D' # Set start and end datetimes of 1 year, between now and 365 days ago. 
end_date = datetime.now() start_date = end_date + timedelta(-365) # Get 1 year's worth of historical data for AGG,SPY df=api.get_barset( myportfolio, timeframe, limit=None, start=start_date, end=end_date, after=None, until=None, ).df #df.head(2) # - df = df.droplevel(axis=1, level=0) df.drop(columns=['open', 'high', 'low', 'volume'], inplace=True) df.index = df.index.date df.columns=['AGG','SPY'] df['port']=.4*df['AGG']+0.6*df['SPY'] df.drop(columns=['AGG','SPY'], inplace=True) #df.head(2) # + # Calculate the Daily Return of the Portfolio # - # Use the `pct_change` function to calculate daily returns of AAPL daily_returns=df.pct_change() daily_returns.head() Calculate the Volatility of Return # Use the `std` function to calculate the standard deviation of daily returns for AAPL std_dev_daily_return= daily_returns.std()['port'] std_dev_daily_return # Set number of trading days and get last closing price of AAPL from DataFrame num_trading_days = 252 *30 num_simulations = 500 last_price =df['port'][-1] last_price # + # Initialize the simulated prices list with the last closing price of AAPL simulated_prices = [last_price] avg_daily_return = daily_returns.mean()['port'] # Initialize empty DataFrame to hold simulated prices for each simulation simulated_price_df = pd.DataFrame() # Run the simulation of projecting stock prices for the next trading year, `500` times for n in range(num_simulations): # Initialize the simulated prices list with the last closing price of AAPL simulated_prices = [last_price] # Simulate the returns for 252 *30 days for i in range(num_trading_days): # Calculate the simulated price using the last price within the list simulated_price = simulated_prices[-1] * (1 + np.random.normal(avg_daily_return, std_dev_daily_return)) # Append the simulated price to the list simulated_prices.append(simulated_price) # Append a simulated prices of each simulation to DataFrame simulated_price_df[f"Simulation {n+1}"] = pd.Series(simulated_prices) # Print head of 
DataFrame simulated_price_df.head() # - # Use the `plot` function to plot the trajectory of Portfolio stock based on a 252 * 30 trading day simulation plot_title = f"{n+1} Simulations of Portfolion Trajectory Over the Next 30 years" simulated_price_df.plot(legend=None, title=plot_title) # Calculate the daily returns of simulated prices simulated_daily_returns = simulated_price_df.pct_change() # Calculate the normalized, cumulative return series portfolio_cumulative_returns= (1 + simulated_daily_returns.fillna(0)).cumprod() # Print records from the DataFrame portfolio_cumulative_returns.head() # Use the `plot` function to plot `500` simulations of the potential trajectories of the portfolio based on 252 trading days plot_title = f"{n+1} Simulations of Cumulative Portfolio Return Trajectories Over the Next 3 years" portfolio_cumulative_returns.plot(legend=None, title=plot_title) # Select the last row for project stock prices (prices of each simulation on the 252nd trading day) ending_cumulative_returns = portfolio_cumulative_returns.iloc[-1, :] ending_cumulative_returns.value_counts(bins=10) / len(ending_cumulative_returns) # Use the `quantile` function to calculate the 90% confidence interval for simulated ending prices confidence_interval = ending_cumulative_returns.quantile(q=[0.05, 0.95]) confidence_interval # Use the `plot` function to create a probability distribution histogram of simulated ending prices # with markings for a 95% confidence interval plt.figure(); ending_cumulative_returns.plot(kind='hist', density=True, bins=10) plt.axvline(confidence_interval.iloc[0], color='r') plt.axvline(confidence_interval.iloc[1], color='r') confidence_interval_90 = ending_cumulative_returns.quantile(q=[0.05, 0.95]) confidence_interval_90 confidence_interval_50 = ending_cumulative_returns.quantile(q=[0.25, 0.75]) confidence_interval_50 confidence_interval_10 = ending_cumulative_returns.quantile(q=[0.40, 0.50]) confidence_interval_10 # + # Set initial investment 
initial_investment = 20000 # Calculate investment profit/loss of lower and upper bound cumulative portfolio returns investment_pnl_lower_bound = initial_investment + (initial_investment * confidence_interval_90.iloc[0]) investment_pnl_upper_bound = initial_investment + (initial_investment * confidence_interval_90.iloc[1]) # Print the results print(f"There is a 90% chance that an initial investment of $20,000 in the portfolio" f" over the next 30 years will end within in the range of" f" ${investment_pnl_lower_bound} and ${investment_pnl_upper_bound}") # + # Set initial investment initial_investment = 20000 # Calculate investment profit/loss of lower and upper bound cumulative portfolio returns investment_pnl_lower_bound = initial_investment + (initial_investment * confidence_interval_50.iloc[0]) investment_pnl_upper_bound = initial_investment + (initial_investment * confidence_interval_50.iloc[1]) # Print the results print(f"There is a 50% chance that an initial investment of $20,000 in the portfolio" f" over the next 30 years will end within in the range of" f" ${investment_pnl_lower_bound} and ${investment_pnl_upper_bound}") # + #Set initial investment initial_investment = 20000 # Calculate investment profit/loss of lower and upper bound cumulative portfolio returns investment_pnl_lower_bound = initial_investment + (initial_investment * confidence_interval_10.iloc[0]) investment_pnl_upper_bound = initial_investment + (initial_investment * confidence_interval_10.iloc[1]) # Print the results print(f"There is a 10% chance that an initial investment of $20,000 in the portfolio" f" over the next 30 years will end within in the range of" f" ${investment_pnl_lower_bound} and ${investment_pnl_upper_bound}") # + #Given the current projected annual income from the Plaid analysis, will a 4% withdrawal rate meet or exceed that value at the 10th percentile? Note: This is basically determining if retirement income is equivalent to current income. # + # No, it would not. 
4% with witdrawel rate would give you $800 # your projected income is $7389 # - How would a 50% increase in the initial investment amount affect the 4% retirement withdrawal? In other words, what happens if the initial investment had been bigger? # + #Set initial investment initial_investment = 30000 # Calculate investment profit/loss of lower and upper bound cumulative portfolio returns investment_pnl_lower_bound = initial_investment + (initial_investment * confidence_interval_10.iloc[0]) investment_pnl_upper_bound = initial_investment + (initial_investment * confidence_interval_10.iloc[1]) # Print the results print(f"There is a 10% chance that an initial investment of $20,000 in the portfolio" f" over the next 30 years will end within in the range of" f" ${investment_pnl_lower_bound} and ${investment_pnl_upper_bound}") # - Still 4% withdraw rate would give you $1200. it is lower than $7389 # # Monte Carlo Simulation # # In this step, you will run Monte Carlo Simulations for your portfolio to model portfolio performance at different retirement ages. # # Complete the following steps: # 1. Calculate the daily returns for the SPY and AGG closing prices. # 2. Calculate volatility for both the SPY and AGG closing prices. # 3. Find the last day's closing price for both stocks and save those as variables. # 4. Run a Monte Carlo Simulation of at least 500 iterations and generate at least 30 years of closing prices # # ### HINTS: # There are 252 trading days per year, so the number of records to generate for each Monte Carlo run will be 252 days * 30 years # # Calculate the daily returns for AGG closing prices # Use the `pct_change` function to calculate daily returns of AAPL daily_returns = df.pct_change() daily_returns.head() # # Calculate the daily returns for the SPY closing prices. 
# Use the `pct_change` function to calculate daily returns of AAPL daily_returns_spy = spy.pct_change() daily_returns_spy.head() # # Calculate volatility SPY # Use the `std` function to calculate the standard deviation of daily returns for AAPL std_dev_daily_return_spy = daily_returns_spy.std()['close'] std_dev_daily_return_spy # # Calculate volatility AGG # Use the `std` function to calculate the standard deviation of daily returns for AAPL std_dev_daily_return_agg = daily_returns_agg.std()['close'] std_dev_daily_return_agg # # Find the last day's closing price for both stocks and save those as variables. # Set number of trading days and get last closing price of AAPL from DataFrame num_trading_days = 252 *30 num_simulations = 500 spy_last_price =spy_n['SPY'][-1] agg_last_price =agg_n['AGG'][-1] spy_last_price # + # Initialize the simulated prices list with the last closing price of AAPL simulated_prices = [spy_last_price] avg_daily_return_spy = daily_returns_spy.mean()['close'] # Initialize empty DataFrame to hold simulated prices for each simulation simulated_spyprice_df = pd.DataFrame() # Run the simulation of projecting stock prices for the next trading year, `1000` times for n in range(num_simulations): # Initialize the simulated prices list with the last closing price of AAPL simulated_spy_prices = [spy_last_price] # Simulate the returns for 252 days for i in range(num_trading_days): # Calculate the simulated price using the last price within the list simulated_price_spy = simulated_spy_prices[-1] * (1 + np.random.normal(avg_daily_return_spy, std_dev_daily_return_spy)) # Append the simulated price to the list simulated_spy_prices.append(simulated_price_spy) # Append a simulated prices of each simulation to DataFrame simulated_spyprice_df[f"Simulation {n+1}"] = pd.Series(simulated_spy_prices) # Print head of DataFrame simulated_spyprice_df.head() # - # # Run a Monte Carlo Simulation of at least 500 iterations and generate at least 30 years of closing prices # 
+ # Initialize the simulated prices list with the last closing price of AAPL simulated_spy_prices = [spy_last_price] avg_daily_return_spy = daily_returns_spy.mean()['close'] # Initialize empty DataFrame to hold simulated prices for each simulation simulated_spyprice_df = pd.DataFrame() # Run the simulation of projecting stock prices for the next trading year, `1000` times for n in range(num_simulations): # Initialize the simulated prices list with the last closing price of AAPL simulated_spy_prices = [spy_last_price] # Simulate the returns for 252 days for i in range(num_trading_days): # Calculate the simulated price using the last price within the list simulated_price_spy = simulated_spy_prices[-1] * (1 + np.random.normal(avg_daily_return_spy, std_dev_daily_return_spy)) # Append the simulated price to the list simulated_spy_prices.append(simulated_price_spy) # Append a simulated prices of each simulation to DataFrame simulated_spyprice_df[f"Simulation {n+1}"] = pd.Series(simulated_spy_prices) # Print head of DataFrame simulated_spyprice_df.head() # + # Calculate the # Initialize the simulated prices list with the last closing price of AAPL simulated_agg_prices = [agg_last_price] avg_daily_return_agg = daily_returns_agg.mean()['close'] # Initialize empty DataFrame to hold simulated prices for each simulation simulated_aggprice_df = pd.DataFrame() # Run the simulation of projecting stock prices for the next trading year, `1000` times for n in range(num_simulations): # Initialize the simulated prices list with the last closing price of AAPL simulated_agg_prices = [agg_last_price] # Simulate the returns for 252 days for i in range(num_trading_days): # Calculate the simulated price using the last price within the list simulated_price_agg = simulated_agg_prices[-1] * (1 + np.random.normal(avg_daily_return_agg, std_dev_daily_return_agg)) # Append the simulated price to the list simulated_agg_prices.append(simulated_price_agg) # Append a simulated prices of each 
simulation to DataFrame simulated_aggprice_df[f"Simulation {n+1}"] = pd.Series(simulated_agg_prices) # Print head of DataFrame simulated_aggprice_df.head() # + jupyter={"outputs_hidden": true} # Use the `plot` function to plot the trajectory of SPY stock based on a 252 trading day simulation plot_title = f"{n+1} Simulations of SPY Stock Price Trajectory Over the Next 252 Trading Days" simulated_spyprice_df.plot(legend=None, title=plot_title) # - # Use the `plot` function to plot the trajectory of AGG stock based on a 252 trading day simulation plot_title = f"{n+1} Simulations of SPY Stock Price Trajectory Over the Next 252 Trading Days" simulated_aggprice_df.plot(legend=None, title=plot_title) # + # Select the last row for the cumulative returns (cumulative returns at 30 years) # YOUR CODE HERE # + # Select the last row for the cumulative returns (cumulative returns at 20 years) # YOUR CODE HERE # + # Display the 90% confidence interval for the ending returns # YOUR CODE HERE # + # Visualize the distribution of the ending returns # YOUR CODE HERE # - # --- # # Retirement Analysis # # In this section, you will use the monte carlo model to answer the following retirement planning questions: # # 1. What are the expected cumulative returns at 30 years for the 10th, 50th, and 90th percentiles? # 2. Given an initial investment of `$20,000`, what is the expected portfolio return in dollars at the 10th, 50th, and 90th percentiles? # 3. Given the current projected annual income from the Plaid analysis, will a 4% withdraw rate from the retirement portfolio meet or exceed that value at the 10th percentile? # 4. How would a 50% increase in the initial investment amount affect the 4% retirement withdrawal? # ### What are the expected cumulative returns at 30 years for the 10th, 50th, and 90th percentiles? # + # YOUR CODE HERE # - # ### Given an initial investment of `$20,000`, what is the expected portfolio return in dollars at the 10th, 50th, and 90th percentiles? 
# + # YOUR CODE HERE # - # ### Given the current projected annual income from the Plaid analysis, will a 4% withdraw rate from the retirement portfolio meet or exceed that value at the 10th percentile? # # Note: This is effectively saying that 90% of the expected returns will be greater than the return at the 10th percentile, so this can help measure the uncertainty about having enough funds at retirement # + # YOUR CODE HERE # - # ### How would a 50% increase in the initial investment amount affect the 4% retirement withdrawal? # + # YOUR CODE HERE # - # ### Optional Challenge # # In this section, you will calculate and plot the cumulative returns for the median and 90% confidence intervals. This plot shows the expected cumulative returns for any given day between the first day and the last day of investment. # + # YOUR CODE HERE
portfolio_planner.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Lesson 3 Exercise 2: Focus on Primary Key
# <img src="images/cassandralogo.png" width="250" height="250">

# ### Walk through the basics of creating a table with a good Primary Key in Apache Cassandra, inserting rows of data, and doing a simple CQL query to validate the information.
#
# ### Replace ##### with your own answers.
#
# Note: __Do not__ click the blue Preview button in the lower task bar

# #### We will use a python wrapper/ python driver called cassandra to run the Apache Cassandra queries. This library should be preinstalled but in the future to install this library you can run this command in a notebook to install locally:
# # ! pip install cassandra-driver
# #### More documentation can be found here: https://datastax.github.io/python-driver/

# #### Import Apache Cassandra python package
import cassandra

# ### Create a connection to the database
from cassandra.cluster import Cluster
try:
    # Connect to the local single-node cluster on the default port (9042).
    cluster = Cluster(['127.0.0.1'])  # If you have a locally installed Apache Cassandra instance
    session = cluster.connect()
except Exception as e:
    print(e)

# ### Create a keyspace to work in
# +
try:
    # SimpleStrategy with replication_factor 1 is only appropriate for a
    # single-node development cluster — never for production.
    session.execute("""
    CREATE KEYSPACE IF NOT EXISTS udacity
    WITH REPLICATION =
    { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
    print(e)
# -

# #### Connect to the Keyspace. Compare this to how we had to create a new session in PostgreSQL.
try:
    session.set_keyspace('udacity')
except Exception as e:
    print(e)

# ### Imagine you need to create a new Music Library of albums
#
# ### Here is the information asked of the data:
# #### 1. Give every album in the music library that was created by a given artist
# `select * from music_library WHERE artist_name="<NAME>"`
#
# ### Here is the collection of data
# <img src="images/table3.png" width="650" height="350">

# #### Practice by making the PRIMARY KEY only 1 Column (not 2 or more)
# NOTE: with artist_name alone as the primary key, each new INSERT for the
# same artist overwrites the previous row — this is the lesson's point.
query = "CREATE TABLE IF NOT EXISTS music_library "
query = query + "(year int, city text, artist_name text, album_name text, PRIMARY KEY (artist_name))"
try:
    session.execute(query)
except Exception as e:
    print(e)

# ### Let's insert the data into the table
# +
query = "INSERT INTO music_library (year, artist_name, album_name, city)"
query = query + " VALUES (%s, %s, %s, %s)"

try:
    session.execute(query, (1970, "The Beatles", "Let it Be", "Liverpool"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1965, "The Beatles", "Rubber Soul", "Oxford"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1965, "The Who", "My Generation", "London"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1966, "The Monkees", "The Monkees", "Los Angeles"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1970, "The Carpenters", "Close To You", "San Diego"))
except Exception as e:
    print(e)
# -

# ### Validate the Data Model -- Does it give you two rows?
# +
query = "select * from music_library WHERE artist_name='The Beatles'"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

for row in rows:
    print (row.year, row.artist_name, row.album_name, row.city)
# -

# ### If you used just one column as your PRIMARY KEY, your output should be:
# 1965 The Beatles Rubber Soul Oxford
#
#
# ### That didn't work out as planned! Why is that? Did you create a unique primary key?

# ### Try again - Create a new table with a composite key this time
# The composite key (artist_name, album_name) makes each album row unique,
# so both Beatles albums survive.
query = "CREATE TABLE IF NOT EXISTS music_library1 "
query = query + "(year int, city text, artist_name text, album_name text, PRIMARY KEY (artist_name, album_name))"
try:
    session.execute(query)
except Exception as e:
    print(e)

# +
## You can opt to change the sequence of columns to match your composite key. \
## Make sure to match the values in the INSERT statement
query = "INSERT INTO music_library1 (year, artist_name, album_name, city)"
query = query + " VALUES (%s, %s, %s, %s)"

try:
    session.execute(query, (1970, "The Beatles", "Let it Be", "Liverpool"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1965, "The Beatles", "Rubber Soul", "Oxford"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1965, "The Who", "My Generation", "London"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1966, "The Monkees", "The Monkees", "Los Angeles"))
except Exception as e:
    print(e)

try:
    session.execute(query, (1970, "The Carpenters", "Close To You", "San Diego"))
except Exception as e:
    print(e)
# -

# ### Validate the Data Model -- Did it work?
# +
query = "SELECT * FROM music_library1 WHERE artist_name='The Beatles'"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

for row in rows:
    print (row.year, row.artist_name, row.album_name, row.city)
# -

# ### Your output should be:
# 1970 The Beatles Let it Be Liverpool<br>
# 1965 The Beatles Rubber Soul Oxford

# ### Drop the tables
# +
query = "DROP TABLE IF EXISTS music_library"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)

query = "DROP TABLE IF EXISTS music_library1"
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
# -

# ### Close the session and cluster connection
session.shutdown()
cluster.shutdown()
0. Back to Basics/3. NoSQL Data Models/2. Primary Key.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="qX_NidmdUK1s"
# # Medical Image Classification Tutorial with the MedNIST Dataset
#
# In this tutorial, we introduce an end-to-end training and evaluation example based on the MedNIST dataset.
#
# We'll go through the following steps:
# * Create a dataset for training and testing
# * Use MONAI transforms to pre-process data
# * Use the DenseNet from MONAI for classification
# * Train the model with a PyTorch program
# * Evaluate on test dataset
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/2d_classification/mednist_tutorial.ipynb)

# + [markdown] id="eh4rxsPMUK1v"
# ## Setup environment

# + id="ArdPD1U_UK1v" outputId="363bf8f2-fda6-4ff8-f06e-c94ba3f6e788" colab={"base_uri": "https://localhost:8080/"}
# Install MONAI and matplotlib only if they are not importable already.
# !python -c "import monai" || pip install -q "monai-weekly[pillow, tqdm]"
# !python -c "import matplotlib" || pip install -q matplotlib
# %matplotlib inline

# + [markdown] id="3Lw3JQz5UK1w"
# ## Setup imports

# + tags=[] id="PkPWAeTpUK1w" outputId="5a7f0f76-4a51-4126-b761-00a933ea8021" colab={"base_uri": "https://localhost:8080/"}
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import tempfile

import matplotlib.pyplot as plt
import PIL
import torch
import numpy as np
from sklearn.metrics import classification_report

from monai.apps import download_and_extract
from monai.config import print_config
from monai.data import decollate_batch
from monai.metrics import ROCAUCMetric
from monai.networks.nets import DenseNet121
from monai.transforms import (
    Activations,
    AddChannel,
    AsDiscrete,
    Compose,
    LoadImage,
    RandFlip,
    RandRotate,
    RandZoom,
    ScaleIntensity,
    EnsureType,
)
from monai.utils import set_determinism

# Print MONAI/PyTorch version and optional-dependency information.
print_config()

# + [markdown] id="gVQOC7JrUK1x"
# ## Setup data directory
#
# You can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable.
# This allows you to save results and reuse downloads.
# If not specified a temporary directory will be used.

# + tags=[] id="xZY9r_N6UK1y" outputId="71e36879-bbb5-4652-bf92-f1fbf67d214c" colab={"base_uri": "https://localhost:8080/"}
# Fall back to a throwaway temp dir when MONAI_DATA_DIRECTORY is unset; the
# final cell of the notebook removes it again in that case.
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir)

# + [markdown] id="f-BysxWnUK1y"
# ## Download dataset
#
# The MedNIST dataset was gathered from several sets from [TCIA](https://wiki.cancerimagingarchive.net/display/Public/Data+Usage+Policies+and+Restrictions),
# [the RSNA Bone Age Challenge](http://rsnachallenges.cloudapp.net/competitions/4),
# and [the NIH Chest X-ray dataset](https://cloud.google.com/healthcare/docs/resources/public-datasets/nih-chest).
#
# The dataset is kindly made available by [Dr. <NAME>., Ph.D.](https://www.mayo.edu/research/labs/radiology-informatics/overview) (Department of Radiology, Mayo Clinic)
# under the Creative Commons [CC BY-SA 4.0 license](https://creativecommons.org/licenses/by-sa/4.0/).
#
# If you use the MedNIST dataset, please acknowledge the source.
# + tags=[] id="4uKfwSSZUK1z" outputId="2d194301-e701-45bf-cad8-437718833c5a" colab={"base_uri": "https://localhost:8080/"}
# Download and unpack the MedNIST archive unless it is already present.
resource = "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/MedNIST.tar.gz"
md5 = "0bc7306e7427e00ad1c5526a6677552d"

# FIX: the resource is MedNIST.tar.gz and it extracts to a "MedNIST" folder.
# The previous "ATLAS_R2.0" names never matched the extracted data, so the
# existence check always failed (forcing a re-download on every run) and the
# later os.listdir(data_dir) crashed with FileNotFoundError.
compressed_file = os.path.join(root_dir, "MedNIST.tar.gz")
data_dir = os.path.join(root_dir, "MedNIST")
if not os.path.exists(data_dir):
    download_and_extract(resource, compressed_file, root_dir, md5)

# + [markdown] id="LMFeYS8IUK1z"
# ## Set deterministic training for reproducibility

# + id="upBiyJDiUK10"
set_determinism(seed=0)

# + [markdown] id="MzwY0z8rUK10"
# ## Read image filenames from the dataset folders
#
# First of all, check the dataset files and show some statistics.
# There are 6 folders in the dataset: Hand, AbdomenCT, CXR, ChestCT, BreastMRI, HeadCT,
# which should be used as the labels to train our classification model.

# + tags=[] id="PtHQWG7sUK10" outputId="bbe488ba-8c87-469a-bd56-790206b9a2b1"
# Each sub-folder name is a class label; build parallel lists of file paths
# and integer class indices.
class_names = sorted(x for x in os.listdir(data_dir)
                     if os.path.isdir(os.path.join(data_dir, x)))
num_class = len(class_names)
image_files = [
    [
        os.path.join(data_dir, class_names[i], x)
        for x in os.listdir(os.path.join(data_dir, class_names[i]))
    ]
    for i in range(num_class)
]
num_each = [len(image_files[i]) for i in range(num_class)]
image_files_list = []
image_class = []
for i in range(num_class):
    image_files_list.extend(image_files[i])
    image_class.extend([i] * num_each[i])
num_total = len(image_class)
# Probe one file for the (width, height) of the images.
image_width, image_height = PIL.Image.open(image_files_list[0]).size

print(f"Total image count: {num_total}")
print(f"Image dimensions: {image_width} x {image_height}")
print(f"Label names: {class_names}")
print(f"Label counts: {num_each}")

# + [markdown] id="IfzJw9KYUK10"
# ## Randomly pick images from the dataset to visualize and check

# + id="3ODfAFnyUK11" outputId="963ebd6c-6325-433c-ec5d-03831b0cacb5"
# Show a 3x3 grid of random samples, labelled with their class names.
plt.subplots(3, 3, figsize=(8, 8))
for i, k in enumerate(np.random.randint(num_total, size=9)):
    im = PIL.Image.open(image_files_list[k])
    arr = np.array(im)
    plt.subplot(3, 3, i + 1)
    plt.xlabel(class_names[image_class[k]])
    plt.imshow(arr, cmap="gray", vmin=0, vmax=255)
plt.tight_layout()
plt.show()

# + [markdown] id="-WWQ7v4KUK11"
# ## Prepare training, validation and test data lists
#
# Randomly select 10% of the dataset as validation and 10% as test.

# + tags=[] id="YAoo5ygGUK11" outputId="11d5a8c6-9297-4c1f-cbdb-141bc6164f75"
val_frac = 0.1
test_frac = 0.1
length = len(image_files_list)
# Shuffle indices once, then carve out the test and validation prefixes.
indices = np.arange(length)
np.random.shuffle(indices)

test_split = int(test_frac * length)
val_split = int(val_frac * length) + test_split
test_indices = indices[:test_split]
val_indices = indices[test_split:val_split]
train_indices = indices[val_split:]

train_x = [image_files_list[i] for i in train_indices]
train_y = [image_class[i] for i in train_indices]
val_x = [image_files_list[i] for i in val_indices]
val_y = [image_class[i] for i in val_indices]
test_x = [image_files_list[i] for i in test_indices]
test_y = [image_class[i] for i in test_indices]

print(
    f"Training count: {len(train_x)}, Validation count: "
    f"{len(val_x)}, Test count: {len(test_x)}")

# + [markdown] id="iXzC_iamUK11"
# ## Define MONAI transforms, Dataset and Dataloader to pre-process data

# + id="PhhbfbH2UK12"
# Training pipeline adds random rotation/flip/zoom for augmentation;
# validation/test pipeline only loads, adds a channel dim and rescales.
train_transforms = Compose(
    [
        LoadImage(image_only=True),
        AddChannel(),
        ScaleIntensity(),
        RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
        RandFlip(spatial_axis=0, prob=0.5),
        RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
        EnsureType(),
    ]
)

val_transforms = Compose(
    [LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()])

# Post-processing for metric computation: softmax the logits, one-hot the labels.
y_pred_trans = Compose([EnsureType(), Activations(softmax=True)])
y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=num_class)])


# + id="WeBnNRXoUK12"
class MedNISTDataset(torch.utils.data.Dataset):
    """Map-style dataset pairing an image file (run through `transforms`) with its label."""

    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files
        self.labels = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        return self.transforms(self.image_files[index]), self.labels[index]


train_ds = MedNISTDataset(train_x, train_y, train_transforms)
train_loader = torch.utils.data.DataLoader(
    train_ds, batch_size=300, shuffle=True, num_workers=10)

val_ds = MedNISTDataset(val_x, val_y, val_transforms)
val_loader = torch.utils.data.DataLoader(
    val_ds, batch_size=300, num_workers=10)

test_ds = MedNISTDataset(test_x, test_y, val_transforms)
test_loader = torch.utils.data.DataLoader(
    test_ds, batch_size=300, num_workers=10)

# + [markdown] id="wow6JxbGUK12"
# ## Define network and optimizer
#
# 1. Set learning rate for how much the model is updated per batch.
# 1. Set total epoch number, as we have shuffle and random transforms, so the training data of every epoch is different.
# And as this is just a get start tutorial, let's just train 4 epochs.
# If train 10 epochs, the model can achieve 100% accuracy on test dataset.
# 1. Use DenseNet from MONAI and move to GPU devide, this DenseNet can support both 2D and 3D classification tasks.
# 1. Use Adam optimizer.

# + id="EfZ9BEvxUK12"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DenseNet121(spatial_dims=2, in_channels=1,
                    out_channels=num_class).to(device)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 1e-5)
max_epochs = 4
val_interval = 1
auc_metric = ROCAUCMetric()

# + [markdown] id="--GMMhO-UK12"
# ## Model training
#
# Execute a typical PyTorch training that run epoch loop and step loop, and do validation after every epoch.
# Will save the model weights to file if got best validation accuracy.
# + tags=[] id="hhYVGWsBUK13" outputId="074f02b2-b20b-4d33-818f-2f30946175eb" best_metric = -1 best_metric_epoch = -1 epoch_loss_values = [] metric_values = [] for epoch in range(max_epochs): print("-" * 10) print(f"epoch {epoch + 1}/{max_epochs}") model.train() epoch_loss = 0 step = 0 for batch_data in train_loader: step += 1 inputs, labels = batch_data[0].to(device), batch_data[1].to(device) optimizer.zero_grad() outputs = model(inputs) loss = loss_function(outputs, labels) loss.backward() optimizer.step() epoch_loss += loss.item() print( f"{step}/{len(train_ds) // train_loader.batch_size}, " f"train_loss: {loss.item():.4f}") epoch_len = len(train_ds) // train_loader.batch_size epoch_loss /= step epoch_loss_values.append(epoch_loss) print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}") if (epoch + 1) % val_interval == 0: model.eval() with torch.no_grad(): y_pred = torch.tensor([], dtype=torch.float32, device=device) y = torch.tensor([], dtype=torch.long, device=device) for val_data in val_loader: val_images, val_labels = ( val_data[0].to(device), val_data[1].to(device), ) y_pred = torch.cat([y_pred, model(val_images)], dim=0) y = torch.cat([y, val_labels], dim=0) y_onehot = [y_trans(i) for i in decollate_batch(y)] y_pred_act = [y_pred_trans(i) for i in decollate_batch(y_pred)] auc_metric(y_pred_act, y_onehot) result = auc_metric.aggregate() auc_metric.reset() del y_pred_act, y_onehot metric_values.append(result) acc_value = torch.eq(y_pred.argmax(dim=1), y) acc_metric = acc_value.sum().item() / len(acc_value) if result > best_metric: best_metric = result best_metric_epoch = epoch + 1 torch.save(model.state_dict(), os.path.join( root_dir, "best_metric_model.pth")) print("saved new best metric model") print( f"current epoch: {epoch + 1} current AUC: {result:.4f}" f" current accuracy: {acc_metric:.4f}" f" best AUC: {best_metric:.4f}" f" at epoch: {best_metric_epoch}" ) print( f"train completed, best_metric: {best_metric:.4f} " f"at epoch: {best_metric_epoch}") 
# + [markdown] id="g0jkwFM5UK13" # ## Plot the loss and metric # + id="ZJt4d90LUK13" outputId="ef03a750-8d2b-48e8-edac-561e75c8d7dd" plt.figure("train", (12, 6)) plt.subplot(1, 2, 1) plt.title("Epoch Average Loss") x = [i + 1 for i in range(len(epoch_loss_values))] y = epoch_loss_values plt.xlabel("epoch") plt.plot(x, y) plt.subplot(1, 2, 2) plt.title("Val AUC") x = [val_interval * (i + 1) for i in range(len(metric_values))] y = metric_values plt.xlabel("epoch") plt.plot(x, y) plt.show() # + [markdown] id="ASQ9C4MmUK13" # ## Evaluate the model on test dataset # # After training and validation, we already got the best model on validation test. # We need to evaluate the model on test dataset to check whether it's robust and not over-fitting. # We'll use these predictions to generate a classification report. # + id="0XGtXVFUUK13" model.load_state_dict(torch.load( os.path.join(root_dir, "best_metric_model.pth"))) model.eval() y_true = [] y_pred = [] with torch.no_grad(): for test_data in test_loader: test_images, test_labels = ( test_data[0].to(device), test_data[1].to(device), ) pred = model(test_images).argmax(dim=1) for i in range(len(pred)): y_true.append(test_labels[i].item()) y_pred.append(pred[i].item()) # + tags=[] id="mAKmOkZYUK14" outputId="ca66fb48-a416-44d3-c28f-4f6c43a61df7" print(classification_report( y_true, y_pred, target_names=class_names, digits=4)) # + [markdown] id="taUqAG5TUK14" # ## Cleanup data directory # # Remove directory if a temporary was used. # + id="-Zz3k-NeUK14" if directory is None: shutil.rmtree(root_dir)
2d_classification/mednist_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# ## Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
# ---

# + deletable=true editable=true
import numpy as np               # NumPy
import cv2                       # openCV
import glob                      # Filename pattern matching
import matplotlib.pyplot as plt  # 2D plotting
import matplotlib.image as mpimg
# Interactive plotting in separate window
# #%matplotlib qt
# Visualizations will be shown in the notebook
# %matplotlib inline

# + [markdown] deletable=true editable=true
# ## Compute the camera calibration points using chessboard images

# + deletable=true editable=true
def get_3d2d_points(do_plot=False, do_file=False):
    """Collect 3D object points and 2D image points from the chessboard
    calibration images in camera_cal/.

    The board is assumed to have 9x6 inner corners. Images where the full
    corner grid is not found are silently skipped.

    :param do_plot: show each image with detected corners drawn
    :param do_file: write each annotated image to corners_<i>.jpg
    :return: (objpoints, imgpoints) lists suitable for cv2.calibrateCamera
    """
    # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,5,0)
    objp = np.zeros((6*9, 3), np.float32)
    objp[:,:2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images
    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in image plain

    # List of calibration images
    images = glob.glob('camera_cal/calibration*.jpg')
    print('Num of calibration images: {0}'.format(len(images)))

    # Step through the list and search for chessboard corners
    for img_id, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        # http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#findchessboardcorners
        # cv2.findChessboardCorners(image, patternSize[, corners[, flags]]) → retval, corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

        # If found - add object points, add image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (9, 6), corners, ret)
            # Draw the plot
            if do_plot:
                plt.imshow(img)
                plt.show()
            # Save to the file
            if do_file:
                write_name = 'corners_' + str(img_id) + '.jpg'
                cv2.imwrite(write_name, img)

    return objpoints, imgpoints

# + [markdown] deletable=true editable=true
# ## Distortion correction

# + deletable=true editable=true
import pickle

def pickle_dump(mtx, dist):
    """Persist the camera matrix and distortion coefficients to disk."""
    # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
    dist_pickle = {}
    dist_pickle["mtx"] = mtx
    dist_pickle["dist"] = dist
    pickle.dump(dist_pickle, open('wide_dist_pickle.p', 'wb'))

def pickle_load():
    """Load a previously saved (mtx, dist) calibration from disk."""
    # Getting back the camera calibration result:
    with open('wide_dist_pickle.p', 'rb') as f:
        dist_pickle = pickle.load(f)
    return dist_pickle['mtx'], dist_pickle['dist']

def calibrate_camera(img):
    """Run the full calibration from the chessboard images and cache the result.

    :param img: any image with the camera's native resolution (only its
                shape is used to size the calibration)
    :return: (mtx, dist) camera matrix and distortion coefficients
    """
    img_size = (img.shape[1], img.shape[0])

    # Do camera calibration given object points and image points
    objpoints, imgpoints = get_3d2d_points(do_plot=False, do_file=False)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)

    # Save the camera calibration result
    pickle_dump(mtx, dist)
    return mtx, dist

# + [markdown] deletable=true editable=true
# ## Color/gradient threshold
#
# Combine color and gradient thresholds to generate a binary image where the lane lines are clearly visible.
#
# Output should be an array of the same size as the input image.
# The output array elements should be 1 where gradients were in the threshold range, and 0 everywhere else.

# + deletable=true editable=true
# For universal plotting of results
def plot_row2(img1, img2, label_1, label_2, graysc=True):
    """Plot two images side by side (1 row with 2 images).

    :param img1, img2: images to show (img2 is always drawn as grayscale)
    :param label_1, label_2: subplot titles
    :param graysc: draw img1 as grayscale too when True
    """
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 6))
    f.tight_layout()
    if graysc:
        ax1.imshow(img1, cmap='gray')
    else:
        ax1.imshow(img1)
    ax1.set_title(label_1, fontsize=16)
    ax2.imshow(img2, cmap='gray')
    ax2.set_title(label_2, fontsize=16)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

# + deletable=true editable=true
# Apply Sobel (Calculate directional gradient and apply gradient threshold)
def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):
    """Binary mask of the thresholded absolute Sobel gradient (x or y)
    computed on the HLS S channel of an RGB image."""
    # Convert to HLS and take S channel
    img_trans = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    img_trans = img_trans[:,:,2]

    # Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(img_trans, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
        sobel = cv2.Sobel(img_trans, cv2.CV_64F, 0, 1, ksize=sobel_kernel)

    # Take the absolute value of the derivative, then scale to 8-bit (0 - 255)
    abs_sobel = np.absolute(sobel)
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))

    # Mask of 1's where thresh[0] <= scaled gradient magnitude <= thresh[1]
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return binary_output

# Return the magnitude of the gradient for a given sobel kernel
# size and threshold values in both x and y
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of the thresholded gradient magnitude (sqrt(gx^2 + gy^2))
    computed on the HLS S channel of an RGB image."""
    # Convert to HLS and take S channel
    img_trans = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    img_trans = img_trans[:,:,2]

    # Take the gradient in x and y separately
    sobel_x = cv2.Sobel(img_trans, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobel_y = cv2.Sobel(img_trans, cv2.CV_64F, 0, 1, ksize=sobel_kernel)

    # Calculate the gradient magnitude and rescale to 8 bit
    gradmag = np.sqrt(sobel_x**2 + sobel_y**2)
    scale_factor = np.max(gradmag)/255
    gradmag = (gradmag/scale_factor).astype(np.uint8)

    # Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    return binary_output

# Calculate gradient direction and apply threshold
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction (atan2(|gy|, |gx|))
    lies within `thresh`, computed on the HLS S channel of an RGB image."""
    # Convert to HLS and take S channel
    img_trans = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    img_trans = img_trans[:,:,2]

    # Take the gradient in x and y separately
    sobel_x = cv2.Sobel(img_trans, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobel_y = cv2.Sobel(img_trans, cv2.CV_64F, 0, 1, ksize=sobel_kernel)

    # Direction of the gradient from the absolute x/y derivatives
    abs_sobel_x = np.absolute(sobel_x)
    abs_sobel_y = np.absolute(sobel_y)
    absgraddir = np.arctan2(abs_sobel_y, abs_sobel_x)

    # Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return binary_output

# + deletable=true editable=true
def combine_sobel_thresholds(img, do_plot=False):
    """Combine directional, magnitude and direction Sobel thresholds into a
    single binary mask: (gradx AND grady) OR (mag AND dir).

    FIX: the original referenced an undefined module-level global `image`
    when calling dir_threshold() and in the plotting calls, so the function
    only worked by accident when a global `image` happened to exist (and
    then thresholded the wrong picture). All operations now consistently
    use the `img` argument.

    :param img: undistorted RGB image
    :param do_plot: show the intermediate masks side by side
    :return: binary (0/1) mask with the same height/width as `img`
    """
    # Gaussian Blur to suppress noise before taking gradients
    kernel_size = 5
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)

    # Sobel kernel size (choose a larger odd number to smooth gradient measurements)
    ksize = 7

    # Apply Sobel on x-axis and y-axis
    grad_x_binary = abs_sobel_thresh(blurred, orient='x', sobel_kernel=ksize, thresh=(10, 255))
    grad_y_binary = abs_sobel_thresh(blurred, orient='y', sobel_kernel=ksize, thresh=(60, 255))
    # Magnitude and direction of the gradient
    mag_binary = mag_thresh(blurred, sobel_kernel=ksize, mag_thresh=(40, 255))
    dir_binary = dir_threshold(blurred, sobel_kernel=ksize, thresh=(0.65, 1.05))

    # Combine the thresholds
    combined = np.zeros_like(dir_binary)
    combined[((grad_x_binary == 1) & (grad_y_binary == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1

    if do_plot:
        plot_row2(img, grad_x_binary, 'Original Image (Undistorted)', 'Sobel on x-axis')
        plot_row2(grad_y_binary, mag_binary, 'Sobel on y-axis', 'Thresholded Magnitude')
        plot_row2(dir_binary, combined, 'Direction of gradient', 'Combined Thresholds')

    return combined

# + deletable=true editable=true
def color_channel_threshold(img, thresh=(0, 255), do_plot=False):
    """Binary mask of the HLS S channel thresholded to [thresh[0], thresh[1]].

    FIX: the plotting call previously referenced the undefined global
    `image`; it now plots the `img` argument.

    :param img: undistorted RGB image
    :param thresh: inclusive (low, high) S-channel bounds
    :param do_plot: show the original next to the mask
    :return: binary (0/1) mask with the same height/width as `img`
    """
    # Convert to HLS color space and separate the S channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:,:,2]
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= thresh[0]) & (s_channel <= thresh[1])] = 1

    if do_plot:
        plot_row2(img, s_binary, 'Original Image (Undistorted)', 'HLS(S-channel) threshold')

    return s_binary

# + [markdown] deletable=true editable=true
# ## Perspective transform
#
# Pick four points in a trapezoidal shape (similar to region masking) that would represent a rectangle when looking down on the road from above.
#
# The easiest way to do this is to investigate an image where the lane lines are straight, and find four points lying along the lines that, after perspective transform, make the lines look straight and vertical from a bird's eye view perspective.
# + deletable=true editable=true
# Applies an image mask.
# Only keeps the region of the image defined by the polygon formed from
# `vertices`; the rest of the image is set to black.
def region_of_interest(img, vertices):
    # Defining a blank mask to start with
    mask = np.zeros_like(img)
    ignore_mask_color = 255
    # Fill pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    # Return the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image


def perspective_transform(img, inv=False):
    """Warp `img` to a bird's-eye view, or back to camera view when `inv` is True.

    The source trapezoid and destination rectangle are hard-coded for
    1280x720 dashcam road images.
    """
    # Define 4 source points (trapezoid on the road)
    src = np.float32([[180, img.shape[0]], [575, 460], [705, 460], [1150, img.shape[0]]])
    # Define 4 destination points (rectangle in the warped view)
    dst = np.float32([[320, img.shape[0]], [320, 0], [960, 0], [960, img.shape[0]]])
    # Forward or inverse transform matrix
    if inv == False:
        M = cv2.getPerspectiveTransform(src, dst)
    else:
        M = cv2.getPerspectiveTransform(dst, src)
    # Warp image to a top-down view
    img_size = (img.shape[1], img.shape[0])
    warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
    return warped


# + [markdown] deletable=true editable=true
# ## Image Pipeline
# -

# Define a class to receive the characteristics of each line detection
class Line():
    def __init__(self):
        # Was the line detected in the last iteration?
        self.detected = False
        # Fit coefficients of the last n (12) accepted fits of the line
        self.recent_xfitted = collections.deque(12*[0.0, 0.0, 0.0], 12)
        # Average x values of the fitted line over the last n iterations
        self.bestx = None
        # Polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # Polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        # Radius of curvature of the line in some units (meters)
        self.radius_of_curvature = None
        # Distance in meters of vehicle center from the line
        self.line_base_pos = None
        # Difference in fit coefficients between last and new fits
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x / y values for detected line pixels
        self.allx = None
        self.ally = None


# + deletable=true editable=true
# CAMERA CALIBRATION AND IMAGE UNDISTORTION
def get_undist():
    """Load the saved camera calibration and return (camera matrix, distortion coeffs)."""
    img = cv2.imread('camera_cal/calibration1.jpg')
    # Load calibration data from pickle (produced earlier by calibrate_camera)
    mtx, dist = pickle_load()
    # Undistort once as a smoke test of the loaded calibration
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return mtx, dist


# + deletable=true editable=true
# COLOR / GRADIENT THRESHOLD
mtx, dist = get_undist()

def threshold(image):
    """Undistort `image` and OR together the gradient and color binary masks.

    Returns (combined_binary, undistorted_image).
    """
    undist_image = cv2.undistort(image, mtx, dist, None, mtx)
    # Combined Sobel operations
    combine_sobel = combine_sobel_thresholds(undist_image, do_plot=False)
    # S-channel color threshold
    color_thresh = color_channel_threshold(undist_image, thresh=(160, 255), do_plot=False)
    # Combine color and gradient thresholds
    combined_binary = np.zeros_like(color_thresh)
    combined_binary[(combine_sobel == 1) | (color_thresh == 1)] = 1
    return combined_binary, undist_image


# + deletable=true editable=true
# PERSPECTIVE TRANSFORM
def warp(thresholded):
    """Warp the thresholded binary image to a top-down view and mask the borders."""
    warped_img = perspective_transform(thresholded)
    # Keep only the vertical band 200px in from each side, where the lanes live
    imshape = warped_img.shape
    vertices = np.array([[(200, imshape[0]), (200, 0), (imshape[1] - 200, 0),
                          (imshape[1] - 200, imshape[0])]], dtype=np.int32)
    masked_img = region_of_interest(warped_img, vertices)
    return masked_img


# + deletable=true editable=true
import collections

def find_base_pts(warped):
    """Locate the left/right lane starting x positions from a column histogram."""
    # Histogram of the bottom half of the masked image
    histogram = np.sum(warped[warped.shape[0]//2:, :], axis=0)
    # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int
    midpoint = int(histogram.shape[0]/2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    return midpoint, leftx_base, rightx_base


# DETECT LANE LINES
def detect_lines(warped):
    """Find lane-line pixels in a warped binary image and fit 2nd-order polynomials.

    Uses the module-level `left_line` / `right_line` Line instances to keep
    state between video frames: a fresh sliding-window search runs when a line
    was lost in the previous frame, otherwise pixels are collected in a margin
    around the previous fit. Several sanity checks reject fits whose linear
    coefficient jumps too far from the previous frame.

    Returns (out_img, ploty, leftx, lefty, rightx, righty,
             left_fit, right_fit, left_fitx, right_fitx).
    """
    # Starting point for the left and right lines
    midpoint, leftx_base, rightx_base = find_base_pts(warped)
    # Output image to draw on and visualize the result
    out_img = np.dstack((warped, warped, warped))*255
    # Number of sliding windows
    nwindows = 9
    # Height of windows: e.g. 720/9=80 (np.int removed in NumPy 1.24)
    window_height = int(warped.shape[0] / nwindows)
    # x and y positions of all nonzero pixels in the image
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Width of the windows +/- margin
    margin = 100
    # Minimum number of pixels found to recenter window
    minpix = 50
    left_lane_inds = []
    right_lane_inds = []
    # Sliding windows (only when either line was lost in the previous frame)
    if (left_line.detected == False) or (right_line.detected == False):
        for window in range(nwindows):
            # Window boundaries in x and y (and right and left)
            win_y_low = warped.shape[0] - (window + 1) * window_height
            win_y_high = warped.shape[0] - window * window_height
            win_xleft_low = leftx_current - margin
            win_xleft_high = leftx_current + margin
            win_xright_low = rightx_current - margin
            win_xright_high = rightx_current + margin
            # Nonzero pixels in x and y within the window
            good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                              (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
            good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                               (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
            left_lane_inds.append(good_left_inds)
            right_lane_inds.append(good_right_inds)
            # If > minpix pixels found, recenter next window on their mean position
            if len(good_left_inds) > minpix:
                leftx_current = int(np.mean(nonzerox[good_left_inds]))
            if len(good_right_inds) > minpix:
                rightx_current = int(np.mean(nonzerox[good_right_inds]))
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
        left_line.detected = True
        right_line.detected = True
    else:
        # Targeted search in a margin around the previous polynomial fit
        left_lane_inds = ((nonzerox > (left_line.current_fit[0] * (nonzeroy**2) + left_line.current_fit[1] * nonzeroy + left_line.current_fit[2] - margin)) &
                          (nonzerox < (left_line.current_fit[0] * (nonzeroy**2) + left_line.current_fit[1] * nonzeroy + left_line.current_fit[2] + margin)))
        right_lane_inds = ((nonzerox > (right_line.current_fit[0] * (nonzeroy**2) + right_line.current_fit[1] * nonzeroy + right_line.current_fit[2] - margin)) &
                           (nonzerox < (right_line.current_fit[0] * (nonzeroy**2) + right_line.current_fit[1] * nonzeroy + right_line.current_fit[2] + margin)))
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fall back to the last successful pixels when too few were found,
    # to prevent the polyfit below from seeing empty x / y
    if (len(leftx) < 1500):
        leftx = left_line.allx
        lefty = left_line.ally
        left_line.detected = False
    else:
        left_line.allx = leftx
        left_line.ally = lefty
    if (len(rightx) < 1500):
        rightx = right_line.allx
        righty = right_line.ally
        right_line.detected = False
    else:
        right_line.allx = rightx
        right_line.ally = righty
    # Fit a second order polynomial to each line
    right_fit = np.polyfit(righty, rightx, 2)
    left_fit = np.polyfit(lefty, leftx, 2)
    # Sanity check: first-frame initialization
    if (left_line.current_fit[0] == False):
        left_line.current_fit = left_fit
        right_line.current_fit = right_fit
    # Reject a left fit whose linear coefficient jumped too far; otherwise
    # accept it and refresh the running average (best_fit) over the last 12 fits
    if (abs(left_line.current_fit[1] - left_fit[1]) > 0.18):
        left_line.current_fit = left_line.best_fit
        left_line.detected = False
    else:
        left_line.current_fit = left_fit
        left_line.recent_xfitted.pop()
        left_line.recent_xfitted.appendleft(left_line.current_fit)
        avg = np.array([0, 0, 0], dtype='float')
        for element in left_line.recent_xfitted:
            avg = avg + element
        left_line.best_fit = avg / (len(left_line.recent_xfitted))
    # Same rejection / averaging for the right fit
    if (abs(right_line.current_fit[1] - right_fit[1]) > 0.18):
        right_line.current_fit = right_line.best_fit
        right_line.detected = False
    else:
        right_line.current_fit = right_fit
        right_line.recent_xfitted.pop()
        right_line.recent_xfitted.appendleft(right_line.current_fit)
        avg = np.array([0, 0, 0], dtype='float')
        for element in right_line.recent_xfitted:
            avg = avg + element
        right_line.best_fit = avg / (len(right_line.recent_xfitted))
    # If only the right fit diverged badly, clone the left fit shifted by the
    # nominal lane width (600 warped pixels)
    if (abs(right_line.current_fit[1] - right_fit[1]) > 0.38 and abs(left_line.current_fit[1] - left_fit[1]) < 0.1):
        right_line.current_fit[0] = left_line.current_fit[0]
        right_line.current_fit[1] = left_line.current_fit[1]
        right_line.current_fit[2] = left_line.current_fit[2] + 600
        right_line.recent_xfitted.pop()
        right_line.recent_xfitted.appendleft(right_line.current_fit)
        avg = np.array([0, 0, 0], dtype='float')
        for element in right_line.recent_xfitted:
            avg = avg + element
        right_line.best_fit = avg / (len(right_line.recent_xfitted))
    # If only the left fit diverged badly, trust the fresh left fit
    if (abs(left_line.current_fit[1] - left_fit[1]) > 0.38 and abs(right_line.current_fit[1] - right_fit[1]) < 0.1):
        left_line.current_fit = left_fit
        left_line.recent_xfitted.pop()
        left_line.recent_xfitted.appendleft(left_line.current_fit)
        avg = np.array([0, 0, 0], dtype='float')
        for element in left_line.recent_xfitted:
            avg = avg + element
        left_line.best_fit = avg / (len(left_line.recent_xfitted))
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
    left_fitx = (left_line.current_fit[0] * ploty**2 + left_line.current_fit[1] * ploty + left_line.current_fit[2])
    right_fitx = (right_line.current_fit[0] * ploty**2 + right_line.current_fit[1] * ploty + right_line.current_fit[2])
    # Color the detected pixels: left red, right blue
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    return out_img, ploty, leftx, lefty, rightx, righty, left_fit, right_fit, left_fitx, right_fitx


# + deletable=true editable=true
# VEHICLE OFFSET FROM LANE CENTER
def calc_offset(xm_per_pix, ym_per_pix, y_eval, left_fit_cr, right_fit_cr):
    """Return the vehicle's lateral deviation (meters) from the lane center.

    Positive value indicates the vehicle is on the right side of lane center,
    negative on the left. Assumes a 1280px-wide image with the camera mounted
    on the vehicle's centerline.
    """
    # x bottom position (meters) of each lane line at y = y_eval
    left_lane_bottom = (left_fit_cr[0] * (y_eval * ym_per_pix) ** 2 + left_fit_cr[1] * (y_eval * ym_per_pix) + left_fit_cr[2])
    right_lane_bottom = (right_fit_cr[0] * (y_eval * ym_per_pix) ** 2 + right_fit_cr[1] * (y_eval * ym_per_pix) + right_fit_cr[2])
    # Mid point of the lane
    lane_midpoint = float(right_lane_bottom + left_lane_bottom) / 2
    # Image center in meters from the left edge of the image
    image_mid_point_in_meter = 1280/2 * xm_per_pix
    lane_deviation = (image_mid_point_in_meter - lane_midpoint)
    return lane_deviation


# MEASURE RADIUS OF CURVATURE AND VEHICLE OFFSET
def rad_and_offset(ploty, leftx, lefty, rightx, righty, left_fit, right_fit):
    """Return (left radius, right radius, lateral deviation), all in meters.

    `left_fit` / `right_fit` (pixel-space fits) are kept for interface
    compatibility; curvature is computed from the metric-space refit below.
    """
    # Evaluate curvature at the maximum y value (bottom of the image)
    y_eval = np.max(ploty)
    # Conversions in x and y from pixel space to meters
    ym_per_pix = 30/720   # meters per pixel in y dimension
    xm_per_pix = 3.7/700  # meters per pixel in x dimension
    # Fit a second order polynomial to each line in metric space
    left_fit_cr = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
    # Radius of curvature in meters: (1 + (2Ay + B)^2)^1.5 / |2A|
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2 * right_fit_cr[0])
    # Vehicle offset from lane center
    lane_deviation = calc_offset(xm_per_pix, ym_per_pix, y_eval, left_fit_cr, right_fit_cr)
    return left_curverad, right_curverad, lane_deviation


# + deletable=true editable=true
# DRAWING
def draw_res(warped, undistorted, out_img, ploty, left_fitx, right_fitx, left_curverad, right_curverad, lane_deviation):
    """Paint the detected lane back onto the undistorted frame with an overlay.

    Adds a thumbnail of the detection image plus curvature / deviation labels.
    """
    # Blank warped-space image to draw the lane polygon on
    color_warp = np.dstack((warped, warped, warped))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp back to original image space using the inverse perspective matrix
    newwarp = perspective_transform(color_warp, inv=True)
    # Combine the result with the original image
    result = cv2.addWeighted(undistorted, 1, newwarp, 0.3, 0)
    # Thumbnail of the detection image in the top-right corner
    x_offset = result.shape[1] - 320 - 30
    y_offset = 30
    thumb = cv2.resize(out_img, (320, 200), interpolation=cv2.INTER_CUBIC)
    result[y_offset:y_offset + thumb.shape[0], x_offset:x_offset + thumb.shape[1]] = thumb
    # Text overlays
    font = cv2.FONT_HERSHEY_SIMPLEX
    curv_l_label = 'Radius of Curvature (Left line): {:.0f} m.'.format(left_curverad)
    curv_r_label = 'Radius of Curvature (Right line): {:.0f} m.'.format(right_curverad)
    deviation_label = 'Vehicle Deviation: {:.3f} m.'.format(lane_deviation)
    cv2.putText(result, curv_l_label, (30, 60), font, 1, (255, 255, 255), 2)
    cv2.putText(result, curv_r_label, (30, 110), font, 1, (255, 255, 255), 2)
    cv2.putText(result, deviation_label, (30, 160), font, 1, (255, 255, 255), 2)
    return result


# + deletable=true editable=true
def image_pipeline(image):
    """Full frame pipeline: threshold -> warp -> detect -> measure -> draw."""
    thresholded, undistorted = threshold(image)
    warped = warp(thresholded)
    out_img, ploty, leftx, lefty, rightx, righty, left_fit, right_fit, left_fitx, right_fitx = detect_lines(warped)
    left_curverad, right_curverad, lane_deviation = rad_and_offset(ploty, leftx, lefty, rightx, righty, left_fit, right_fit)
    proc_img = draw_res(warped, undistorted, out_img, ploty, left_fitx, right_fitx, left_curverad, right_curverad, lane_deviation)
    return proc_img


# + deletable=true editable=true
# MONITOR (FOR TESTING IMAGE PIPELINE)
left_line = Line()
right_line = Line()
image = mpimg.imread('test_images/test1.jpg')
proc_img = image_pipeline(image)

# + deletable=true editable=true
# MONITOR (FOR TESTING VIDEO PIPELINE)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

left_line = Line()
right_line = Line()
output_video = 'project_video_processed.mp4'
clip1 = VideoFileClip("project_video.mp4")
video_clip = clip1.fl_image(image_pipeline)
# %time video_clip.write_videofile(output_video, audio=False)

# + deletable=true editable=true
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(output_video))

# + deletable=true editable=true
CarND-Advanced-Lane-Lines.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # gensim
#
# [Gensim](https://radimrehurek.com/gensim/) is a topic modeling API. However, gensim also has the ability to create word and document embeddings. Below is a dataset of book titles dealing with two "topics", one for "data science" and the other for "finance". We will use this dataset to see how gensim can be used to learn and recover topics.

# + tags=[]
import pandas as pd
import numpy as np
import random

# Seed both RNGs for reproducible sampling / model initialization
np.random.seed(37)
random.seed(37)

# 10 data-science titles (clazz=1) followed by 10 finance titles (clazz=0)
df = pd.DataFrame({
    'text': [
        'Data Science from Scratch: First Principles with Python',
        'Data Science for Business: What You Need to Know about Data Mining and Data-Analytic Thinking',
        'Practical Statistics for Data Scientists',
        'Build a Career in Data Science',
        'Python Data Science Handbook',
        'Storytelling with Data: A Data Visualization Guide for Business Professionals',
        'R for Data Science: Import, Tidy, Transform, Visualize, and Model Data',
        'Data-Driven Science and Engineering: Machine Learning, Dynamical Systems, and Control',
        'A Hands-On Introduction to Data Science',
        'Intro to Python for Computer Science and Data Science: Learning to Program with AI, Big Data and The Cloud',
        'How Finance Works: The HBR Guide to Thinking Smart About the Numbers',
        'The Intelligent Investor: The Definitive Book on Value Investing. A Book of Practical Counsel',
        'Introduction to Finance: Markets, Investments, and Financial Management',
        'Python for Finance: Mastering Data-Driven Finance',
        'The Infographic Guide to Personal Finance: A Visual Reference for Everything You Need to Know',
        'Personal Finance For Dummies',
        'Corporate Finance For Dummies',
        'Lords of Finance: The Bankers Who Broke the World',
        'Real Estate Finance & Investments',
        'Real Estate Finance and Investments Risks and Opportunities'
    ],
    'clazz': [1 for _ in range(10)] + [0 for _ in range(10)]
})

df.shape
# -

# We have to tokenize, lemmatize and remove stop words from the original text.

# + tags=[]
import string
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
# Treat punctuation as stop words too
stop_words = set(stopwords.words('english') + list(string.punctuation))

is_valid = lambda t: t not in stop_words
tokenize = lambda t: word_tokenize(t.lower())
lemmatize = lambda t: lemmatizer.lemmatize(t)

df['normalized'] = df.text.apply(lambda text: [lemmatize(t) for t in tokenize(text) if is_valid(t)])
df.shape
# -

# Words that appear only once are removed.

# + tags=[]
from itertools import chain
import nltk

# Token frequencies over all normalized titles, sorted descending
dist = nltk.FreqDist(list(chain(*df.normalized)))
dist = pd.Series([v for _, v in dist.items()], dist.keys()).sort_values(ascending=False)
dist = dist[dist > 1]

valid_tokens = set(list(dist.index))

df['valid'] = df.normalized.apply(lambda tokens: [t for t in tokens if t in valid_tokens])
df.shape
# -

# ## Topic modeling
#
# Now we will apply Latent Semantic Indexing `LSI` and Latent Dirichlet Allocation `LDA` for topic modeling.
# + tags=[]
from gensim import corpora

# Map tokens to ids and convert each title to a bag-of-words vector
dictionary = corpora.Dictionary(df.valid)
corpus = [dictionary.doc2bow(text) for text in df.valid]

# + tags=[]
from gensim import models

tfidf_model = models.TfidfModel(corpus)
corpus_tfidf = tfidf_model[corpus]

# LSI works on the TF-IDF weighted corpus; LDA on raw counts
lsi_model = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2)
corpus_lsi = lsi_model[corpus_tfidf]

lda_model = models.LdaModel(corpus, id2word=dictionary, iterations=50, num_topics=2)
corpus_lda = lda_model[corpus]
# -

# ### Visualize document vectors
#
# The LSI and LDA models can transform the documents into vector representation in principal component `PC` or probabilistic space, respectively. We try to visualize the documents in these spaces as a 2D visualization.

# + tags=[]
import numpy as np

lsi_data = np.array([[tup[1] for tup in arr] for arr in corpus_lsi])
lda_data = np.array([[tup[1] for tup in arr] for arr in corpus_lda])

# + tags=[]
import matplotlib.pyplot as plt

x = lsi_data[:, 0]
y = lsi_data[:, 1]
c = ['r' if i == 0 else 'g' for i in df.clazz]

fig, ax = plt.subplots(figsize=(5, 5))
_ = ax.scatter(x, y, c=c)

# + tags=[]
x = lda_data[:, 0]
y = lda_data[:, 1]
c = ['r' if i == 0 else 'g' for i in df.clazz]

fig, ax = plt.subplots(figsize=(5, 5))
_ = ax.scatter(x, y, c=c)
# -

# ### Print word to topic relevance
#
# The word importances to each topic can also be retrieved.

# + tags=[]
lsi_model.print_topics()
# -

lda_model.print_topics()

# ### Check coherence
#
# The two coherence measures below are `u_mass` and `c_v`. At a high level, these scores try to capture how well the words in each topic fit (co-occur) together. Better information about these scores are available at the links following.
#
# - [How does topic coherence score in LDA intuitively makes sense?](https://stats.stackexchange.com/questions/375062/how-does-topic-coherence-score-in-lda-intuitively-makes-sense)
# - [What is the formula for c_v coherence?](https://stats.stackexchange.com/questions/406216/what-is-the-formula-for-c-v-coherence)

# + tags=[]
from gensim.models.coherencemodel import CoherenceModel

lsi_cm = CoherenceModel(model=lsi_model, corpus=corpus_lsi, coherence='u_mass')
lda_cm = CoherenceModel(model=lda_model, corpus=corpus_lda, coherence='u_mass')

print('lsi', lsi_cm.get_coherence())
print('lda', lda_cm.get_coherence())

# + tags=[]
lsi_cm = CoherenceModel(model=lsi_model, texts=df.valid, coherence='c_v')
lda_cm = CoherenceModel(model=lda_model, texts=df.valid, coherence='c_v')

print('lsi', lsi_cm.get_coherence())
print('lda', lda_cm.get_coherence())
# -

# ### Check clustering quality
#
# The silhouette score can evaluate the clustering quality without the truth labels, while the rand score will evaluate the clustering quality with the truth labels. For both these scores, a higher value is better.

# + tags=[]
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, rand_score

kmeans = KMeans(n_clusters=2, random_state=37).fit(lsi_data)
sil_score = silhouette_score(lsi_data, kmeans.labels_)
ran_score = rand_score(df.clazz, kmeans.labels_)
print(f'silhouette: {sil_score:.5f}, rand: {ran_score}')

# + tags=[]
kmeans = KMeans(n_clusters=2, random_state=37).fit(lda_data)
# BUGFIX: the silhouette must be computed on the same data the clusters were
# fit on; this previously scored lsi_data against the lda_data clustering.
sil_score = silhouette_score(lda_data, kmeans.labels_)
ran_score = rand_score(df.clazz, kmeans.labels_)
print(f'silhouette: {sil_score:.5f}, rand: {ran_score}')
# -

# ## word2vec
#
# Word2vec `w2v` is an approach to vectorize words such that in the vector space, words that have similar meaning will be closer algebraically. These vectors are often called `embeddings`. The field has matured to the point where embeddings may be created for all sorts of data (not only text but also images). For example, there are also sentence, paragraph and document embeddings techniques.
#
# Below, we learn word embeddings for the book titles. The number of iterations `iter` is set to a very high number to get some stability.

# +
from gensim.test.utils import datapath
from gensim import utils

class BookTitleCorpus:
    """Iterable over the normalized token lists, as expected by Word2Vec."""
    def __iter__(self):
        titles = list(df.valid)
        for title in titles:
            yield title


# +
from gensim.models import Word2Vec

# NOTE: `size` / `iter` are the gensim 3.x parameter names
# (renamed to vector_size / epochs in gensim 4)
w2v_model = Word2Vec(**{
    'sentences': BookTitleCorpus(),
    'size': 5,
    'window': 1,
    'min_count': 1,
    'negative': 1,
    'iter': 5000,
    'compute_loss': True,
    'hs': 0,
    'sg': 1,
    'seed': 37
})
# -

# All the words that can be mapped to an embedding is access through the `wv.index2word` property.

w2v_model.wv.index2word

# You may retrieve the vector for each word as follows.

w2v_model.wv['data']

w2v_model.wv['science']

w2v_model.wv['finance']

# You can also query for similar words.

print(w2v_model.wv.most_similar(positive=['data']))

print(w2v_model.wv.most_similar(positive=['science']))

print(w2v_model.wv.most_similar(positive=['finance']))

# +
from sklearn.manifold import TSNE
import numpy as np

def reduce_dimensions(model):
    """Project the model's word vectors to 2D with t-SNE for plotting."""
    num_dimensions = 2
    vectors = np.asarray(model.wv.vectors)
    labels = np.asarray(model.wv.index2word)
    tsne = TSNE(n_components=num_dimensions, random_state=0)
    vectors = tsne.fit_transform(vectors)
    x_vals = [v[0] for v in vectors]
    y_vals = [v[1] for v in vectors]
    return x_vals, y_vals, labels

x_vals, y_vals, labels = reduce_dimensions(w2v_model)

fig, ax = plt.subplots(figsize=(5, 5))
_ = ax.scatter(x_vals, y_vals)

# Annotate a random sample of 5 words
indices = list(range(len(labels)))
selected_indices = random.sample(indices, 5)
for i in selected_indices:
    ax.annotate(labels[i], (x_vals[i], y_vals[i]))
# -

# ## doc2vec
#
# Doc2vec `d2v` creates vectors over documents. Take note of the `epochs` required for training to get stability.
# +
from gensim.models.doc2vec import TaggedDocument, Doc2Vec

def read_corpus(tokens_only=False):
    """Yield training documents; tag each with its row index unless tokens_only."""
    for i, tokens in enumerate(df.valid):
        if tokens_only:
            yield tokens
        else:
            yield TaggedDocument(tokens, [i])

tr_corpus = list(read_corpus())
te_corpus = list(read_corpus(True))

# Very high epoch count for stability on this tiny corpus
d2v_model = Doc2Vec(vector_size=5, min_count=1, epochs=8000)
d2v_model.build_vocab(tr_corpus)
d2v_model.train(tr_corpus, total_examples=d2v_model.corpus_count, epochs=d2v_model.epochs)
# -

# After the d2v model is trained, you can infer the vectors for any text.

vector = d2v_model.infer_vector('data science is cool'.split(' '))
print(vector)

vector = d2v_model.infer_vector('science is cool'.split(' '))
print(vector)

vector = d2v_model.infer_vector('finance is cool'.split(' '))
print(vector)

# Below is diagnostic code to see if a document is similar to itself. The results show that most times, a document is most similar to itself. This process is not deterministic and different runs will generate different results.

# +
from collections import Counter

ranks = []
for doc_id in range(len(tr_corpus)):
    inferred_vector = d2v_model.infer_vector(tr_corpus[doc_id].words)
    sims = d2v_model.docvecs.most_similar([inferred_vector], topn=len(d2v_model.docvecs))
    # Rank at which the document retrieves itself (0 = most similar)
    rank = [docid for docid, sim in sims].index(doc_id)
    ranks.append(rank)

print(Counter(ranks))
# -

# ## Word mover's distance

# +
import gensim.downloader as api

# Two copies of the same pretrained vectors; the second is L2-normalized
wm_model1 = api.load('word2vec-google-news-300')
wm_model2 = api.load('word2vec-google-news-300')
wm_model2.init_sims(replace=True)

# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

docs = list(df.valid)

# Pairwise word mover's distances for the raw and normalized models
distances1 = []
distances2 = []
for i in range(len(docs)):
    d1 = {f'd{j}': wm_model1.wmdistance(docs[i], docs[j]) for j in range(len(docs))}
    d2 = {f'd{j}': wm_model2.wmdistance(docs[i], docs[j]) for j in range(len(docs))}
    distances1.append(d1)
    distances2.append(d2)

dist_df1 = pd.DataFrame(distances1)
# BUGFIX: was pd.DataFrame(distances1), which made the "normalized" heatmap
# an exact duplicate of the first and silently dropped distances2.
dist_df2 = pd.DataFrame(distances2)
dist_df1.index = [f'd{i}' for i in range(len(docs))]
dist_df2.index = [f'd{i}' for i in range(len(docs))]

fig, ax = plt.subplots(1, 2, figsize=(15, 8))

_ = sns.heatmap(dist_df1, cmap=sns.color_palette('Greens'), ax=ax[0])
_ = sns.heatmap(dist_df2, cmap=sns.color_palette('Greens'), ax=ax[1])

_ = ax[0].set_title('Doc Similarities')
_ = ax[1].set_title('Doc Similarities (normalized)')
sphinx/scikit-intro/source/gensim.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python '
#     language: python
#     name: py
# ---

import numpy as np


# +
class LogisticRegressionClassifier:
    """Binary logistic regression trained with batch gradient descent.

    Parameters
    ----------
    learning_rate : float
        Step size for gradient descent.
    tol : float
        Stop early once the training cost drops to this value or below.
    n_iter : int
        Maximum number of gradient-descent iterations.
    penalty : str or None
        Regularization type. Only 'l2' and None are implemented;
        'l1' and 'elasticnet' are accepted but currently no-ops.
    C : float
        Weight of the data term relative to the L2 penalty.
    """

    def __init__(self, learning_rate=0.1, tol=1e-4, n_iter=600, penalty='l2', C=0.1):
        self.learning_rate = learning_rate
        self.n_iter = n_iter
        # 'l1', 'l2', 'elasticnet', None
        self.penalty = penalty
        self.C = C
        self.tol = tol

    def normalization(self, X):
        """Standardize each feature column to zero mean and unit variance."""
        mean = np.mean(X, axis=0, dtype=np.float64)
        var = np.var(X, axis=0, dtype=np.float64)
        return (X - mean) / (np.sqrt(var))

    def _sigmoid(self, Z):
        """Logistic function applied elementwise to the raw scores Z."""
        return 1 / (1 + np.exp(-Z))

    def cost(self, Z, y):
        """Cross-entropy cost of the raw scores Z against labels y.

        The 1e-8 terms guard against log(0). Returns None for the
        unimplemented 'l1' / 'elasticnet' penalties.
        """
        n_sample = Z.shape[0]
        if self.penalty == 'l2':
            return (-1/n_sample * self.C * np.sum((y * np.log(self._sigmoid(Z)+1e-8))
                                                  + ((1 - y) * np.log(1 - self._sigmoid(Z)+1e-8)))
                    + 1/(2*n_sample)*np.sum(self.w**2))
        elif self.penalty is None:
            return -1/n_sample * np.sum((y * np.log(self._sigmoid(Z)+1e-8))
                                        + ((1 - y) * np.log(1 - self._sigmoid(Z)+1e-8)))

    def fit(self, X, y, normalization=True):
        """Fit the weight vector by batch gradient descent.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training matrix.
        y : {array-like}, shape = [n_samples]
            Target labels (0/1).
        normalization : bool
            Currently unused; kept for interface compatibility.
            TODO: wire this up to self.normalization().

        Attributes
        ----------
        _costs : list
            Cost recorded at every iteration (for convergence plots).
        w : ndarray, shape = [n_features + 1]
            Learned weights; w[0] is the intercept.
        """
        # Record the cost at every iteration
        self._costs = []
        # Prepend a column of ones for the intercept
        X = np.insert(X, 0, 1, axis=1)
        n_sample, n_feature = X.shape
        # Initialize w
        self.w = np.ones(n_feature)
        # Gradient descent
        for count in range(self.n_iter):
            Z = np.dot(X, self.w)
            errors = self._sigmoid(Z) - y
            # Compute the cost once per iteration (previously evaluated 3x:
            # once discarded, once appended, once compared against tol)
            current_cost = self.cost(Z, y)
            self._costs.append(current_cost)
            if current_cost <= self.tol:
                print('stop at iteration {}'.format(count))
                break
            if self.penalty == 'l2':
                gradient = 1/n_sample * (self.C*np.dot(X.T, errors) + (self.w))
            elif self.penalty == 'l1':
                pass  # not implemented
            elif self.penalty == 'elasticnet':
                pass  # not implemented
            elif self.penalty is None:
                gradient = 1/n_sample * np.dot(X.T, errors)
            self.w -= self.learning_rate * gradient

    def predict(self, X):
        """Return 0/1 class predictions with a 0.5 probability threshold."""
        X = np.insert(X, 0, 1, axis=1)
        Z = np.dot(X, self.w)
        return np.where(self._sigmoid(Z) >= 0.5, 1, 0)

    def score(self, X, y):
        """Return the classification accuracy on (X, y)."""
        return sum(self.predict(X) == y) / len(y)


# +
from sklearn import datasets
from sklearn.model_selection import train_test_split

cancer = datasets.load_breast_cancer()
X = cancer.data
y = cancer.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

log = LogisticRegressionClassifier(penalty='l2')
log.fit(X_train, y_train)
y_pred = log.predict(X_test)
# -

log.w

from sklearn.metrics import classification_report, confusion_matrix

print(classification_report(y_test, y_pred))

confusion_matrix(y_test, y_pred)

import matplotlib.pyplot as plt

plt.plot(log._costs)

from sklearn.linear_model import LogisticRegression

lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
print(classification_report(y_test, y_pred))

confusion_matrix(y_test, y_pred)
Logistic Regression/LogisticRegressionClassifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python '
#     language: python
#     name: py
# ---

# # Load photos benchmark
# Before running the benchmark, download the data from the repository
# https://github.com/ria-com/nomeroff-net/tree/master/banchmarks/images
# to the images folder.

import time
import os
import numpy as np

# directory holding the benchmark images and the expected plate per file
dirName = "./images"
testData = { "0.jpeg": ["AI5255EI"], "1.jpeg": ["AT6883CM"], "2.jpeg": ["AT1515CK"], "3.jpeg": ["BX0578CE"], "4.jpeg": ["AC4249CB"], "5.jpeg": ["BC3496HC"], "6.jpeg": ["BC3496HC"], "7.jpeg": ["AO1306CH"], "8.jpeg": ["AE1077CO"], "9.jpeg": ["AB3391AK"], "10.jpeg": ["BE7425CB"], "11.jpeg": ["BE7425CB"], "12.jpeg": ["AB0680EA"], "13.jpeg": ["AB0680EA"], "14.jpeg": ["BM1930BM"], "15.jpeg": ["AI1382HB"], "16.jpeg": ["AB7333BH"], "17.jpeg": ["AB7642CT"], "18.jpeg": ["AC4921CB"], "19.jpeg": ["BC9911BK"], "20.jpeg": ["BC7007AK"], "21.jpeg": ["AB5649CI"], "22.jpeg": ["AX2756EK"], "23.jpeg": ["AA7564MX"], "24.jpeg": ["AM5696CK"], "25.jpeg": ["AM5696CK"], }


def print_result(end_time, j):
    """Report total elapsed time and per-photo average for j photos."""
    print(f"Processed {j} photos")
    print(f"Time {end_time}")
    print(f"One photo process {end_time/j} seconds")


# ### Pillow

# +
from PIL import Image

# decode every test image with Pillow and time the whole pass
n_imgs = 0
t0 = time.time()
for fname in testData:
    im = Image.open(os.path.join(dirName, fname))
    img = np.asarray(im)
    n_imgs += 1
elapsed = time.time() - t0
print_result(elapsed, n_imgs)
# -

# ### Pillow-SIMD

# +
from PIL import Image

# same loop as above — run in an environment where Pillow-SIMD replaces Pillow
n_imgs = 0
t0 = time.time()
for fname in testData:
    im = Image.open(os.path.join(dirName, fname))
    img = np.asarray(im)
    n_imgs += 1
elapsed = time.time() - t0
print_result(elapsed, n_imgs)
# -

# ### OpenCV

# +
import cv2

# OpenCV decodes straight into a numpy array
n_imgs = 0
t0 = time.time()
for fname in testData:
    img = cv2.imread(os.path.join(dirName, fname))
    n_imgs += 1
elapsed = time.time() - t0
print_result(elapsed, n_imgs)
# -

# ### TurboJPEG

# +
from turbojpeg import TurboJPEG

jpeg = TurboJPEG()
# TurboJPEG decodes from raw bytes, so read each file ourselves
n_imgs = 0
t0 = time.time()
for fname in testData:
    with open(os.path.join(dirName, fname), 'rb') as in_file:
        img = jpeg.decode(in_file.read())
    n_imgs += 1
elapsed = time.time() - t0
print_result(elapsed, n_imgs)
# -
load_speed.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %load_ext autoreload
# %autoreload 2

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

import sys
sys.path.append('..')
from text_recognizer.datasets.emnist import EmnistDataset
# -

# Instantiate the dataset wrapper and show its summary.
emnist_data = EmnistDataset()
print(emnist_data)

# Download/prepare the data if it is not cached yet.
emnist_data.load_or_generate_data()

emnist_data.x_train.shape, emnist_data.y_train.shape

emnist_data.x_test.shape, emnist_data.y_test.shape

# Plot the first nine training samples with their decoded labels.
fig = plt.figure(figsize=(9, 9))
for idx in range(9):
    panel = fig.add_subplot(3, 3, idx + 1)
    sample = emnist_data.x_train[idx].reshape(28, 28)
    panel.imshow(sample, cmap='gray')
    # labels are one-hot encoded; mapping translates the index to a character
    label = emnist_data.mapping[np.argmax(emnist_data.y_train[idx])]
    panel.set_title(label)
lab1/notebooks/01-look-at-emnist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <div style="color:#777777;background-color:#ffffff;font-size:12px;text-align:right;"> # prepared by <NAME> (QuSoft@Riga) | November 07, 2018 # </div> # <table><tr><td><i> I have some macros here. If there is a problem with displaying mathematical formulas, please run me to load these macros.</i></td></td></table> # $ \newcommand{\bra}[1]{\langle #1|} $ # $ \newcommand{\ket}[1]{|#1\rangle} $ # $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ # $ \newcommand{\inner}[2]{\langle #1,#2\rangle} $ # $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ # $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ # $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ # $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ # $ \newcommand{\mypar}[1]{\left( #1 \right)} $ # $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ # $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ # $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ # $ \newcommand{\onehalf}{\frac{1}{2}} $ # $ \newcommand{\donehalf}{\dfrac{1}{2}} $ # $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ # $ \newcommand{\vzero}{\myvector{1\\0}} $ # $ \newcommand{\vone}{\myvector{0\\1}} $ # $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ # $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ # $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ # $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ # $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ # $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & 
\frac{1}{2} } } $ # $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ # $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ # <h2> One Bit </h2> # # In daily life, we use decimal number system. It is also called base-10 system, because we have 10 digits: # # $ 0,~1,~2,~3,~4,~5,~6,~7,~8, \mbox{ and } 9 $. # # In computer science, on the other hand, the widely used system is binary, which has only two digits: # # $ 0 $ and $ 1 $. # # One bit (or binary digit) is the basic unit of information used in computer science. # # It can also be seen as the smallest "useful" memory unit, which has two states named 0 and 1. # # One bit can be in (or store) either 0 or 1. # <h3> Four operators </h3> # # How many different operators can be defined on a single bit? # # <i>An operator, depending on the current state of the bit, updates the state of bit (the result may be the same state).</i> # # We can apply four different operators to a single bit: # <ol> # <li> Identity: $ I(0) = 0 $ and $ I(1) = 1 $ </li> # <li> Negation: $ NOT(0) = 1 $ and $ NOT(1) = 0 $ </li> # <li> Constant (Zero): $ ZERO(0) = 0 $ and $ ZERO(1) = 0 $ </li> # <li> Constant (One): $ ONE(0) = 1 $ and $ ONE(1) = 1 $ </li> # </ol> # The first operator is called IDENTITY, because it does not change the content/value of the bit. # # The second operator is named NOT, bacause it negates (flips) the value of bit. # # <i>Remark that 0 and 1 also refers to Boolean values False and True, respectively, and, False is the negation of True, and True is the negation of False.</i> # # The third (resp., fourth) operator returns a constant value 0 (resp., 1), whatever the input is. # <h3> Table representation </h3> # # Let's represent the transition of each operator by a table. 
# # In each table, # <ul> # <li> the header (first row) representing the initial values, </li> # <li> the first column representing the final values, </li> # <li> we use 1 if there is a transition between two values, and, </li> # <li> we use 0 if there is no transition between two values. </li> # </ul> # # The table representation of the identity operator is given below: # # $ # I = \begin{array}{c|cc} & \mathbf{0} & \mathbf{1} \\ \hline \mathbf{0} & 1 & 0 \\ \mathbf{1} & 0 & 1 \end{array} # $ # # The values in <b>bold</b> are the initial and final values of the bits. The non-bold values represent the transitions. # <ul> # <li> The top-left non-bold 1 represents the transtion $ 0 \rightarrow 0 $. </li> # <li> The bottom-right non-bold 1 represents the transtion $ 1 \rightarrow 1 $. </li> # <li> The top-right non-bold 0 means that there is no transition from 1 to 0. </li> # <li> The bottom-left non-bold 0 means that there is no transition from 0 to 1. </li> # </ul> # The reader may think the values 0 and 1 representing the transitions as True (On) and False (Off), respectively. # # Similarly, we can represent the other operators as below: # # $ # NOT = \begin{array}{c|cc} & \mathbf{0} & \mathbf{1} \\ \hline \mathbf{0} & 0 & 1 \\ \mathbf{1} & 1 & 0 \end{array} # ~~~~~~~~ # ZERO = \begin{array}{c|cc} & \mathbf{0} & \mathbf{1} \\ \hline \mathbf{0} & 1 & 1 \\ \mathbf{1} & 0 & 0 \end{array} # ~~~~~~~~ # ONE = \begin{array}{c|cc} & \mathbf{0} & \mathbf{1} \\ \hline \mathbf{0} & 0 & 0 \\ \mathbf{1} & 1 & 1 \end{array} # . # $ # <h3> Task 1 </h3> # # Convience yourself with the correctness of each table. # <h3> Reversibility and Irreversibility </h3> # # After applying Identity or NOT operatotor, we can easily determine the initial value by checking the final value. # <ul> # <li> In the case of Identity operator, we simply say the same value. 
# </li>
#     <li> In the case of NOT operator, we simply say the other value, i.e., if the final value is 0 (resp., 1), then we say 1 (resp., 0). </li>
# </ul>
#
# However, we cannot know the initial value by checking the final value after applying ZERO or ONE operator.
#
# Based on this observation, we can classify the operators into two types: <i>Reversible</i> and <i>Irreversible</i>.
# <ul>
#     <li> If we can recover the initial value(s) from the final value(s), then the operator is called reversible like Identity and NOT operators. </li>
#     <li> If we cannot know the initial value(s) from the final value(s), then the operator is called irreversible like ZERO and ONE operators. </li>
# </ul>
#
# <b> This classification is important, because, as will be seen later, the quantum evolution operators are reversible. </b>
#
# The identity operator does not have any effect on the computation, and so it is not interesting.
#
# But we will widely use the NOT operator in our quantum algorithms.

# <h3> Our first quantum circuit </h3>
#
# For our quantum programs, we will design quantum circuits.
#
# As a warm-up example, here we design a circuit with a single quantum bit.
#
# We also highlight the details on designing quantum circuits along with our codes.

# +
# A quantum circuit is composed of quantum and classical bits.
#
# These are the objects needed to create one:
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit

# a quantum register keeps our quantum bits — a single qubit in this example
qreg = QuantumRegister(1)

# to get information out of a quantum bit it must be measured (more details
# will appear), and the measurement result is stored classically; therefore
# we also need a classical register — with a single classical bit here
creg = ClassicalRegister(1)

# a quantum circuit is composed of a quantum and a classical register
mycircuit = QuantumCircuit(qreg, creg)

# operators (also called gates) are applied to quantum bits; "x" is the NOT
# operator, and the target qubit is passed as the parameter — (quantum) bits
# are enumerated starting from 0, so the x-gate acts on the first qubit
mycircuit.x(qreg[0])

# run the codes until now, then draw the circuit
print("Everything looks fine, let's continue ...")
# +
# matplotlib_circuit_drawer is referred to shortly as "drawer" in our codes
from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer

# draw the circuit
drawer(mycircuit)  # re-execute me if you DO NOT see the circuit diagram
# -

# <b>Congratulations!</b> if you see your diagram :-)
#
# <i><u>Remark</u></i>: Qiskit has its own counters to enumerate the objects ($ q2_0 $, $ c2_0 $, etc.).
# <ul>
#     <li> They might be useful for debugging, but currently you can simply discard this part.
#     <li> You may also notice that when you re-run the same codes, the index of objects will be increased.
# </ul>
#
# The value of the quantum bit is 0 at the beginning. Technically, we denote it as $ \ket{0} $, called <b><i>ket</i>-notation</b>.
#
# The value of the classical bit is also 0 at the beginning.
#
# Classical or quantum, each bit is represented as a straight line. You may think of it as a wire.
#
# The x-gate is shown as a square.
#
# The value of the quantum bit is expected to be $ \ket{1} $ after the operator.
#
# Let's measure the first qubit (define a measurement operator), and then execute our circuit and see the result.

# +
# a measurement is defined by associating a quantum bit with a classical bit;
# the result will be stored in the classical bit
mycircuit.measure(qreg[0], creg[0])

print("Everything looks fine, let's continue ...")
# -

# draw the circuit again to see how the measurement is defined
drawer(mycircuit)  # re-execute me if you DO NOT see the circuit diagram

# +
# the design of our circuit is done — now we can execute it;
# quantum circuits are executed many times (WHY?)
from qiskit import execute, Aer

# create a job object for the execution of the circuit with three parameters:
#   1. mycircuit
#   2. the backend on which it will be executed: the local simulator
#   3. how many times it will be executed: 1024 shots
job = execute(mycircuit, Aer.get_backend('qasm_simulator'), shots=1024)

# fetch the result of the outcome as follows
counts = job.result().get_counts(mycircuit)
print(counts)  # usually quantum programs produce probabilistic outcomes
# -

# We expect to see 1 in each execution, and so the output should be $ \{\mbox{'1'}:~1024\} $.
#
# That is, the outcome 1 is measured 1024 times.
#
# <i>The output is composed by pairs, and each pair showing the measurement outcome and how many times it was measured.</i>
# <br>
# <i>For example, if the output is $ \{\mbox{'0': 500, '1': 524}\} $, then we know that the outcome 0 is measured 500 times and the outcome 1 is measured 524 times. </i>
#
# <b><i> Quantum programs usually give probabilistic outcomes.
#
# Therefore, we should execute them many times to get more reliable outcomes. </b></i>

# <h3> My second quantum circuit </h3>
#
# Let's create a quantum circuit with four quantum bits.
#
# We use almost the same code with small modifications.
#
# Our comments explain only the modifications.
# +
#
# My second quantum circuit
#
# we import everything at once
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer

# this time we will use 4 quantum bits and 4 classical bits
qreg2 = QuantumRegister(4)
creg2 = ClassicalRegister(4)
mycircuit2 = QuantumCircuit(qreg2, creg2)

# apply x-gate to the first quantum bit twice
mycircuit2.x(qreg2[0])
mycircuit2.x(qreg2[0])

# apply x-gate to the fourth quantum bit once
mycircuit2.x(qreg2[3])

# apply x-gate to the third quantum bit three times
mycircuit2.x(qreg2[2])
mycircuit2.x(qreg2[2])
mycircuit2.x(qreg2[2])

# apply x-gate to the second quantum bit four times
mycircuit2.x(qreg2[1])
mycircuit2.x(qreg2[1])
mycircuit2.x(qreg2[1])
mycircuit2.x(qreg2[1])

# if the quantum and classical registers have the same size, all measurements
# can be defined with a single line of code: each quantum bit is associated
# with the classical bit of the same index
mycircuit2.measure(qreg2, creg2)

# let's run our codes until now, and then draw our circuit
print("Everything looks fine, let's continue ...")
# -

drawer(mycircuit2)  # re-execute me if you DO NOT see the circuit diagram

# <h3> Task 2 </h3>
#
# Guess the outcome by checking the circuit.
#
# Then, compare your guess with the result obtained after executing our circuit 500 times.

job = execute(mycircuit2, Aer.get_backend('qasm_simulator'), shots=500)
counts = job.result().get_counts(mycircuit2)
print(counts)

# <h3> An important technical note:</h3>
#
# In Qiskit the outcomes are read from the classical register bottom to top
# $$ creg[3],~creg[2],~creg[1],~creg[0] $$
# and so, we see the <b><u><i>reverse</i></u></b> of what we expected.
#
# <i>If you think of the overall output as a single number, then it is assumed that the upper bits are the least significant bits.</i>

# <h3> Reverse the output </h3>
#
# Let's write a method to print the reversed outcomes, as we expected.
#
# You may use this method later.

# +
def print_outcomes(counts):
    """Print every outcome of a counts dictionary with its bit string reversed.

    Qiskit reports bit strings with creg[0] as the rightmost character;
    reversing restores the "qubit 0 first" reading order used in the text.
    """
    for outcome in counts:  # for each key (bit string) in the dictionary
        # slicing with step -1 reverses the string in one pass
        # (replaces the original character-by-character reversal loop)
        print(outcome[::-1], "is observed", counts[outcome], "times")


job = execute(mycircuit2, Aer.get_backend('qasm_simulator'), shots=1024)
counts = job.result().get_counts(mycircuit2)  # counts is a dictionary object in python
print_outcomes(counts)
# -

# <h3>Pick a random number</h3>
#
# In your first circuit design task, you will be asked to apply x-gate randomly.
#
# Here is one of the methods to pick a random number in python.

# +
from random import randrange

n = 20
r = randrange(n)  # pick a number from the list {0, 1, ..., n-1}
print(r)

# test this method by using a loop
for i in range(10):
    print(randrange(n))
# -

# <h3> Task 3 </h3>
#
# Design a quantum circuit with 10 quantum bits and 10 classical bits.
#
# For each quantum bit, flip a coin, and apply x-gate if the outcome is head.
#
# Measure your quantum bits.
#
# Execute your circuit 128 times.
#
# Repeat this task as much as you want, and enjoy your random choices.

# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from qiskit.tools.visualization import matplotlib_circuit_drawer as drawer
from random import randrange

nib = 10  # number of quantum/classical bits
qreg = QuantumRegister(nib)
creg = ClassicalRegister(nib)
mycircuit = QuantumCircuit(qreg, creg)

# flip a fair coin for every qubit: randrange(2) yields 0 or 1 with equal
# probability, and 0 counts as "head".
# (cleanup: the loop-invariant n = 2 was hoisted out of the loop and a stale
#  commented-out measurement line was removed)
for j in range(nib):
    if randrange(2) == 0:
        mycircuit.x(qreg[j])

# measure every qubit into the classical bit with the same index
for i in range(nib):
    mycircuit.measure(qreg[i], creg[i])

print("work")

drawer(mycircuit)

job = execute(mycircuit, Aer.get_backend('qasm_simulator'), shots=128)
counts = job.result().get_counts(mycircuit)
print(counts)
# -

# <a href="..\bronze-solutions\B24_One_Bit_Solution.ipynb#task3">click for our solution</a>
bronze/.ipynb_checkpoints/B24_One_Bit-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ***
# # Beginner's Workshop on Machine Learning: _Kernels_
# ***
#
# This tutorial is basically split into four parts:
# 1. Kernel Methods
# 2. Standard Kernels
# 3. Learning Kernels
# 4. Approximating Kernels
#
# Throughout all parts, four distinct datasets can be used to
# examine various settings. Descriptions of each of the datasets
# can be found on the respective website:
#
# 1. MNIST (http://yann.lecun.com/exdb/mnist/)
# 2. EMNIST (https://www.nist.gov/itl/iad/image-group/emnist-dataset)
# 3. Fashion-MNIST (https://github.com/zalandoresearch/fashion-mnist)
# 4. Cifar10 (https://www.cs.toronto.edu/~kriz/cifar.html)
#
# ***

import matplotlib.pyplot as plt
import sklearn.svm as svm
import sklearn.metrics as metrics
import sklearn.kernel_approximation as approx
import numpy as np
import time

# %matplotlib inline

# ## Datasets
#
# Functions for loading the various datasets. E.g. MNIST: This well-known dataset consists of hand-written digits and hence, splits into 10 classes ('0'-'9'). Moreover, train- and testsets are separated and contain 60.000 (10.000) examples in total. For more information about the datasets, follow one of the links above.

def _torchvision_to_arrays(train_set, test_set, side, get_label, get_image):
    """Convert a torchvision train/test dataset pair into flat numpy arrays.

    Shared post-processing for all four loaders below (previously four
    copy-pasted bodies). `get_label`/`get_image` extract label and flattened
    pixel vector from a single (image, target) sample; pixel values are
    scaled to [0, 1]. NOTE: dtype=int / dtype=float replace the np.int /
    np.float aliases, which were removed in NumPy 1.24; the resulting
    dtypes are identical.
    """
    test_labels = np.array([get_label(test_set[i]) for i in range(len(test_set))], dtype=int)
    train_labels = np.array([get_label(train_set[i]) for i in range(len(train_set))], dtype=int)
    test = np.array([get_image(test_set[i]) for i in range(len(test_set))], dtype=float)
    train = np.array([get_image(train_set[i]) for i in range(len(train_set))], dtype=float)
    train /= 255.  # normalize data to be in range [0,1]
    test /= 255.
    return train, train_labels, test, test_labels, [side, side]


def load_mnist_dataset():
    """Load MNIST as (train, train_labels, test, test_labels, im_size)."""
    import torchvision.datasets as datasets
    mnist_train = datasets.MNIST(root='./data/mnist', train=True, download=True, transform=None)
    mnist_test = datasets.MNIST(root='./data/mnist', train=False, download=True, transform=None)
    return _torchvision_to_arrays(
        mnist_train, mnist_test, 28,
        get_label=lambda sample: sample[1].numpy(),
        get_image=lambda sample: np.asarray(sample[0]).reshape(28*28))


def load_fashion_mnist_dataset():
    """Load Fashion-MNIST as (train, train_labels, test, test_labels, im_size)."""
    import torchvision.datasets as datasets
    mnist_train = datasets.FashionMNIST(root='./data/fashion-mnist', train=True, download=True, transform=None)
    mnist_test = datasets.FashionMNIST(root='./data/fashion-mnist', train=False, download=True, transform=None)
    return _torchvision_to_arrays(
        mnist_train, mnist_test, 28,
        get_label=lambda sample: sample[1].numpy(),
        get_image=lambda sample: np.asarray(sample[0]).reshape(28*28))


def load_emnist_dataset():
    """Load EMNIST (balanced split) as (train, train_labels, test, test_labels, im_size)."""
    import torchvision.datasets as datasets
    mnist_train = datasets.EMNIST(root='./data/emnist', split='balanced', train=True, download=True, transform=None)
    mnist_test = datasets.EMNIST(root='./data/emnist', split='balanced', train=False, download=True, transform=None)
    return _torchvision_to_arrays(
        mnist_train, mnist_test, 28,
        get_label=lambda sample: sample[1].numpy(),
        get_image=lambda sample: np.asarray(sample[0]).reshape(28*28))


def load_cifar10_dataset():
    """Load CIFAR-10 (grayscale) as (train, train_labels, test, test_labels, im_size)."""
    import torchvision.datasets as datasets
    cifar_train = datasets.CIFAR10(root='./data/cifar10', train=True, download=True, transform=None)
    cifar_test = datasets.CIFAR10(root='./data/cifar10', train=False, download=True, transform=None)
    # CIFAR labels are plain ints, and the RGB images are converted to
    # single-channel float ('F') before flattening
    return _torchvision_to_arrays(
        cifar_train, cifar_test, 32,
        get_label=lambda sample: sample[1],
        get_image=lambda sample: np.asarray(sample[0].convert('F')).reshape(32*32))


# ***
# ## Part I: Investigate Dataset using Outlier Detection
#
# There are two tasks:
#
# 1. Implement a version of the rbf kernel with $\exp(\frac{-\|x-y\|^2}{param})$
# 2. Implement the one-class SVM for the special case of $\nu = 1$

# train, train_labels, test, test_labels, im_size = load_mnist_dataset()
train, train_labels, test, test_labels, im_size = load_fashion_mnist_dataset()
# train, train_labels, test, test_labels, im_size = load_emnist_dataset()
# train, train_labels, test, test_labels, im_size = load_cifar10_dataset()

print('Train dataset size: ', train.shape)
print('Test dataset size: ', test.shape)
print('Num classes: ', np.unique(train_labels).size)
print('Image size: ', im_size)


def calc_rbf_kernel(X, Y=None, param=1.):
    """TODO (workshop exercise): return the RBF kernel matrix
    exp(-||x-y||^2 / param) between the rows of X and Y (Y defaults to X).
    The stub returns -1 until implemented."""
    if Y is None:
        Y = X
    return -1


def one_class_svm_nu_one(kernel):
    """TODO (workshop exercise): return the outlier scores of the one-class
    SVM for nu = 1, given a precomputed kernel matrix.
    The stub returns -1 until implemented."""
    return -1


# +
# 1. crude check of your kernel implementation
K = calc_rbf_kernel(train[:100, :], param=10.)

# 1.a. check if eigenvalues are all non-negative (=positive semi-definite matrix)
w, _ = np.linalg.eig(K)
# BUG FIX: this previously read assert(np.all(w) >= 0.0), which compares the
# single boolean np.all(w) with 0.0 and is therefore always True. The
# comparison belongs inside np.all; .real and a tiny tolerance absorb
# floating-point round-off in the eigenvalue computation.
assert(np.all(w.real >= -1e-8))

# 1.b. check if there are significant abberations from the sklearn implementation
K_check = metrics.pairwise.rbf_kernel(train[:100, :], gamma=1./10.)
assert(np.max(np.abs(K-K_check)) < 1e-03)

# 2. crude check of your special case one-class SVM implementation
outlier_scores = one_class_svm_nu_one(0.1*np.random.randn(4, 4) + np.eye(4))
assert(outlier_scores.size == 4)
assert(outlier_scores.shape[0] == 4)

# +
CLASS = 1
NUM_TRAIN_SAMPLES = 200
RBF_PARAM = 1.
class_inds = np.where(train_labels == CLASS)[0]
# BUG FIX: this previously read np.random.permutation(inds.size), but `inds`
# is undefined anywhere in the notebook — the permutation must be over the
# indices of the selected class.
train_inds = np.random.permutation(class_inds.size)[:NUM_TRAIN_SAMPLES]

fig = plt.figure(figsize=(8, 4), dpi=80, facecolor='w', edgecolor='k')

# mean image of the selected class
plt.subplot(2, 5, 1)
plt.title('Mean', fontsize=16)
class_mean = np.mean(train[class_inds, :], axis=0)
plt.imshow(class_mean.reshape(im_size[0], im_size[1]), cmap='binary')
plt.xticks([], [], fontsize=14)
plt.yticks([], [], fontsize=14)

# median image of the selected class
plt.subplot(2, 5, 2)
plt.title('Median', fontsize=16)
class_median = np.median(train[class_inds, :], axis=0)
plt.imshow(class_median.reshape(im_size[0], im_size[1]), cmap='binary')
plt.xticks([], [], fontsize=14)
plt.yticks([], [], fontsize=14)

# kernel between the training subsample and the full class
kernel = calc_rbf_kernel(train[class_inds[train_inds], :], train[class_inds, :], param=RBF_PARAM)
print(kernel.shape)
outlier_scores = one_class_svm_nu_one(kernel)
outlier_ranking = np.argsort(-outlier_scores)
print(outlier_scores.shape)
# np.int(...) was a deprecated alias (removed in NumPy 1.24); int(...) is identical
print('Memory footprint of your RBF matrix ({0}x{1} elements): {2}MB'.format(
    kernel.shape[0], kernel.shape[1], int(kernel.size*kernel.itemsize / (1024.*1024.))))

# most nominal samples (highest scores) ...
for i in range(3):
    plt.subplot(2, 5, 3+i)
    plt.title('Nominal {0}'.format(i+1), fontsize=16)
    plt.imshow(train[class_inds[outlier_ranking[i]], :].reshape(im_size[0], im_size[1]), cmap='binary')
    plt.xticks([], [], fontsize=14)
    plt.yticks([], [], fontsize=14)

# ... and the strongest outliers (lowest scores)
for i in range(5):
    plt.subplot(2, 5, 6+i)
    plt.title('Outlier {0}'.format(i+1), fontsize=16)
    plt.imshow(train[class_inds[outlier_ranking[-(i+1)]], :].reshape(im_size[0], im_size[1]), cmap='binary')
    plt.xticks([], [], fontsize=14)
    plt.yticks([], [], fontsize=14)

fig.tight_layout()
# -

plt.plot(np.arange(outlier_ranking.size), outlier_scores[outlier_ranking], '.-r')
plt.grid(True)
plt.xlabel('sorted samples')
plt.ylabel('outlier score')

# ***
# ## Part II: Image Classification using Histogram Intersection Kernels
#

# train, train_labels, test, test_labels, im_size = load_mnist_dataset()
# train, train_labels, test, test_labels, im_size = load_fashion_mnist_dataset()
# train, train_labels, test, test_labels, im_size = load_emnist_dataset()
train, train_labels, test, test_labels, im_size = load_cifar10_dataset()

print('Train dataset size: ', train.shape)
print('Test dataset size: ', test.shape)
print('Num classes: ', np.unique(train_labels).size)
print('Image size: ', im_size)


# +
def calc_hist_intersect_kernel(X, Y=None):
    """TODO (workshop exercise): return the histogram intersection kernel
    between the rows of X and Y (Y defaults to X).
    The stub returns -1 until implemented."""
    if Y is None:
        Y = X
    return -1


K = calc_hist_intersect_kernel(train[:100, :], train[:10, :])
assert(K.shape[0] == 100 and K.shape[1] == 10)
# -

def calc_histogram_features(X, bins=10, normalize=False):
    """TODO (workshop exercise): return per-sample histogram features with
    `bins` bins, optionally normalized. The stub returns -1 until implemented."""
    num_samples, num_features = X.shape
    return -1


# +
# NOTE(review): these inline class comments look stale — they name
# Fashion-MNIST classes while the active loader above is CIFAR-10;
# confirm the intended class pair.
POS_CLASS = 2  # 6: Shirt
NEG_CLASS = 9  # 0: T-Shirt/Top
BINS = 20
NORMALIZE = True
NUM_TRAIN_SAMPLES = 200
NUM_TEST_SAMPLES = 200

pos_class_inds = np.where(train_labels == POS_CLASS)[0]
neg_class_inds = np.where(train_labels == NEG_CLASS)[0]
class_inds = np.concatenate((pos_class_inds, neg_class_inds))
# relabel to the SVM convention: positive class = +1, negative class = -1
lbl = train_labels[class_inds]
lbl[lbl == NEG_CLASS] = -1
lbl[lbl == POS_CLASS] = +1

# split in train and test chunks
rand_inds = np.random.permutation(class_inds.size)
train_inds = rand_inds[:NUM_TRAIN_SAMPLES]
test_inds = rand_inds[NUM_TRAIN_SAMPLES:]
test_inds = test_inds[:NUM_TEST_SAMPLES]
print('There are {0} positive samples and {1} negative samples in the dataset.'.format(
    np.sum(lbl[rand_inds[:NUM_TRAIN_SAMPLES+NUM_TEST_SAMPLES]] == +1),
    np.sum(lbl[rand_inds[:NUM_TRAIN_SAMPLES+NUM_TEST_SAMPLES]] == -1)))

# calculate the histogram features and the corresponding train/test kernels
transf_train = calc_histogram_features(train[class_inds, :], bins=BINS, normalize=NORMALIZE)
K1 = calc_hist_intersect_kernel(transf_train[train_inds, :])
K2 = calc_hist_intersect_kernel(transf_train[train_inds, :], transf_train[test_inds, :])

# train the SVM using our histogram kernel
classifier = svm.SVC(kernel='precomputed')
classifier.fit(K1, lbl[train_inds])
pred = classifier.decision_function(K2.T)
# pred = classifier.predict(K2.T)
fpr, tpr, thresholds = metrics.roc_curve(lbl[test_inds], pred, pos_label=1)
auc_hist_kernel = metrics.auc(fpr, tpr)

# train the SVM using standard RBF kernels
classifier = svm.SVC(kernel='rbf', gamma='auto')
classifier.fit(train[class_inds[train_inds], :].copy(), lbl[train_inds])
pred = classifier.decision_function(train[class_inds[test_inds], :])
# pred = classifier.predict(train[class_inds[test_inds], :].copy())
fpr, tpr, thresholds = metrics.roc_curve(lbl[test_inds], pred, pos_label=1)
auc_rbf_kernel = metrics.auc(fpr, tpr)

print('AUC RBF kernel: ', auc_rbf_kernel)
print('AUC histogram intersection kernel: ', auc_hist_kernel)

fig = plt.figure(figsize=(12, 4), dpi=80, facecolor='w', edgecolor='k')

plt.subplot(1, 2, 1)
plt.title('Mean Class-wise Histograms')
mean_hist_neg = np.mean(transf_train[lbl==-1, :], axis=0)
mean_hist_pos = np.mean(transf_train[lbl==+1, :], axis=0)
hist_intersect = np.min((mean_hist_neg, mean_hist_pos), axis=0)
plt.bar(np.arange(BINS), mean_hist_neg, alpha=0.8)
plt.bar(np.arange(BINS)+0.2, mean_hist_pos, alpha=0.8)
plt.plot(np.arange(BINS), hist_intersect, '.-r')
plt.grid(True)
plt.legend(['intersect', '-', '+'], loc=2)
plt.xlabel('bins')
plt.ylabel('mean occurrence')  # typo fix: was 'mean occurance'

plt.subplot(1, 2, 2)
plt.title('Histogram Example')
hist_neg_exm = transf_train[0, :]
hist_pos_exm = transf_train[-1, :]
hist_intersect = np.min((hist_neg_exm, hist_pos_exm), axis=0)
plt.bar(np.arange(BINS), hist_neg_exm, alpha=0.8)
plt.bar(np.arange(BINS)+0.2, hist_pos_exm, alpha=0.8)
plt.plot(np.arange(BINS), hist_intersect, '.-r')
plt.grid(True)
plt.legend(['intersect', 'exm (lbl={0})'.format(lbl[0]), 'exm (lbl={0})'.format(lbl[-1])], loc=2)
plt.xlabel('bins')
plt.ylabel('mean occurrence')  # typo fix: was 'mean occurance'

plt.tight_layout()
# -

# ***
# ## Part III: Multiple Kernel Learning
#
# Your tasks:
# 1. Finish the 'normalize_kernel' function (=normalize datapoints in feature space)
# 2. Implement 'center_kernel' (=center datapoints in feature space)
# 3.
Implement 'combine_kernels' # + def normalize_kernel(K): # A kernel K is normalized, iff K_ii = 1 \forall i return -1 def center_kernel(K): # Mean free in feature space return -1 def combine_kernels(kernels, mix): return -1 # - def fit_mkl(kernels, lbls, pnorm=2., precision=1e-9, max_iter=100): iter = 0 lbls = lbls[:, np.newaxis] dm = np.ones(len(kernels), dtype=np.float) / len(kernels) lastsol = np.zeros(len(kernels)) while np.max(np.abs(lastsol-dm)) > precision and iter < max_iter: # train svm combined_kernel = combine_kernels(kernels, dm) classifier = svm.SVC(kernel='precomputed') classifier.fit(combined_kernel, lbls.ravel()) # calculate new kernel mixing coefficients lastsol = dm.copy() sol = np.abs(classifier.dual_coef_) alphas = np.zeros((1, lbls.size)) alphas[0, classifier.support_] = sol norm_w_sq_m = np.zeros((len(kernels), 1)) res = lbls.dot(lbls.T)*alphas.T.dot(alphas) for l in range(len(kernels)): norm_w_sq_m[l] = dm[l]*dm[l]*np.sum(res*kernels[l]) sum_norm_w = np.sum(np.power(norm_w_sq_m, pnorm/(pnorm+1.0))) sum_norm_w = np.power(sum_norm_w, 1.0/pnorm) dm = np.power(norm_w_sq_m, 1.0/(pnorm+1.0))/sum_norm_w dm_norm = np.sum(np.power(abs(dm), pnorm)) dm_norm = np.power(dm_norm, 1.0/pnorm) iter+=1 print('Num iterations = {0}.'.format(iter)) print('New mixing coefficients:') print(dm) print(dm_norm) return dm # + P_NORM = 2.2 MKL_BINS = [2, 5, 10, 200] MKL_RBF = [0.001, 1.0, train[0].size] all_inds = np.concatenate((train_inds, test_inds)) train_kernels = list() test_kernels = list() for num_bins in MKL_BINS: transf_train = calc_histogram_features(train[class_inds, :], bins=num_bins, normalize=NORMALIZE) kernel = calc_hist_intersect_kernel(transf_train[all_inds, :]) kernel = center_kernel(kernel) kernel = normalize_kernel(kernel) train_kernels.append(kernel[:train_inds.size, :train_inds.size]) test_kernels.append(kernel[:train_inds.size, train_inds.size:]) for param in MKL_RBF: kernel = calc_rbf_kernel(train[class_inds[all_inds], :], param=param) kernel = 
center_kernel(kernel) kernel = normalize_kernel(kernel) train_kernels.append(kernel[:train_inds.size, :train_inds.size]) test_kernels.append(kernel[:train_inds.size, train_inds.size:]) # 1. learn kernel weights mix = fit_mkl(train_kernels, lbl[train_inds], pnorm=P_NORM) combined_kernel = combine_kernels(train_kernels, mix) classifier = svm.SVC(kernel='precomputed') classifier.fit(combined_kernel, lbl[train_inds]) combined_kernel = combine_kernels(test_kernels, mix) pred = classifier.decision_function(combined_kernel.T) fpr, tpr, thresholds = metrics.roc_curve(lbl[test_inds], pred, pos_label=1) auc_mkl_kernel = metrics.auc(fpr, tpr) # 2. use sum kernel (=inf norm) mix_uniform = np.ones(len(train_kernels), dtype=np.float) / len(combined_kernel) combined_kernel = combine_kernels(train_kernels, mix_uniform) classifier = svm.SVC(kernel='precomputed') classifier.fit(combined_kernel, lbl[train_inds]) combined_kernel = combine_kernels(test_kernels, mix_uniform) pred = classifier.decision_function(combined_kernel.T) fpr, tpr, thresholds = metrics.roc_curve(lbl[test_inds], pred, pos_label=1) auc_sum_kernel = metrics.auc(fpr, tpr) print('AUC RBF kernel: ', auc_rbf_kernel) print('AUC histogram intersection kernel: ', auc_hist_kernel) print('AUC combined kernel (sum): ', auc_sum_kernel) print('AUC combined kernel (p={0}): '.format(P_NORM), auc_mkl_kernel) plt.bar(np.arange(mix.size)[:len(MKL_BINS)], mix[:len(MKL_BINS), 0]) plt.bar(np.arange(mix.size)[len(MKL_BINS):], mix[len(MKL_BINS):, 0]) plt.xticks(np.arange(mix.size)) plt.grid(True) plt.xlabel('kernel') plt.ylabel('weight') # - # *** # ## Part IV: Kernel Approximations # # It is often more natural to design useful kernels rather than explicitly designing feature maps. However, kernel matrices grow quadratic with the number of samples which makes them unsuitable for large-scale problems. To overcome this problem, approximations such as 'Random Kitchen Sinks' calculate a feature map that emulates certain kernels (e.g. 
Gaussian and RBF kernels). In this part, we examine the runtime and convergence behaviour of random Fourier features. # # Your single task is to implement the 'calc_rbf_features' function below. # train, train_labels, test, test_labels, im_size = load_mnist_dataset() # train, train_labels, test, test_labels, im_size = load_fashion_mnist_dataset() # train, train_labels, test, test_labels, im_size = load_emnist_dataset() train, train_labels, test, test_labels, im_size = load_cifar10_dataset() print('Train dataset size: ', train.shape) print('Test dataset size: ', test.shape) print('Num classes: ', np.unique(train_labels).size) print('Image size: ', im_size) # (Bochner) Assume that $K$ is a positive definite translation-invariant (real) kernel (i.e. $K(x,y) = K(x-y)$) then # it can be represented as the Fourier transform of a probability distribution: # # $K(\Delta) = \int \exp(iw^T\Delta)p(w)dw = \mathbb{E}_{w\sim p}[\exp(iw^T\Delta)] = \mathbb{E}_{w\sim p}[\cos(w^T\Delta)] = \mathbb{E}_{w\sim p}[\cos(w^T(x-y))] = \mathbb{E}_{w\sim p}[\cos(w^Tx)\cos(w^Ty) + \sin(w^Tx)\sin(w^Ty)]$ # # Hence, we can approximate $K$ by using $\sin(w^Tx)$ and $\cos(w^Tx)$ features with randomly sampled $w \sim p$: # # $K(\Delta) \approx \frac{1}{D}\sum_{i=1}^D[\cos(w_i^Tx)\cos(w_i^Ty) + \sin(w_i^Tx)\sin(w_i^Ty)] =^! \phi(x)^T \phi(y)$ # # 1. For isotropic Gaussian kernel (which is translation invariant) with $\sigma=1$, $K(\Delta) = \exp(-\|\Delta\|^2/2)$ the corresponding probability distribution is $p(w) = (2\pi)^{-\frac{|\mathcal{X}|}{2}} \exp{\frac{-\|w\|^2}{2}}$. Numpy's build-in normal distribtion (setting $\mu=0$ and $\sigma=1$) reads $p(x) = (2\pi)^{-\frac{1}{2}} \exp{\frac{-\|x\|^2}{2}}$ (for a single feature). Use numpy's 'normal' function to sample $w$. # # 2. We do not want to sample $\cos$ and $\sin$. Instead, we use a variant that samples an offset $b \sim unif(0,2\pi)$ and only uses a single $\cos$ twice, i.e. feature $i=1,\ldots,D$ becomes $\sqrt{2}\cos(w_i^Tx + b_i)$. 
# + def calc_rbf_features(X, n_features=50): return -1 feats = calc_rbf_features(np.random.normal(size=(10, 20)), n_features=5) assert(feats.size == 10*5) assert(feats.shape[0] == 10 and feats.shape[1] == 5) # - # ### (a) Runtime and convergence behaviour for increasing number of random Fourier features # + POS_CLASS = 6 # 6: Shirt NEG_CLASS = 0 # 0: T-Shirt/Top NUM_SAMPLES = 1000 NUM_FEATURES = [5, 10, 20, 40, 80, 160, 320, 640, 1280, 12800] pos_class_inds = np.where(train_labels == POS_CLASS)[0] neg_class_inds = np.where(train_labels == NEG_CLASS)[0] sample_inds = np.concatenate((pos_class_inds, neg_class_inds)) lbls = train_labels[sample_inds] lbls[lbls == NEG_CLASS] = -1 lbls[lbls == POS_CLASS] = +1 rand_inds = np.random.permutation(sample_inds.size) sample_inds = sample_inds[rand_inds[:NUM_SAMPLES]] lbls = lbls[rand_inds[:NUM_SAMPLES]] kernel = calc_rbf_kernel(train[sample_inds, :], param=1./1.) times = np.zeros((len(NUM_FEATURES), 2)) approx_error = np.zeros(len(NUM_FEATURES)) for i in range(len(NUM_FEATURES)): rbf_features = calc_rbf_features(train[sample_inds, :], n_features=NUM_FEATURES[i]) approx_kernel = rbf_features.dot(rbf_features.T) classifier = svm.LinearSVC(loss='hinge') start = time.time() classifier.fit(rbf_features, lbls) end = time.time() times[i, 0] = end - start classifier = svm.SVC(kernel='precomputed') start = time.time() classifier.fit(kernel, lbls) end = time.time() times[i, 1] = end - start approx_error[i] = np.sum(np.abs(approx_kernel - kernel) / kernel.size) fig = plt.figure(figsize=(12, 4), dpi=80, facecolor='w', edgecolor='k') plt.subplot(1, 2, 1) plt.plot(np.arange(len(NUM_FEATURES)), approx_error, '-.b') plt.xticks(np.arange(len(NUM_FEATURES)+1), NUM_FEATURES) plt.grid(True) plt.xlabel('# random features') plt.ylabel('reconstruction error') plt.subplot(1, 2, 2) plt.plot(np.arange(len(NUM_FEATURES)), times[:, 0], '-.b') plt.plot(np.arange(len(NUM_FEATURES)), times[:, 1], '-.r') plt.xticks(np.arange(len(NUM_FEATURES)+1), 
NUM_FEATURES) plt.grid(True) plt.legend(['random', 'rbf'], loc=2) plt.xlabel('# random features') plt.ylabel('processing time [sec]') # - # ### (b) Runtime and convergence behaviour for increasing number of samples # + NUM_SAMPLES = [100, 200, 400, 800, 1600, 3200, 6400] NUM_FEATURES = 640 mem = np.zeros((len(NUM_SAMPLES), 2)) times = np.zeros((len(NUM_SAMPLES), 2)) approx_error = np.zeros(len(NUM_SAMPLES)) for i in range(len(NUM_SAMPLES)): sample_inds = np.concatenate((pos_class_inds, neg_class_inds)) lbls = train_labels[sample_inds] lbls[lbls == NEG_CLASS] = -1 lbls[lbls == POS_CLASS] = +1 rand_inds = np.random.permutation(sample_inds.size) sample_inds = sample_inds[rand_inds[:NUM_SAMPLES[i]]] lbls = lbls[rand_inds[:NUM_SAMPLES[i]]] rbf_features = calc_rbf_features(train[sample_inds, :], n_features=NUM_FEATURES) kernel = calc_rbf_kernel(train[sample_inds, :], param=1./1.) approx_kernel = rbf_features.dot(rbf_features.T) classifier = svm.LinearSVC(loss='hinge') start = time.time() classifier.fit(rbf_features, lbls) end = time.time() times[i, 0] = end - start classifier = svm.SVC(kernel='precomputed') start = time.time() classifier.fit(kernel, lbls) end = time.time() times[i, 1] = end - start approx_error[i] = np.sum(np.abs(approx_kernel - kernel) / kernel.size) mem[i, 0] = np.int(sample_inds.size*NUM_FEATURES*train.itemsize / (1024.*1024.)) mem[i, 1] = np.int(kernel.size*kernel.itemsize / (1024.*1024.)) fig = plt.figure(figsize=(12, 4), dpi=80, facecolor='w', edgecolor='k') plt.subplot(1, 3, 1) plt.plot(np.arange(len(NUM_SAMPLES)), approx_error, '-.b') plt.xticks(np.arange(len(NUM_SAMPLES)+1), NUM_SAMPLES) plt.grid(True) plt.xlabel('# samples') plt.ylabel('reconstruction error') plt.subplot(1, 3, 2) plt.plot(np.arange(len(NUM_SAMPLES)), times[:, 0], '-.b') plt.plot(np.arange(len(NUM_SAMPLES)), times[:, 1], '-.r') plt.xticks(np.arange(len(NUM_SAMPLES)+1), NUM_SAMPLES) plt.grid(True) plt.legend(['random', 'rbf'], loc=2) plt.xlabel('# samples') plt.ylabel('processing 
time [sec]') plt.subplot(1, 3, 3) plt.plot(np.arange(len(NUM_SAMPLES)), mem[:, 0], '-.b') plt.plot(np.arange(len(NUM_SAMPLES)), mem[:, 1], '-.r') plt.xticks(np.arange(len(NUM_SAMPLES)+1), NUM_SAMPLES) plt.grid(True) plt.legend(['random', 'rbf'], loc=2) plt.xlabel('# samples') plt.ylabel('memory footprint [MB]') fig.tight_layout() # -
lectures/kernel_methods_exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# NOTE(review): this notebook is Python 2 (print statements, dict.iteritems,
# pd.tslib.Timestamp); it will not run under Python 3 without porting.

import pandas as pd
from pprint import pprint
import json
import numpy as np

# Directory holding the pickled source tables.
path = 'E:\\anil\\IIT Sop\\Term02\\MATH497\\ICO_data\\original_pickle\\'

# +
# ICD_list table must be re-built from, presumably, ICD_for_Enc due to some entries being
# pre-18th birthday. ICD_list entries are not timestamped!
table_names = ['all_encounter_data', 'demographics', 'encounters',
               'family_hist_for_Enc', 'family_hist_list', 'ICD_for_Enc',
               'ICD_list', 'macula_findings_for_Enc', 'SL_Lens_for_Enc',
               'SNOMED_problem_list', 'systemic_disease_for_Enc',
               'systemic_disease_list']
# Tables keyed by person vs. keyed by encounter (documentation only; the code
# below detects the keys from each table's columns).
person_data = ['demographics', 'family_hist_list', 'systemic_disease_list',
               'SNOMED_problem_list']
encounter_data = ['all_encounter_data', 'encounters', 'family_hist_for_Enc',
                  'ICD_for_Enc', 'macula_findings_for_Enc', 'SL_Lens_for_Enc',
                  'systemic_disease_for_Enc']

# +
# read tables into dataframes; ICD_list is skipped (see note above)
dfs = [pd.read_pickle(path + name + '.pickle') if name != 'ICD_list' else None
       for name in table_names]

# rename columns in all dataframes to avoid unicode decode error
for df in dfs:
    if df is not None:
        df.columns = [col.decode("utf-8-sig") for col in df.columns]
# -

# #### Grouping all encounter nbrs under respective person nbr

# aggregate encounter nbrs under person number from tables with encounter numbers
encounter_key = 'Enc_Nbr'
person_key = 'Person_Nbr'
encounters_by_person = {}
for df in dfs:
    if df is not None:
        df_columns = set(df.columns.values)
        # only tables carrying both keys can link persons to encounters
        if encounter_key in df_columns and person_key in df_columns:
            for row_index, dfrow in df.iterrows():
                rowdict = dict(dfrow)
                person_nbr = rowdict[person_key]
                encounter_nbr = rowdict[encounter_key]
                encounters_by_person.setdefault(person_nbr, set()).add(encounter_nbr)

# +
# sanity check: every encounter should belong to exactly one person
encounter_nbr_to_person_nbr = {}
for person_nbr, encounter_nbrs in encounters_by_person.iteritems():
    for encounter_nbr in encounter_nbrs:
        encounter_nbr_to_person_nbr.setdefault(encounter_nbr, set()).add(person_nbr)
for encounter_nbr, person_nbrs in encounter_nbr_to_person_nbr.iteritems():
    if len(person_nbrs) > 1:
        print 'Encounter {} mapped to multiple persons {}'.format(encounter_nbr, str(list(person_nbrs)))
# -

# #### Grouping properties under Person_Nbr (person related information)

person_key = 'Person_Nbr'
encounter_key = 'Enc_Nbr'
# columns_to_ignore = [u'Person_ID', u'Person_Nbr', u'Enc_ID', u'Enc_Nbr', u'Enc_Date']
# data_by_persons[table][person] -> row dict ('single') or list of row dicts ('list')
data_by_persons = {}
data_by_persons_type = {}
for df_index, df in enumerate(dfs):
    df_name = table_names[df_index]
    data_by_persons[df_name] = {}
    if df is not None:
        df_columns = set(df.columns.values)
        # person-level tables: have a person key but no encounter key
        if person_key in df_columns and encounter_key not in df_columns:
            print 'Processing dataframe: {}'.format(df_name)
            # check if encounter is primary key in the table
            if len(df) == len(df[person_key].unique()):
                data_by_persons_type[df_name] = 'single'
                for row_index, dfrow in df.iterrows():
                    rowdict = dict(dfrow)
                    # timestamps are not JSON serializable -> store ordinals
                    for k, v in rowdict.iteritems():
                        if isinstance(v, pd.tslib.Timestamp):
                            rowdict[k] = v.toordinal()
                    person_nbr = rowdict[person_key]
                    data_by_persons[df_name][person_nbr] = rowdict
            else:
                data_by_persons_type[df_name] = 'list'
                for row_index, dfrow in df.iterrows():
                    rowdict = dict(dfrow)
                    for k, v in rowdict.iteritems():
                        if isinstance(v, pd.tslib.Timestamp):
                            rowdict[k] = v.toordinal()
                    person_nbr = rowdict[person_key]
                    data_by_persons[df_name].setdefault(person_nbr, []).append(rowdict)

# #### Grouping properties under encounter_nbrs (encounter related information)

encounter_key = 'Enc_Nbr'
# columns_to_ignore = [u'Person_ID', u'Person_Nbr', u'Enc_ID', u'Enc_Nbr', u'Enc_Date']
# same structure as above, keyed by encounter number instead of person number
data_by_encounters = {}
data_by_encounters_type = {}
for df_index, df in enumerate(dfs):
    df_name = table_names[df_index]
    data_by_encounters[df_name] = {}
    if df is not None:
        df_columns = set(df.columns.values)
        if encounter_key in df_columns:
            print 'Processing dataframe: {}'.format(df_name)
            # check if encounter is primary key in the table
            if len(df) == len(df[encounter_key].unique()):
                data_by_encounters_type[df_name] = 'single'
                for row_index, dfrow in df.iterrows():
                    rowdict = dict(dfrow)
                    for k, v in rowdict.iteritems():
                        if isinstance(v, pd.tslib.Timestamp):
                            rowdict[k] = v.toordinal()
                    encounter_nbr = rowdict[encounter_key]
                    data_by_encounters[df_name][encounter_nbr] = rowdict
            else:
                data_by_encounters_type[df_name] = 'list'
                for row_index, dfrow in df.iterrows():
                    rowdict = dict(dfrow)
                    for k, v in rowdict.iteritems():
                        if isinstance(v, pd.tslib.Timestamp):
                            rowdict[k] = v.toordinal()
                    encounter_nbr = rowdict[encounter_key]
                    data_by_encounters[df_name].setdefault(encounter_nbr, []).append(rowdict)

# ## Aggregating encounter entities under respective person entity

# Build one nested JSON-style object per person: person-level fields at the
# top level, plus a list of encounter objects under 'encounters'.
all_persons = []
for person_nbr in encounters_by_person:
    person_object = {person_key: person_nbr, 'encounters': []}
    # insert all person related info in person_object
    for df_name in data_by_persons_type:
        if data_by_persons_type[df_name] == "single":
            if person_nbr in data_by_persons[df_name]:
                person_single_data_row = data_by_persons[df_name][person_nbr]
                for key, value in person_single_data_row.iteritems():
                    person_object[key] = value
        else:
            if person_nbr in data_by_persons[df_name]:
                person_multiple_data_row = data_by_persons[df_name][person_nbr]
                person_object[df_name] = person_multiple_data_row
                person_object[df_name+"_count"] = len(person_multiple_data_row)
    # insert all encounter related information as an encounter object into the person object
    for enc_nbr in encounters_by_person[person_nbr]:
        encounter_object = {encounter_key: enc_nbr}
        for df_name in data_by_encounters_type:
            if data_by_encounters_type[df_name] == "single":
                if enc_nbr in data_by_encounters[df_name]:
                    encounter_single_data_row = data_by_encounters[df_name][enc_nbr]
                    for key, value in encounter_single_data_row.iteritems():
                        encounter_object[key] = value
            else:
                if enc_nbr in data_by_encounters[df_name]:
                    encounter_multiple_data_row = data_by_encounters[df_name][enc_nbr]
                    encounter_object[df_name] = encounter_multiple_data_row
                    encounter_object[df_name+"_count"] = len(encounter_multiple_data_row)
        person_object['encounters'].append(encounter_object)
    all_persons.append(person_object)

# ## Dropping duplicated columns and then full na rows across tables

# snapshot before any field dropping, for traceability
with open('20170226_ICO_data_combined_before_duplicate_fields_drop.json', 'w') as fh:
    json.dump(all_persons, fh)

# +
# drop repeated columns in nested fields
columns_to_drop = ['Enc_ID', 'Enc_Nbr', 'Enc_Date', 'Person_ID', 'Person_Nbr',
                   'Date_Created', 'Enc_Timestamp']
for person_index in range(len(all_persons)):
    for df_name in data_by_persons_type:
        if data_by_persons_type[df_name] != "single":
            if df_name in all_persons[person_index] and df_name != 'encounters':
                for rp_index in range(len(all_persons[person_index][df_name])):
                    for column_to_drop in columns_to_drop:
                        # key may be absent in this row dict; ignore in that case
                        try:
                            del all_persons[person_index][df_name][rp_index][column_to_drop]
                        except:
                            pass
    for enc_obj_index in range(len(all_persons[person_index]['encounters'])):
        enc_obj = all_persons[person_index]['encounters'][enc_obj_index]
        for df_name in data_by_encounters_type:
            if data_by_encounters_type[df_name] != "single":
                if df_name in enc_obj:
                    for rp_index in range(len(enc_obj[df_name])):
                        for column_to_drop in columns_to_drop:
                            try:
                                del enc_obj[df_name][rp_index][column_to_drop]
                            except:
                                pass
        all_persons[person_index]['encounters'][enc_obj_index] = enc_obj
# -

# drop full na object rows for list properties
# (iterate in reverse so deletions do not shift pending indices)
for person_index in range(len(all_persons)):
    for df_name in data_by_persons_type:
        if data_by_persons_type[df_name] != "single":
            if df_name in all_persons[person_index] and df_name != 'encounters':
                for rp_index in reversed(range(len(all_persons[person_index][df_name]))):
                    if all(pd.isnull(all_persons[person_index][df_name][rp_index].values())):
                        del all_persons[person_index][df_name][rp_index]
    for enc_obj_index in range(len(all_persons[person_index]['encounters'])):
        enc_obj = all_persons[person_index]['encounters'][enc_obj_index]
        for df_name in data_by_encounters_type:
            if data_by_encounters_type[df_name] != "single":
                if df_name in enc_obj:
                    for rp_index in reversed(range(len(enc_obj[df_name]))):
                        if all(pd.isnull(enc_obj[df_name][rp_index].values())):
                            del enc_obj[df_name][rp_index]
        all_persons[person_index]['encounters'][enc_obj_index] = enc_obj

# update counts for list properties (rows may have been deleted above)
for person_index in range(len(all_persons)):
    for df_name in data_by_persons_type:
        if data_by_persons_type[df_name] != "single":
            if df_name in all_persons[person_index] and df_name != 'encounters':
                all_persons[person_index][df_name+"_count"] = len(all_persons[person_index][df_name])
    for enc_obj_index in range(len(all_persons[person_index]['encounters'])):
        enc_obj = all_persons[person_index]['encounters'][enc_obj_index]
        for df_name in data_by_encounters_type:
            if data_by_encounters_type[df_name] != "single":
                if df_name in enc_obj:
                    enc_obj[df_name+"_count"] = len(enc_obj[df_name])
        all_persons[person_index]['encounters'][enc_obj_index] = enc_obj

with open('20170226_ICO_data_after_duplicate_fields_drop.json', 'w') as fh:
    json.dump(all_persons, fh)

# creating a dataframe from aggregated data
combined_data_df = pd.DataFrame.from_records(all_persons)
combined_data_df.set_index(['Person_Nbr'], inplace=True)

# +
# TODO Similar functions for processing nested/list data has to be written
# add std race column
def standardize_race(race):
    # Map the many raw race strings onto a small standardized vocabulary;
    # anything unmapped falls through to 'Unknown'.
    # NOTE(review): the float('nan') key will not match other NaN instances
    # (NaN != NaN), but such values fall through to 'Unknown' anyway.
    standard_race_conversion_dict = {'African American':'Black or African American',
        'Black or African American':'Black or African American',
        'Black/African American (Not Hispanic)':'Black or African American',
        'American Indian or Alaska Native':'American Indian or Alaska Native',
        'American Indian/Alaskan Native':'American Indian or Alaska Native',
        'American Indian':'American Indian or Alaska Native',
        'Native American Indian':'American Indian or Alaska Native',
        'Alaskan Native':'American Indian or Alaska Native',
        'Asian':'Asian','Chinese':'Asian','Indian':'Asian','Caucasian':'White',
        'White (Not Hispanic / Latino)':'White, not Hispanic or Latino',
        'White':'White','Declined to specify':'Unknown',
        'Unknown/Not Reported':'Unknown','Greek':'White',
        'Native Hawaiian or Other Pacific Islander':'Native Hawaiian and Other Pacific Islander',
        'Hawaiian':'Native Hawaiian and Other Pacific Islander',
        'Other Pacific Islander (Not Hawaiian)':'Native Hawaiian and Other Pacific Islander',
        'Hispanic Or Latino (All Races)':'Hispanic or Latino','Hispanic':'Hispanic or Latino',
        'More than one race':'Two or More Races','Multiracial':'Two or More Races',
        'Multi-racial':'Two or More Races','Moroccan':'White',float('nan'):'Unknown',
        'Other Race':'Other Race','Other Race (Jamaican)':'Other Race'
        }
    if race in standard_race_conversion_dict:
        return standard_race_conversion_dict[race]
    return 'Unknown'

combined_data_df['Race_Std'] = combined_data_df['Race'].apply(standardize_race)
# -

combined_data_df.head()

# to access encounters for a given Person_Nbr
# these objects have nested encounter related info like all_encounter_data, systemic_disease_for_Enc, family_hist_for_Enc
combined_data_df.loc[109227, 'encounters']

combined_data_df.to_pickle(path+'20170226_ICO_Data_combined.pickle')
data_aggregation/table_merging_ICO.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Demo: building a lale pipeline with algorithm choices and tuning it with Hyperopt.

import warnings
warnings.filterwarnings("ignore")

from lale.lib.lale import NoOp
from lale.lib.sklearn import KNeighborsClassifier
from lale.lib.sklearn import LogisticRegression
from lale.lib.sklearn import Nystroem
from lale.lib.sklearn import PCA
from lale.operators import make_union, make_choice, make_pipeline

# #### Lale provides an `|` combinator or a function make_choice() to allow only one of its arguments to be applied at once in the overall pipeline. In this example, the first step of the pipeline is a choice between Nystroem and NoOp. This means that the data will either be transformed using Nystroem or will be left as is (NoOp is a transformer that does nothing). The second step in the pipeline is a PCA, and the third step is again a choice between two popular classifiers.

# `|` is lale's choice combinator: apply Nystroem OR leave data untouched.
kernel_tfm_or_not = NoOp | Nystroem
kernel_tfm_or_not.visualize()

tfm = PCA
clf = make_choice(LogisticRegression, KNeighborsClassifier)
clf.visualize()

# `>>` chains steps into a pipeline: (NoOp|Nystroem) -> PCA -> (LR|KNN).
optimizable = kernel_tfm_or_not >> tfm >> clf
optimizable.visualize()

# #### Use the graph to select the best performing model for a dataset. We use Iris dataset from sklearn for this demonstration. Hyperopt is used to scan the hyperparameter search space and select the best performing path from the above graph.

# +
from lale.lib.lale import Hyperopt
from lale.datasets import load_iris_df
(X_train, y_train), (X_test, y_test) = load_iris_df()
# -

# max_evals=3 keeps the demo fast; raise it for a real search
hpo_trainable = Hyperopt(estimator=optimizable, max_evals=3)
hpo_trained = hpo_trainable.fit(X_train, y_train)
# get_pipeline() without arguments returns the best-found pipeline
best_estimator = hpo_trained.get_pipeline()
best_estimator.visualize()

# #### Look at the results from all trials and retrieve pipelines of other names or types.

hpo_trained.summary()

# the trial with the largest loss is the worst-performing pipeline
worst_name = hpo_trained.summary().loss.argmax()
print(worst_name)

worst_estimator = hpo_trained.get_pipeline(worst_name)
worst_estimator.visualize()

# +
#FIXME
#worst_estimator_in_sklearn_format = hpo_trained.get_pipeline(worst_name, astype='sklearn')
examples/demo_pipeline_hyperopt.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Road-segmentation inference: restores a trained TF1 FCN from a checkpoint,
# overlays the predicted road mask on KITTI test images, and renders a video.
# NOTE(review): scipy.misc.imresize/toimage were removed in SciPy >= 1.3 —
# this notebook requires an older SciPy (or a port to PIL/imageio resizing).

# +
import os
import tensorflow as tf
import numpy as np
import helper
from log_progress import log_progress
import matplotlib.pyplot as plt
import scipy.misc
import imageio
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
import cv2
import warnings
warnings.filterwarnings('ignore')
# -

# Helper function that uses TF to do image segmentation

# +
"""
Apply segmentation to one image
:param sess: TF session
:param logits: TF Tensor for the logits
:param keep_prob: TF Placeholder for the dropout keep probability
:param image_pl: TF Placeholder for the image placeholder
:param image_array: Path to the folder that contains the datasets
:param image_shape: Tuple - Shape of image
:return: Immage array
"""
def apply_inference(sess, logits, keep_prob, image_pl, image, image_shape):
    # original KITTI frame size (height, width)
    original_shape = (375, 1242)
    # downscale to the network's input resolution
    image_array = scipy.misc.imresize(image, image_shape)
    im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, image_pl: [image_array]})
    # Splice out second column (road), reshape output back to image_shape
    im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])
    # If road softmax > 0.5, prediction is road
    segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)
    original_shape = (375, 1242)
    # Create mask based on segmentation to apply to original image
    # (semi-transparent green, alpha = 127)
    overlay = np.dot(segmentation, np.array([[0, 255, 0, 127]]))
    overlay = scipy.misc.toimage(overlay, mode="RGBA")
    overlay = scipy.misc.imresize(overlay, original_shape)
    # rebuild a binary mask from the green channel after resizing
    bit_mask = overlay[:,:,1] > 127
    bit_mask = bit_mask.reshape(original_shape[0], original_shape[1], 1)
    bit_mask = np.dot(bit_mask, np.array([[255, 255, 255, 127]]))
    # Prepare PIL images
    mask = scipy.misc.toimage(bit_mask, mode="RGBA")
    overlay = Image.fromarray(overlay, mode="RGBA")
    street_im = Image.fromarray(image)
    # Paste overlay into original image
    street_im.paste(overlay, None, mask)
    return np.array(street_im)
# -

# Load saved TF graph and apply it to test image

# +
run_dir = "./runs"
test_dir = "./data/data_road/testing/image_2"

# each training run is saved in a folder starting with 'Epochs'
runs = [d for d in os.listdir(run_dir)
        if os.path.isdir(os.path.join(run_dir, d)) and d.startswith('Epochs')]
assert runs, "runs folder does not contain any saved models. Please run main.py and do training first."

# When sorted alphabetically by loss, the first should be the best one
model_dir = os.path.join(run_dir, sorted(runs)[0] + '/model')
print(f"Loading model from {model_dir}")

with tf.Session(graph=tf.Graph()) as sess:
    saver = tf.train.import_meta_graph(model_dir+'/model.meta')
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))
    test_image = os.path.join(test_dir, 'um_000000.png')
    image_shape = (160, 576)
    image = imageio.imread(test_image)
    plt.figure(figsize=(12, 8))
    plt.imshow(image)
    # look up the input/output tensors by the names used at training time
    graph = tf.get_default_graph()
    image_tensor = graph.get_tensor_by_name("image_input:0")
    logits_tensor = graph.get_tensor_by_name("logits:0")
    keep_prob_tensor = graph.get_tensor_by_name("keep_prob:0")
    seg_img = apply_inference(sess, logits_tensor, keep_prob_tensor, image_tensor, image, image_shape)
    plt.figure(figsize=(12, 8))
    plt.imshow(seg_img)
# -

# Apply neural network to all test images and generate video

# +
source_image_folder = "./data/data_road/testing/image_2"
video_name = 'video.mp4'

images = list([img for img in os.listdir(source_image_folder) if img.endswith(".png")])
assert images, "Source image folder must contain some PNG files"

# use the first frame to size the video
frame = cv2.imread(os.path.join(source_image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'MP4V'), 15, (width, height))

# NOTE(review): seg_img comes from PIL in RGB channel order while
# cv2.VideoWriter expects BGR — colors in the video may be swapped; confirm.
with tf.Session(graph=tf.Graph()) as sess:
    saver = tf.train.import_meta_graph(model_dir+'/model.meta')
    saver.restore(sess, tf.train.latest_checkpoint(model_dir))
    test_image = os.path.join(test_dir, 'um_000000.png')
    image_shape = (160, 576)
    graph = tf.get_default_graph()
    image_tensor = graph.get_tensor_by_name("image_input:0")
    logits_tensor = graph.get_tensor_by_name("logits:0")
    keep_prob_tensor = graph.get_tensor_by_name("keep_prob:0")
    for image_file in log_progress(sorted(images), every=1):
        image = imageio.imread(os.path.join(source_image_folder, image_file))
        seg_img = apply_inference(sess, logits_tensor, keep_prob_tensor, image_tensor, image, image_shape)
        video.write(seg_img)

cv2.destroyAllWindows()
video.release()
# -

# %%HTML
<video width="621" height="188" controls> <source src="https://github.com/cazacov/CarND-Semantic-Segmentation/blob/master/video.mp4?raw=true" type="video/mp4"> </video>
inference.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R (SageMath) # language: r # name: ir-sage # --- # # Simulation in R # # In this document we will explore ways to run simulations with R, and use some random number generators. # # We can generate random numbers using a variety of functions and distributions. # # The first kind of randomness that people understand is uniform randomness. This is where everything is equally likely. We generate randomness like this with ```runif(min, max)```. # # ``` # library(dplyr) # library(ggplot2) # runif(n=10, min=0, max=1) # ``` library(dplyr) library(ggplot2) runif(n=10, min=0, max=1) # Now let's save some random numbers and make a table. samp<-runif(n=1000000, min=0, max=1) num<-c(1:1000000) table<-data.frame(cbind(num, samp)) head(table) summary(table) # ## How to use random numbers # # We might be trying to simualate a batter in a baseball game. Maybe we are trying to simulate <NAME>. He is one of the best hitter in baseball today. His batting average is 0.312. So we could say that he gets a hit if the random number is less than .312. This is easily done with the ```mutate``` command. # # ``` # table <- table %>% # mutate(hit = ifelse(samp < .312, 1, 0)) # head(table) # ``` table <- table %>% mutate(hit = ifelse(samp < .312, 1, 0)) head(table) # # Num of hits # # We can calculate the number of hits by ```sum(table$hit)``` sum(table$hit) # ## Seeds and random number generators # # Computers don't actually create randomness. They actually use complicated algorithms that produce a determined number. These numbers just look random to use. We can make all the computers in this room come up with the same random numbers by setting the *seed* on the computers to be the same. # # ``` # set.seed(13) # runif(n=10, min=0, max=1) # ``` # # Compare your numbers to another classmates. 
set.seed(13) runif(n=10, min=0, max=1) set.seed(13) runif(n=10, min=0, max=1) # ## Evaluating streakiness # # Are players streaky? Or is it just the whims of fate. Let's look at some data and compare it to what we see if the world was just mathematically random. # load(file="kobe_basket.rda") kobe_basket # **Warning: Don't wory about the next line. It is a function that we need to determine how streaky Kobe is.** calc_streak = function(x) { if (!is.atomic(x)) x = x[,1] if (any(!x %in% c("H","M"))) stop('Input should only contain hits ("H") and misses ("M")') y = rep(0,length(x)) y[x == "H"] = 1 y = c(0, y, 0) wz = which(y == 0) streak = diff(wz) - 1 return(data.frame(length = streak)) } dim(kobe_basket) kobe_streak <- calc_streak(kobe_basket$shot) IQR(kobe_streak$length) ggplot(data = kobe_streak, aes(x = length)) + geom_histogram(binwidth = 1) # **Describe Kobe Bryants streak distribution.** # ## Simulated Kobe # # Kobe was about 45% shooter. So we want to imagine a shooter that isn't streaky at all that just works the way randomness says the situation should work. Check out the code below. shot_outcomes <- c("H", "M") # Now ```shot_outcomes``` is a list of two outcomes H and M for hit and miss. # # We now ```sample``` from this list the same number of times that Kobe shot in his game, 133. # # Here we use the ```sample``` function this allows us to imagine picking from a list over and over. There are various options to the command as well, as you will see. sim_basket <- sample(shot_outcomes, size = 133, replace = TRUE, prob=c(0.45, 0.55)) sim_basket # This creates a list of hits and misses with the same expected proportions as a typical Kobe Bryant after 133 shots. 
#

# +
sim_streak = calc_streak(sim_basket)
ggplot(data = sim_streak, aes(x=length)) +
  geom_histogram(binwidth = 1)
# -

IQR(sim_streak$length)

# Put the real and simulated streak lengths side by side: c() on the two
# one-column data frames yields a list, so boxplot draws one box each.
streaks = c(kobe_streak,sim_streak)
boxplot(streaks)

# **What does this say about Kobe being a streaky shooter?**

# ## Simulating stock market
#
# Stock returns are much more likely to follow the normal model than a uniform model. A fictional stock has the following properties: On average it gains 1.001 times its opening price during the trading day, but that can vary by a standard deviation of 0.005 on any given day (this is its volatility). We can simulate a single sample path for this stock by taking the cumulative product from a Normal distribution with a mean of 1.001 and a sd of 0.005. Assuming the stock opens at $20 per share, here is a sample path for 365 days of trading.
#

days <- 365
# Daily multiplicative returns: Normal(mean = 1.001, sd = 0.005).
changes <- rnorm(365,mean=1.001,sd=0.005)
plot(cumprod(c(20,changes)),type='l',ylab="Price",xlab="day",main="closing price (1 possible path)")

# +
runs <- 100000

#simulates future movements and returns the closing price on day 365
# (the value of the final assignment is returned invisibly, which is
# what replicate() collects below)
generate.path <- function(){
  days <- 365
  changes <- rnorm(365,mean=1.001,sd=0.005)
  sample.path <- cumprod(c(20,changes))
  closing.price <- sample.path[days+1] #+1 because we add the opening price
}

# Monte Carlo: repeat the 365-day path `runs` times, keeping each closing price.
mc.closing <- replicate(runs,generate.path())
# -

head(mc.closing)
mean(mc.closing)
quantile(mc.closing, c(.05, .5, .95))

# ### What would be a big streak for Kobe?
#
# We again come back to Kobe, a player we know makes 45 percent of his shots. Kobe took about 26500 shots in his career. What would be a reasonable longest shooting streak for a player like that?
#
#
# **Run a simulation of Kobe's career and see what the longest streak was. Then do it again. Did you get a different answer? Do it a few times. How much variability is there in the answer?**
Simulation Master/simulation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # PaddlePaddle Image Classification with OpenVINO # # This demo shows how to run MobileNetV3 Large + SSLD PaddePaddle model on OpenVINO natively. This demo will fetch the model from the PaddleHub server, and we will export the model with the PaddeHub's save_inference_model feature. # # Authors: # <NAME>, PhD (OpenVINO Edge AI Software Evangelist - Intel) # <NAME> (Intel PRC) # + import os, os.path import sys import json import urllib.request import cv2 import numpy as np from scipy.special import softmax from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork from IPython.display import Image def image_preprocess_mobilenetv3(img_path): img = cv2.imread(img_path) img = cv2.resize(img, (224,224), interpolation=cv2.INTER_LANCZOS4) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = np.transpose(img, [2,0,1]) / 255 img = np.expand_dims(img, 0) img_mean = np.array([0.485, 0.456,0.406]).reshape((3,1,1)) img_std = np.array([0.229, 0.224, 0.225]).reshape((3,1,1)) img -= img_mean img /= img_std return img.astype(np.float32) def top_k(result, image_classes, topk=5): softmax_result = softmax(result) top_indices = np.argsort(-softmax_result)[:topk] #print(top_indices) top_softmax = softmax_result[top_indices] for index, softmax_probability in zip(top_indices, top_softmax): print(f"Class Name: {image_classes[index]}, Probability: {softmax_probability:.5f}") # + [markdown] tags=[] # ## Download the Model from PaddleHub and PaddleClas # # You can locate the mobilenet_v3_large_imagenet_ssld from the PaddleHub list. # # https://www.paddlepaddle.org.cn/hublist # # To learn more about PaddleClas, you can visit here. 
# # https://github.com/PaddlePaddle/PaddleClas/blob/release/2.2/docs/en/tutorials/quick_start_recognition_en.md # + import paddlehub as hub #Source: https://www.paddlepaddle.org.cn/hublist model = hub.Module(name="mobilenet_v3_large_imagenet_ssld") model.save_inference_model(dirname="paddlehub_model/", model_filename="paddlehub_model/inference.pdmodel", params_filename="paddlehub_model/inference.pdiparams") image_classes = model.label_list # - # ## Run Inference on OpenVINO # + ie = IECore() net = ie.read_network("paddlehub_model/inference.pdmodel") #MobileNetV3_large_x1_0 filename = "coco.jpg" test_image = image_preprocess_mobilenetv3(filename) # pdmodel might be dynamic shape, this will reshape based on the input input_key = list(net.input_info.items())[0][0] # 'inputs' net.reshape({input_key: test_image.shape}) #load the network on CPU exec_net = ie.load_network(net, 'CPU') assert isinstance(exec_net, ExecutableNetwork) #perform the inference step output = exec_net.infer({input_key: test_image}) result_ie = list(output.values())[0][0] #filter and print the top 5 results top_k(result_ie, image_classes) Image(filename=filename) # - #Results may vary due to the slight difference in pre-processing #Please refer to the documentation from PaddleHub to see the underlying #preprocessing function paddle_results = model.classification(images=[cv2.imread('coco.jpg')], top_k=5) for class_name, softmax_probability in paddle_results[0].items(): print(f"Class Name: {class_name}, Probability: {softmax_probability:.5f}")
notebooks/100-mobilenetv3/100-classification-paddle-model-paddlehub.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 (SageMath) # language: python # name: python2 # --- import Lagrange_poly3 as lp3 import Lagrange_poly4 as lp4 import Polynomial as pn import Polynomial_vec as pv import Polynomial_dict as pd import numpy as np # # Homework 7 # # ### <NAME> # ### 4/23/16 # ### Lagrange_poly3 # # The utility of interpolating functions was contained in a class which creates an instance with a given set of interpolating points. The functions of the class can both call for an interpolating point value and graph the actual points of the function with the interpolated function plotted over it. xp = np.linspace(0, np.pi, 5) yp = np.sin(xp) middle = xp[2] - xp[1] / 2 LagPol = lp3.LagrangePoly(xp, yp) print 'Function interpolates: ', print LagPol.p_L(middle) print 'Actual sine result is: ', print np.sin(middle) LagPol.graph(np.sin, 5, 0, np.pi) # ### Lagrange_poly4 # # This class is much like the other class above, with the sole acception that a python function, the min and max points, and the value of the number of points to be taken is passed in as an argument instead. It is thus within the initilization of the instance that the xp and yp vectors are generated for the instance at hand, and the remainder of the functions work in an analogous manner. LagPol = lp4.LagrangePolyFunctions(np.sin) middle = LagPol.xp[2] - LagPol.xp[1] / 2 print 'Function interpolates: ', print LagPol.p_L(middle) print 'Actual sine result is: ', print np.sin(middle) LagPol.graph() # ### Polynomial_vec # # This implementation vectorizes the polynomials class in order to provide for increased efficiency. The idea would be that vectorized functions would have increased speed over their iterative counterparts, at least for data structures of significant size. 
In what follows, the vectorized functions of the new module are compared for efficiency against the unvectorized iterative version, and the surprising result is that the unvectorized version is actually more efficient in these cases. It is possible that for very large polynomials this problem would be resolved, but as of yet the cause of this is uncertain, although surely due to something the programmer failed to do properly. coefficients = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5] # %timeit normal = pn.Polynomial(coefficients) # %timeit vectorized = pv.Polynomial(coefficients) normal = pn.Polynomial(coefficients) # %timeit normal.__add__(normal) vectorized = pv.Polynomial(coefficients) # %timeit vectorized.__add__(vectorized) # %timeit normal.__mul__(normal) # %timeit vectorized.__mul__(vectorized) # %timeit normal.__call__(5) # %timeit vectorized.__call__(5) # ### Polynomial_dict # # A dictionary version of the polynomial function is also implemented. This means that the coefficients of the polynomial are stored in a python dictionary, which uses keys, the degree of the coefficient power, in order to call up values, which are the coefficients of the powers themselves. These implementations are always slower than their iterative counterparts and sometimes slower, sometimes faster than the vectorized implementation. Assuming that ideally vectorized code would be the fastest implementation, a disadvantage of the dictionary class would be its efficiency. Benefits of the dictionary would be easily readable and adjustable code, as the addition of new keys or alteration of values in a dictionary is a very simple and straightforward procedure. Moreover, dictionaries are user friendly in many other respects, such as their simple double equal sign test for equality.
# %timeit pd.Polynomial({0:5, 1:5, 2:5, 3:5, 4:5, 5:5, 6:5, 7:5, 8:5, 9:5, 10:5}) dictionary = pd.Polynomial({0:5, 1:5, 2:5, 3:5, 4:5, 5:5, 6:5, 7:5, 8:5, 9:5, 10:5}) # %timeit dictionary.__add__(dictionary) # %timeit dictionary.__mul__(dictionary)
hw7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/graviraja/100-Days-of-NLP/blob/applications%2Fclassification/NER%20tagging%20with%20BiLSTM%20CRF.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="AeK9dZJmkDQg" colab_type="text" # ### NER Dataset # + id="j5jkJpQvdgjT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="9c482ba5-1be7-4a03-9f1f-9e7d28a036be" # !pip install kaggle # + id="s5eEi8pCZYYu" colab_type="code" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 90} outputId="46d574e9-a88e-4996-ecd5-517f2f72f38d" from google.colab import files files.upload() # + id="SBOvFvrBdm1s" colab_type="code" colab={} # !mkdir ~/.kaggle # !cp kaggle.json ~/.kaggle # !chmod 600 ~/.kaggle/kaggle.json # + id="WKDfern7eAi2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="f8ca8352-b51a-425e-d4b4-414abc17e3ab" # !kaggle datasets download -d abhinavwalia95/entity-annotated-corpus # + id="u1LvqrEIeFvG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="09f371cc-742c-4fe0-955d-fa09731ed154" # !unzip entity-annotated-corpus.zip # + [markdown] id="ZuSoeUl3kFz2" colab_type="text" # ### Imports # + id="uD_gnl5YQHEH" colab_type="code" colab={} import os import time import numpy as np import pandas as pd import torch import torch.nn as nn import torch.optim as optim import torch.utils.data as data from itertools import chain from 
collections import Counter from sklearn import metrics from sklearn.preprocessing import MultiLabelBinarizer from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import seaborn as sns # + id="5NemuvQdVpGx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c89fa64d-e8f2-47af-aad5-c31a1a1bf411" device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') device # + [markdown] id="WNeeukPGkLSC" colab_type="text" # ### Data Exploration # + id="hM1ADdqCeRcr" colab_type="code" colab={} datafile = 'ner_dataset.csv' # + id="vXIfU9fBfB6b" colab_type="code" colab={} df = pd.read_csv(datafile, encoding="latin1", error_bad_lines=False) df = df.fillna(method='ffill') # + id="_T1WOTYVfRLE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="80b3b03e-6faa-4b8f-fd62-9769e240cbf9" df.head() # + id="GAzzVjPqf7Lb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="472c92c6-5908-45ea-98d0-a7cc8153e9d7" len(df) # + id="9c16vzijfkwL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 302} outputId="6e01e9de-3da1-4b7c-aebe-df57297e2ae4" tags = list(df.Tag.unique()) tags # + id="0eKeAR8DftPW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="0d885394-48bc-42f6-b158-e562fb0838f0" plt.figure(figsize=(15, 5)) sns.countplot(df.Tag.values) # + id="wPDMLtKdhFrv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="84fb2282-d75f-4308-b939-454eba3c1e47" num_tags = len(tags) num_tags # + [markdown] id="2haF4Hn2kRNz" colab_type="text" # ### Sentences # + id="pSmI25jJhjLN" colab_type="code" colab={} agg_func = lambda s: [(w, t) for w, p, t in zip(s["Word"].values.tolist(), s["POS"].values.tolist(), s["Tag"].values.tolist())] group = df.groupby("Sentence #").apply(agg_func) lines = [s for s in group] # + id="RknB-LoUiATn" colab_type="code" 
colab={"base_uri": "https://localhost:8080/", "height": 420} outputId="8f02394f-d784-4640-831a-b9cacd5dce60" lines[0] # + id="zLJvFI3yqgi0" colab_type="code" colab={} sentences = [['<START>'] + [tokens[0] for tokens in line] + ['<END'] for line in lines] tags = [['<START>'] + [tokens[1] for tokens in line] + ['<END'] for line in lines] # + id="2A0xItc3iFhX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a34cf123-148a-4553-b73b-2ed5cef3922e" len(sentences), len(tags) # + id="tH_xnhJFkj1M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 337} outputId="0528c4c4-c577-4c6c-a073-3559d89967da" sen_lengths = [len(sent) for sent in sentences] plt.figure(figsize=(15, 5)) sns.countplot(sen_lengths) # + id="q5HlzOzGmEDw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="db6808f9-8bd6-48fa-a6b3-13902204877f" train_sentences, valid_sentences, train_tags, valid_tags = train_test_split(sentences, tags, test_size=0.2, random_state=42) valid_sentences, test_sentences, valid_tags, test_tags = train_test_split(valid_sentences, valid_tags, test_size=0.5, random_state=42) len(train_sentences), len(valid_sentences), len(test_sentences) # + [markdown] id="vIFVl-O_kUJb" colab_type="text" # ### Vocabulary # + id="c9eJdmdhoamw" colab_type="code" colab={} class Vocab: def __init__(self, word2id, id2word): self.UNK = '<UNK>' self.PAD = '<PAD>' self.START = '<START>' self.END = '<END>' self.__word2id = word2id self.__id2word = id2word def get_word2id(self): return self.__word2id def get_id2word(self): return self.__id2word def __getitem__(self, item): if self.UNK in self.__word2id: return self.__word2id.get(item, self.__word2id[self.UNK]) return self.__word2id[item] def __len__(self): return len(self.__word2id) def id2word(self, idx): return self.__id2word[idx] # + id="9YlwfEdHocqP" colab_type="code" colab={} def build_vocab(data, freq_cutoff=5, is_tags=False): word_counts = 
Counter(chain(*data)) valid_words = [w for w, d in word_counts.items() if d >= freq_cutoff] valid_words = sorted(valid_words, key=lambda x: word_counts[x], reverse=True) valid_words += ['<PAD>'] word2id = {w: idx for idx, w in enumerate(valid_words)} if not is_tags: word2id['<UNK>'] = len(word2id) valid_words += ['<UNK>'] return Vocab(word2id=word2id, id2word=valid_words) # + id="yzicTNXdj8Rl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 370} outputId="e50cca54-2809-400e-fc52-4a687e0f6c03" words_vocab = build_vocab(train_sentences) tags_vocab = build_vocab(train_tags, is_tags=True) len(tags_vocab), tags_vocab.get_id2word() # + [markdown] id="RBVQHwTVk88N" colab_type="text" # ### NER Dataset # + id="r3RhXjbdk_Jf" colab_type="code" colab={} MAX_SEN_LEN = 50 # + id="v_Jc4l5NVwnp" colab_type="code" colab={} class NERDataset(data.Dataset): def __init__(self, sentences, tags, max_len): self.sentences = sentences self.tags = tags self.max_len = max_len def __len__(self): return len(self.sentences) def __getitem__(self, item): sentence = self.sentences[item] tag = self.tags[item] tokens, tags = [], [] for word, t in zip(sentence, tag): tokens.append(words_vocab[word]) tags.append(tags_vocab[t]) return torch.LongTensor(tokens), torch.LongTensor(tags) # + id="2McipX1Nl-8c" colab_type="code" colab={} train_dataset = NERDataset(train_sentences, train_tags, MAX_SEN_LEN) valid_dataset = NERDataset(valid_sentences, valid_tags, MAX_SEN_LEN) test_dataset = NERDataset(test_sentences, test_tags, MAX_SEN_LEN) # + id="B1cFfg0IqFE5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="0016dda6-d22c-4993-f7ec-0bf1985f8b17" train_dataset[0] # + [markdown] id="wwiiGscn29-V" colab_type="text" # ### DataLoaders # + id="uGPqxgEboFV1" colab_type="code" colab={} BATCH_SIZE = 128 # + id="QpHqwpKNqtcz" colab_type="code" colab={} def collate_fn(data): data.sort(key=lambda x: len(x[0]), reverse=True) sentences, tags = zip(*data) # 
Merge questions (from tuple of 1D tensor to 2D tensor). sent_lengths = [len(sent) for sent in sentences] inputs = torch.zeros(len(sentences), max(sent_lengths)).long() labels = torch.zeros(len(sentences), max(sent_lengths)).long() for i, (sent, lab) in enumerate(zip(sentences, tags)): end = sent_lengths[i] inputs[i, :end] = sent[:end] labels[i, :end] = lab[:end] return inputs, labels, sent_lengths # + id="ohUjd1c_rwqB" colab_type="code" colab={} train_data_loader = data.DataLoader(train_dataset, BATCH_SIZE, shuffle=True, collate_fn=collate_fn) valid_data_loader = data.DataLoader(valid_dataset, BATCH_SIZE, shuffle=False, collate_fn=collate_fn) test_data_loader = data.DataLoader(test_dataset, BATCH_SIZE, shuffle=False, collate_fn=collate_fn) # + id="QQHLq_P4n3AT" colab_type="code" colab={} sample = next(iter(train_data_loader)) # + id="XlQV_LyNs9PT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="955b7ea3-3a22-441b-8959-9f3dc03f70b8" sample[0].shape, sample[1].shape, len(sample[2]) # + [markdown] id="q08pzQGp3Ami" colab_type="text" # ### BiLSTM-CRF Model # + id="kFzphEAGTxad" colab_type="code" colab={} class BiLSTMCRF(nn.Module): def __init__(self, vocab_size, emb_dim, hid_dim, tag_vocab_size, sent_pad_token, tag_start_token, dropout=0.3): super().__init__() self.hid_dim = hid_dim self.sent_pad_token = sent_pad_token self.tag_start_token = tag_start_token self.tag_vocab_size = tag_vocab_size self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0) self.lstm = nn.LSTM( emb_dim, hid_dim, bidirectional=True, batch_first=True ) self.emission = nn.Linear(hid_dim * 2, tag_vocab_size) self.transition = nn.Parameter(torch.rand(tag_vocab_size, tag_vocab_size)) self.dropout = nn.Dropout(dropout) def forward(self, sentences, lengths, tags): # sentences => [batch_size, seq_len] # lengths => [batch_size] # tags => [batch_size, seq_len] mask = (sentences != self.sent_pad_token).to(device) # mask => [batch_size, seq_len] embed = 
self.embedding(sentences) embed = self.dropout(embed) # embed => [batch_size, seq_len, emb_dim] packed_inp = nn.utils.rnn.pack_padded_sequence(embed, lengths, batch_first=True) packed_output, _ = self.lstm(packed_inp) outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True) # outputs => [batch_size, seq_len, hid_dim * 2] combined = torch.cat((outputs[:, :, :self.hid_dim], outputs[:, :, self.hid_dim:]), dim=-1) combined = self.dropout(combined) # combined => [batch_size, seq_len, hid_dim * 2] emission_scores = self.emission(combined) # emission_scores => [batch_size, seq_len, tag_size] loss = self.vitebri_loss(tags, mask, emission_scores) # loss => [batch_size] return loss def vitebri_loss(self, tags, mask, emit_scores): # tags => [batch_size, seq_len] # mask => [batch_size, seq_len] # emit_scores => [batch_size, seq_len, tag_size] batch_size, sent_len = tags.shape # calculate the ground truth score score = torch.gather(emit_scores, 2, tags.unsqueeze(2)).squeeze(2) # emission scores of actual tags # score => [batch_size, seq_len] # add the transition scores to the emission scores # ignore the start token tag score score[:, 1:] += self.transition[tags[:, :-1], tags[:, 1:]] # consider only the scores of actual tokens not the padded gold_scores = (score * mask.type(torch.float)).sum(dim=1) # gold_scores => [batch_size] # calculate the scores of the partition (Z) # tensor to hold the accumulated sequence scores at each time step # at the inital time step score will be on dim=0 scores_upto_t = emit_scores[:, 0].unsqueeze(1) # scores_upto_t => [batch_size, 1, tag_size] for i in range(1, sent_len): # get the current batch_size batch_t = mask[:, i].sum() # get the accumulated scores till now (only the current batch size) scores_unpad = scores_upto_t[:batch_t] # scores_unpad => [batch_t, 1, tag_size] # add the transition scores for this time step scores_with_trans = emit_scores[:batch_t, i].unsqueeze(1) + self.transition # scores_with_trans => [batch_t, 
tag_size, tag_size] # add to the accumulation sum_scores = scores_unpad.transpose(1, 2) + scores_with_trans # sum_scores => [batch_t, tag_size, tag_size] # apply the following to overcome the overflow problems # since the exp(some_big_number) will cause issues # log(Σ exp(z_k)) = max(z) + log(Σ exp(z_k - max(z))) # log(Σ exp(z_k)) = log(Σ exp(z_k - c + c)) # = log(Σ exp(z_k - c) * exp(c)) # = log(Σ exp(z_k - c)) + log(exp(c)) # = log(Σ exp(z_k - c)) + c # by taking c as max(z) # log(Σ exp(z_k)) = max(z) + log(Σ exp(z_k - max(z))) [log_sum_exp] # get the maximum score of the current time step max_t = sum_scores.max(dim=1)[0].unsqueeze(1) # max_t => [batch_t, 1, tag_size] sum_scores = sum_scores - max_t # sum_scores => [batch_t, tag_size, tag_size] scores_t = max_t + torch.logsumexp(sum_scores, dim=1).unsqueeze(1) # scores_t => [batch_t, 1, tag_size] # update the accumulation scores scores_upto_t = torch.cat((scores_t, scores_upto_t[batch_t:]), dim=0) # scores_upto_t => [batch_size, 1, tag_size] final_scores = scores_upto_t.squeeze(1) # final_scores => [batch_size, tag_size] max_final_scores = final_scores.max(dim=-1)[0] # max_final_scores => [batch_size] predicted_scores = max_final_scores + torch.logsumexp(final_scores - max_final_scores.unsqueeze(1), dim=1) # predicted_scores => [batch_size] vitebri_loss = predicted_scores - gold_scores # vitebri_loss => [batch_size] return vitebri_loss def predict(self, sentences, lengths): # sentences => [batch_size, seq_len] # lengths => [batch_size] batch_size = sentences.size(0) mask = (sentences != self.sent_pad_token).to(device) # mask => [batch_size, seq_len] embed = self.embedding(sentences) embed = self.dropout(embed) # embed => [batch_size, seq_len, emb_dim] packed_inp = nn.utils.rnn.pack_padded_sequence(embed, lengths, batch_first=True) packed_output, _ = self.lstm(packed_inp) outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_output, batch_first=True) # outputs => [batch_size, seq_len, hid_dim * 2] combined = 
torch.cat((outputs[:, :, :self.hid_dim], outputs[:, :, self.hid_dim:]), dim=-1) combined = self.dropout(combined) # combined => [batch_size, seq_len, hid_dim * 2] emission_scores = self.emission(combined) # emission_scores => [batch_size, seq_len, tag_size] # to store the tags predicted at each time step # since at the begining every tag is start tag create the list with start tags tags = [[[self.tag_start_token] for _ in range(self.tag_vocab_size)]] * batch_size # tags => [batch_size, tag_size, 1] scores_upto_t = emission_scores[:, 0].unsqueeze(1) # scores_upto_t => [batch_size, 1, tag_size] for i in range(1, max(lengths)): # get the current batch_size batch_t = mask[:, i].sum() # get the accumulated scores till now (only the current batch size) scores_unpad = scores_upto_t[:batch_t] # scores_unpad => [batch_t, 1, tag_size] # add the transition scores for this time step scores_with_trans = emission_scores[:batch_t, i].unsqueeze(1) + self.transition # scores_with_trans => [batch_t, tag_size, tag_size] # add to the accumulation sum_scores = scores_unpad.transpose(1, 2) + scores_with_trans # sum_scores => [batch_t, tag_size, tag_size] max_scores_t, max_ids_t = torch.max(sum_scores, dim=1) max_ids_t = max_ids_t.tolist() # max_scores_t => [batch_t, tag_size] # max_ids_t => [batch_t, tag_size] # add the current time step predicted tags tags[:batch_t] = [[tags[b][k] + [j] for j, k in enumerate(max_ids_t[b])] for b in range(batch_t)] # update the accumulation scores scores_upto_t = torch.cat((max_scores_t.unsqueeze(1), scores_upto_t[batch_t:]), dim=0) # scores_upto_t => [batch_size, tag_size] scores = scores_upto_t.squeeze(1) # scores => [batch_size, tag_size] _, max_ids = torch.max(scores, dim=1) max_ids = max_ids.tolist() # max_ids => [batch_size] # tags => [batch_size, tag_size, seq_len] tags = [tags[b][k] for b, k in enumerate(max_ids)] # tags => [batch_size, seq_len] return tags # + id="FEY0Z8WMVmZm" colab_type="code" colab={"base_uri": "https://localhost:8080/", 
"height": 118} outputId="81f43467-c2ce-4abb-aead-fe3b6bcd578f" vocab_size = len(words_vocab) sent_pad_token = words_vocab[words_vocab.PAD] tag_start_token = tags_vocab[tags_vocab.START] emb_dim = 50 hid_dim = 100 tag_vocab_size = len(tags_vocab) model = BiLSTMCRF(vocab_size, emb_dim, hid_dim, tag_vocab_size, sent_pad_token, tag_start_token) model.to(device) # + id="Vuu7YjwQt3-v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f91e00e6-7d56-477e-9638-2cad759a8120" def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters') # + [markdown] id="1Fpc9vWq3EY3" colab_type="text" # ### Optimizer # + id="eKhNhP5_uFsr" colab_type="code" colab={} optimizer = optim.Adam(model.parameters(), lr=0.01) # + [markdown] id="zv1kwAoG3IzE" colab_type="text" # ### Training Method # + id="WJQZHAmGuP-y" colab_type="code" colab={} def train(model, iterator, optimizer, clip): model.train() epoch_loss = 0 total_sentences = 0 for batch in iterator: sentences = batch[0].to(device) tags = batch[1].to(device) seq_lengths = batch[2] # sentences => [batch_size, seq_len] # tags => [batch_size, seq_len] optimizer.zero_grad() batch_loss = model(sentences, seq_lengths, tags) # batch_loss => [batch_size] loss = batch_loss.mean() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), clip) optimizer.step() epoch_loss += batch_loss.sum().item() total_sentences += len(sentences) return epoch_loss / total_sentences # + [markdown] id="TeoX6uxx3MVe" colab_type="text" # ### Evaluation Method # + id="BK-UR_62uS0l" colab_type="code" colab={} def evaluate(model, iterator): model.eval() epoch_loss = 0 total_sentences = 0 with torch.no_grad(): for batch in iterator: sentences = batch[0].to(device) tags = batch[1].to(device) seq_lengths = batch[2] # sentences => [batch_size, seq_len] # tags => [batch_size, seq_len] batch_loss = model(sentences, 
seq_lengths, tags) # batch_loss => [batch_size] loss = batch_loss.mean() epoch_loss += batch_loss.sum().item() total_sentences += len(sentences) return epoch_loss / total_sentences # + [markdown] id="5urAtI4W3OUf" colab_type="text" # ### Training # + id="Y8_N8-U6uUtq" colab_type="code" colab={} def epoch_time(start_time, end_time): elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs # + id="XL5kQHBluWkT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 353} outputId="8eb40c47-29ab-42a8-cce5-9689075c7d7b" N_EPOCHS = 10 CLIP = 2 best_valid_loss = float('inf') for epoch in range(N_EPOCHS): start_time = time.time() train_loss = train(model, train_data_loader, optimizer, CLIP) valid_loss = evaluate(model, valid_data_loader) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'model.pt') print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s') print(f'\tTrain Loss: {train_loss:.3f} | Val. 
Loss: {valid_loss:.3f}')

# + [markdown] id="TIXBCe7S3c3y" colab_type="text"
# ### Testing

# + id="MqDAWdc8vVAf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0b44819d-3262-459d-a7f8-f243ac4602d8"
# Reload the best checkpoint saved during training before evaluating on the test set.
model.load_state_dict(torch.load('model.pt'))

# + id="tKC73CrN3Ycb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="af58ac45-7215-4db7-c83d-d4f50e450b6e"
test_loss = evaluate(model, test_data_loader)
print(f'Test Loss: {test_loss:.3f}')

# + [markdown] id="2Q4JpzZ-6YbX" colab_type="text"
# ### Metrics

# + id="dwi_NbV96bKO" colab_type="code" colab={}
def cal_metrics(model, iterator):
    # Print a per-tag precision/recall/F1 report over every batch in `iterator`.
    # NOTE(review): MultiLabelBinarizer treats each sentence's tag sequence as an
    # unordered *set* of tags, so token-position accuracy is not what is scored
    # here -- confirm this is the intended evaluation for sequence tagging.
    model.eval()  # disable dropout etc. so predictions are deterministic
    fin_outputs = []  # predicted tag-id sequences, one entry per sentence
    fin_targets = []  # gold tag-id sequences, one entry per sentence
    with torch.no_grad():
        for batch in iterator:
            sentences = batch[0].to(device)
            tags = batch[1].to(device)
            seq_lengths = batch[2]
            # sentences => [batch_size, seq_len]
            # tags => [batch_size, seq_len]
            predictions = model.predict(sentences, seq_lengths)
            # predictions => [batch_size, seq_len]
            fin_outputs.extend(predictions)
            fin_targets.extend(tags.detach().cpu().numpy().tolist())
    # one predicted sequence per gold sequence
    assert len(fin_outputs) == len(fin_targets)
    mlb = MultiLabelBinarizer()
    # fit on the gold tag sets only; predicted tags never seen in the gold data
    # are ignored by transform() below
    trans_trg = mlb.fit_transform(fin_targets)
    trans_pred = mlb.transform(fin_outputs)
    cf = metrics.classification_report(trans_trg, trans_pred)
    print(cf)

# + id="AQOywLvR8NPF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 558} outputId="e932d1b3-de6e-4dda-9e80-767c363bbf59"
# %%time
cal_metrics(model, test_data_loader)

# + [markdown] id="jEH5FeEBt4BY" colab_type="text"
# ### Inference

# + id="mWQOhx3g3lmd" colab_type="code" colab={}
def inference(sentence):
    # Tag one sentence with the trained model.
    # `sentence` may be a raw string (whitespace-tokenized and wrapped in
    # START/END markers) or an already-tokenized list (used as-is).
    # Returns (tokens, predicted_tags) where predicted_tags are tag strings.
    if isinstance(sentence, str):
        tokens = [words_vocab[words_vocab.START]] + sentence.split() + [words_vocab[words_vocab.END]]
    else:
        tokens = sentence
    # numericalize
    token_ids = [words_vocab[tok] for tok in tokens]
    # seq length
    sent_length = [len(token_ids)]
    # create tensors
    sent_tensor = torch.LongTensor(token_ids).to(device)
    sent_tensor = sent_tensor.unsqueeze(0)  # add a batch dimension
    # sent_tensor => [1, seq_len]
    model.eval()
    with torch.no_grad():
        predictions = model.predict(sent_tensor, sent_length)
    predictions = predictions[0]  # single sentence -> first (only) batch entry
    predicted_tags = []
    for i in predictions:
        predicted_tags.append(tags_vocab.id2word(i))  # map tag ids back to tag strings
    return tokens, predicted_tags

# + id="AUrgvPn1vj0W" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 638} outputId="8ecdf53d-3316-45d9-a08f-2a614dfa0bab"
# Compare predictions against gold tags for one test sentence.
sentence = test_sentences[0]
actual_tags = test_tags[0]

tokens, predicted_tag_ids = inference(sentence)

print("Pred. Tag\tActual Tag\tCorrect?\tToken\n")
for token, pred_tag, actual_tag in zip(tokens, predicted_tag_ids, actual_tags):
    correct = '✔' if pred_tag == actual_tag else '✘'
    print(f"{pred_tag}\t\t{actual_tag}\t\t{correct}\t\t{token}")

# + id="7UDJLIFLzlsN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 622} outputId="403d7988-79c4-4602-ebbf-e1baee1291c2"
sentence = test_sentences[10]
actual_tags = test_tags[10]

tokens, predicted_tag_ids = inference(sentence)

print("Pred. Tag\tActual Tag\tCorrect?\tToken\n")
for token, pred_tag, actual_tag in zip(tokens, predicted_tag_ids, actual_tags):
    correct = '✔' if pred_tag == actual_tag else '✘'
    print(f"{pred_tag}\t\t{actual_tag}\t\t{correct}\t\t{token}")

# + id="4jyhMAl63I7t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="7b1d6fd5-4493-4823-e624-7c686eedb16b"
# Free-form sentence; [1:-1] below strips the START/END markers added by inference().
sentence = "I like to live in New York"

tokens, predicted_tag_ids = inference(sentence)

print("Pred. Tag\tToken\n")
for token, pred_tag in zip(tokens[1:-1], predicted_tag_ids[1:-1]):
    print(f"{pred_tag}\t\t{token}")

# + id="srjLeOA53aRJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="81141a1b-73a4-4222-def7-8bfa013cda29"
sentence = "My dream is to work at Google"

tokens, predicted_tag_ids = inference(sentence)

print("Pred. Tag\tToken\n")
for token, pred_tag in zip(tokens[1:-1], predicted_tag_ids[1:-1]):
    print(f"{pred_tag}\t\t{token}")

# + id="lFkVZrIf38D3" colab_type="code" colab={}
applications/classification/ner_tagging/NER tagging with BiLSTM CRF.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 130px; display: inline" alt="INSA"/></a> # # # <a href="https://openclassrooms.com/" ><img src="https://upload.wikimedia.org/wikipedia/fr/0/0d/Logo_OpenClassrooms.png" style="float:right; max-width: 80px; display: inline" alt="open Classrooms"/> </a> # </center> # # Apprentissage Automatique: Valoriser les Données d'Objets Connectés # # [Anayse en Composantes Principales](http://wikistat.fr/pdf/st-m-explo-acp.pdf) et [Analyse Factorielle Discriminante]() avec <a href="https://www.python.org/"><img src="https://upload.wikimedia.org/wikipedia/commons/thumb/f/f8/Python_logo_and_wordmark.svg/390px-Python_logo_and_wordmark.svg.png" style="max-width: 150px; display: inline" alt="Python"/></a> & <a href="http://scikit-learn.org/stable/#"><img src="http://scikit-learn.org/stable/_static/scikit-learn-logo-small.png" style="max-width: 180px; display: inline" alt="Scikit-Learn"/></a> # **Résumé**: Ce calepin introduit l'utilisation de la librairie `scikit-learn` pour l'exploration statistique. Ceci est illustré par des exemples de mise en oeuvre de l'([ACP](http://wikistat.fr/pdf/st-m-explo-acp.pdf) et de l'[AFD]() sur des données "jouet". Il développe ensuite deux applications de l'ACP, sur des images élémentaires de caractères et enfin sur des données économiques sous une la forme particulière d'un cube ou tableau à trois indices. # ## 1 Introduction # ### 1.1 `Scikit-learn` *vs.* R # L'objectif de ce tutoriel est d'introduire l'utilisation de la librairie `scikit-learn` de Python pour l'exploration de données multidimensionnelles. 
Seule l'utilisation directe des fonctions d'exploration est abordée. Se pose rapidement une question: quand utiliser `scikit-learn` de Python plutôt que R plus complet pour certaines fonctionnalités et plus simple d'emploi? # # Le choix repose sur les points suivants: # - **Attention** cette librairie manipule des objets de classe `array` de `numpy` *chargés en mémoire* et donc de taille limitée par la RAM de l'ordinateur; de façon analogue R charge en RAM des objets de type `data.frame`. # - **Attention** toujours, `scikit-learn` (0.18) ne reconnaît pas (ou pas encore ?) la classe `DataFrame` de `pandas`; `scikit-learn` utilise la classe `array` de `numpy`. C'est un problème pour la gestion de variables qualitatives complexes. Une variable binaire est simplement remplacée par un codage $(0,1)$ mais, en présence de plusieurs modalités, traiter celles-ci comme des entiers n'a pas de sens statistique et remplacer une variable qualitative par l'ensemble des indicatrices (*dummy variables* $(0,1)$) de ses modalités complique l'interprétation statistique. # - Les implémentations en Python de certains algorithmes dans `scikit-learn` sont aussi efficaces (*e.g.* $k$-means), voire beaucoup plus efficaces pour des données volumineuses car utilisent implicitement les capacités de parallélisation. # - R offre beaucoup plus de possibilités pour une exploration, des recherches et comparaisons, des interprétations mais les capacités de parallélisation de Python sont nettement plus performantes. Plus précisément, l'introduction de nouvelles librairies n'est pas ou peu contraintes dans R, alors que celle de nouvelles méthodes dans `scikit-learn` se fait sous contrôle d'un groupe qui en contrôle la pertinence et l'efficacité. # # En conséquences: # - Préférer R et ses libraires si la **présentation graphique** des résultats et leur **interprétation** est prioritaire. # - Pour l'emploi de méthodes (analyse factorielle discriminante, canonique, positionnement multidimensionnel...) 
pas codées en Python. # - Préférer Python et `scikit-learn` pour mettre au point une chaîne de traitements (*pipe line*) opérationnelle de l'extraction à une analyse privilégiant la prévision brute à l'interprétation et pour des données quantitatives ou rendues quantitatives ("vectorisation" de corpus de textes). # # ### 1.2 Fonctions de `scikit-learn` # La communauté qui développe cette librairie est très active, elle évolue rapidement. Ne pas hésiter à consulter la [documentation](http://scikit-learn.org/stable/user_guide.html) pour des compléments. Voici une sélection de ses principales fonctionnalités. # - Transformations (standardisation, discrétisation binaire, regroupement de modalités, imputations rudimentaires de données manquantes) , "vectorisation" de corpus de textes (encodage, catalogue, Tf-idf), images. # - Exploration: ACP, classification non supervisée (mélanges gaussiens, propagation d'affinité, ascendante hiérarchique, SOM,...). Une fonction est ajoutée pour l'Analyse des Correspondances. # - Modélisation et apprentissage, voir le dépôt correspondant. # # # ### 1.3 ACP avec `scikit-learn` # L'objectif est d'illustrer la mise en oeuvre de l'[analyse en composantes principales](http://wikistat.fr/pdf/st-m-explo-acp.pdf). Consulter la [documentation](http://scikit-learn.org/stable/user_guide.html) et ses nombreux [exemples](http://scikit-learn.org/stable/auto_examples/index.html) pour plus de détails sur l'utilisation de `scikit-learn`. # # La librairie `scikit-learn` a principalement été conçue en vue de l'analyse de signaux. Aussi, de nombreuses options de l'ACP ne sont pas disponibles, notamment les graphiques usuels (biplot, cercle des corrélations...). En revanche des résultats sont liés à la version probabiliste de l'ACP sous hypothèse d'une distribution gaussienne multidimensionnelle des données. # # **Attention**, l'ACP est évidemment centrée mais pas réduite. 
L'option n'est pas prévue et les variables doivent être réduites (fonction `sklearn.preprocessing.scale`) avant si c'est nécessaire. L'attribut `transform` désigne les composantes principales, sous-entendu: transformation par réduction de la dimension; `n_components` fixe le nombre de composantes retenues, par défaut toutes; l'attribut `components_` contient les `n_components` vecteurs propres mais non normalisés, c'est-à-dire de norme carrée la valeur propre associée, et donc à utiliser pour représenter les variables. # # D'autres versions d'analyse en composantes principales sont proposées dans `Scikit-learn`: *kernel PCA, sparse PCA, ICA*... # # # Plusieurs jeux de données élémentaires sont utilisés dont celui "jouet" déjà vu en R afin de bien comprendre les sorties proposées par la fonction disponible. L'autre ensemble de données est un problème classique et simplifié de [reconnaissance de caractères](http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits) qui est inclus dans la librairie `scikit-learn`. # # ## 2 ACP de données "jouet" # Les données sont celles de l'exemple du MOOC et aussi de l'[introduction à l'ACP](http://wikistat.fr/pdf/st-l-des-multi): les notes en maths, français, physique et anglais de 9 lycéens virtuels. 
Les mêmes données sont analysées avec R dans un autre [tutoriel](https://github.com/wikistat/Exploration/blob/master/TutosRudim/Cal1-R-SVDtoACP.ipynb) # Construire la matrice de notes import pandas as pd note=[[6,6,5,5.5],[8,8,8,8],[6,7,11,9.5],[14.5,14.5,15.5,15], [14,14,12,12.5],[11,10,5.5,7],[5.5,7,14,11.5],[13,12.5,8.5,9.5], [9,9.5,12.5,12]] dat=pd.DataFrame(note,index=["jean","alai","anni","moni","didi","andr","pier","brig","evel"], columns=["Math","Phys","Fran","Angl"]) dat dat.boxplot() plt.show() # Matrice des nuages de points from pandas.plotting import scatter_matrix scatter_matrix(dat, figsize=(5, 5), diagonal='kde') plt.show() # Importation des fonctions from sklearn.decomposition import PCA from sklearn.preprocessing import scale import numpy as np import matplotlib.pyplot as plt # Matrice des variances-covariances dat.cov() # Matrice des corrélations dat.corr() # ### 2.1 Valeurs propres de l'ACP non réduite # Contrairement aux habitudes de beaucoup de logiciels, l'ACP de `scikit-learn` n'est pas *réduite*. Par ailleurs, le diviseur de la formule de calcul de la variance est celui d'une estimation sans biais: $(n-1)$ au lieu de $n$. # # Calcul des valeurs propres ou variances des composantes principales. pca = PCA() valPropres=pca.fit(dat).explained_variance_ print(valPropres) # Retrouver les valeurs propres du cours. valPropres*8/9 # ### 2.2 Vecteurs propres de l'ACP non réduite # # Les vecteurs propres sont aussi les coefficients des combinaisons linéaires des variables permettant de définir les variables principales. pca.components_.T # ### 2.3 Composantes principales de l'ACP non réduite C = pca.fit(dat).transform(dat) print(C) # Distribution des composantes principales. # # **Q** Quel choix de dimension. plt.boxplot(C) plt.show() # **Q** Comparer avec les résultats du cours obtenus en R. # # Tous les autres résultats (contributions, cossinus carrés, corrélations variables facteurs...) et surtout les graphes (éboulis, plans factoriels...) 
sont à construire car aucune fonction n'est disponible comme dans le package `FactoMineR` de R. C'est partièlement fait dans avec l'ACP des jeux de données suivants (section 4 et 5) et complété (*biplot*) dans les calepins plus complets des autres cas d'usage disponibles sur sur le dépôt [`github/wikistat`](https://github.com/wikistat/). # # ### 2.4 Représentation des individus plt.figure(figsize=(8,5)) for i, j, nom in zip(C[:,0], C[:,1], dat.index): plt.text(i, j, nom, fontsize=16) plt.axis((-10,12,-6,8)) plt.show() # **Attention** le signe d'un vectreur propre n'est pas pas déterminé car c'est une direciton ou sous-espace qui est "propre" pour une matrice. En fonction de l'algorithme ou logiciel utilisé, le vecteur peut être orienté dans un sens ou dans l'autre mais c'est bien la même direction qui est définie. Cela n'a aucune influence sur l'interprétation de ces résultats. # # ### 2.5 Graphe des variables # Calcul des coordonnées et réprésentation des variables coord1=pca.components_[0]*np.sqrt(pca.explained_variance_[0]) coord2=pca.components_[1]*np.sqrt(pca.explained_variance_[1]) fig = plt.figure(figsize=(5,5)) ax = fig.add_subplot(1, 1, 1) for i, j, nom in zip(coord1,coord2, dat.columns): plt.text(i, j, nom, fontsize=16) plt.arrow(0,0,i,j,color='black') plt.axis((-4,4,-4,4)) plt.show() # Le cercle des corrélations n'est pas tracé car les variables ne osnt pas réduites. # ## 3 AFD de données "jouet" # L'objectif de cet exemple sur des données très simples est de montrer l'intérêt d'une AFD par rapport à une ACP lorsque des classes des individus sont connues. # # ### 3.1 Lecture des données # Les données sont sont obtenues par l'observation de 6 variables, des dimensions d'ailes, élitres, antennes, pattes de 3 classes d'insectes. Il s'agit donc de vérifier graphiquement la bonne capacité de ces variables à distinguer ou discriminer ces trois classes. 
insect=pd.read_table("https://www.math.univ-toulouse.fr/~besse/Wikistat/Data/lubisch.txt",sep='\s+',header=0)
insect.head()

# ### 3.2 ACP
# **Q** Quel est le graphique ci-dessous? Comment choisir le nombre de composantes?

from sklearn.preprocessing import scale
# Standardize the six morphometric variables before the PCA.
X=scale(insect[["X1","X2","X3","X4","X5","X6"]])
# NOTE(review): Y is passed below as matplotlib's `color=` argument, so the Y
# column presumably holds a color name per class -- confirm against the data file.
Y=insect["Y"]
C = pca.fit(X).transform(X)  # reuses the PCA() object created in section 2
plt.boxplot(C)
plt.show()

# #### Graphique des individus

plt.figure(figsize=(10,6))
for i, j, nom in zip(C[:,0],C[:,1], Y):
    plt.scatter(i, j, color=nom)
plt.axis((-5,7,-4,4))
plt.show()

# **Q** Commenter la forme des trois nuages de points, la déjà bonne séparation des classes.
#
# #### Graphique des variables

# variable coordinates = eigenvectors scaled by the sqrt of the eigenvalues
coord1=pca.components_[0]*np.sqrt(pca.explained_variance_[0])
coord2=pca.components_[1]*np.sqrt(pca.explained_variance_[1])
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(1, 1, 1)
for i, j, nom in zip(coord1,coord2, insect.columns):
    plt.text(i, j, nom, fontsize=16)
    plt.arrow(0,0,i,j,color='black')
plt.axis((-1.2,1.2,-1.2,1.2))
# cercle
c=plt.Circle((0,0), radius=1, color='gray', fill=False)
ax.add_patch(c)
plt.show()

# **Q** Commenter la structure de corrélation des variables, la qualité de représentation.
#
# ### 3.3 AFD
#
#
# L'AFD est l'ACP des barycentres des classes avec la métrique de Mahalanobis dans l'espace des individus.
#
# **Q** Comment cette métrique est-elle définie?

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
method = LinearDiscriminantAnalysis()
lda=method.fit(X,Y)

# **Q** Que devient la forme des nuages? A quoi cela est-il dû? Commenter la séparation des classes.

Clda=lda.transform(X)
plt.figure(figsize=(10,6))
for i, j, nom in zip(Clda[:,0],Clda[:,1], Y):
    plt.scatter(i, j, color=nom)
plt.axis((-8,7,-4,6))
plt.show()

# **Remarque**:
# Les deux exemples qui suivent proposent des applications de l'ACP à d'autres jeux de données afin de montrer les capacités de cette méthode d'exploration multidimensionnelle.
Ce sont des compléments intéressants mais pas indispensables à la bonne compréhension de la suite du cours. # # ## 4 ACP des données "Caractères" # # # Il s'agit d'explorer les données issues de la pixellisation de tracés de caractères dont les procédés d'obtention et prétraitement sont décrits sur le [site de l'UCI](http://archive.ics.uci.edu/ml/datasets/Pen-Based+Recognition+of+Handwritten+Digits) (Lichman, 2013). Les chiffres ont été saisis sur des tablettes à l'intérieur de cadres de résolution $500\times 500$. Des procédures de normalisation, ré-échantillonnage spatial puis de lissage ont été appliquées. Chaque caractère apparaît finalement discrétisé sous la forme d'une matrice $8\times 8$ de pixels à 16 niveaux de gris et identifié par un label. Les données sont archivées sous la forme d'une matrice ou tableau à trois indices. Elles sont également archivées après vectorisation des images sous la forme d'une matrice à $p=64$ colonnes. # # L'étude du même type de données, mais nettement plus complexes (MNIST): 60 000 caractères représentés par des images de 784 pixels (28 $\times$ 28) fait l'objet d'un autre calepin. 
#
# ### 4.1 Prise en main des données

# Importations
import matplotlib.pyplot as plt
from sklearn import datasets
# %matplotlib inline

# les données présentes dans la librairie
digits = datasets.load_digits()
# Contenu et mode d'obtention
print(digits)

# Dimensions
digits.images.shape

# Sous forme d'un cube d'images 1797 x 8x8
print(digits.images)

# Sous forme d'une matrice 1797 x 64
print(digits.data)

# Label réel de chaque caractère
print(digits.target)

# Voici un aperçu des empilements des images à décrire puis ensuite en principe à discriminer:

images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:8]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Chiffres: %i' % label)

# ### 4.2 Analyse en composantes principales

from sklearn.decomposition import PCA
X=digits.data
y=digits.target
target_name=[0,1,2,3,4,5,6,7,8,9]
# définition de la commande
pca = PCA()
# Estimation, calcul des composantes principales
C = pca.fit(X).transform(X)
# Décroissance de la variance expliquée
plt.plot(pca.explained_variance_ratio_)
plt.show()

# Diagramme boîte des premières composantes principales.

plt.boxplot(C[:,0:20])
plt.show()

# **Q** Quelle dimension retenir en principe?
#
# Représentation des caractères dans le premier plan principal.
#
# La représentation des variables (pixels) et le *biplot* n'ont pas grand intérêt pour ces données.

plt.scatter(C[:,0], C[:,1], c=y, label=target_name)
plt.show()

# Le même graphique avec une légende mais moins de couleurs.

# attention aux indentations
# NOTE(review): the loop variable deliberately re-uses (and shadows) the
# `target_name` list being iterated; it works, but rename it if refactoring.
plt.figure()
for c, i, target_name in zip("rgbcmykrgb",[0,1,2,3,4,5,6,7,8,9], target_name):
    plt.scatter(C[y == i,0], C[y == i,1], c=c, label=target_name)
plt.legend()
plt.title("ACP Digits")
plt.show()

# Graphique en trois dimensions.

from mpl_toolkits.mplot3d import Axes3D
# NOTE(review): direct Axes3D(fig, ...) construction and the ax.w_*axis
# attributes are deprecated/removed in recent matplotlib versions
# (use fig.add_subplot(projection='3d') and ax.xaxis instead) -- update
# if the notebook is run with matplotlib >= 3.8.
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
ax.scatter(C[:, 0], C[:, 1], C[:, 2], c=y, cmap=plt.cm.Paired)
ax.set_title("ACP: trois premieres composantes")
ax.set_xlabel("Comp1")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("Comp2")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("Comp3")
ax.w_zaxis.set_ticklabels([])
plt.show()

# ## 5 Données "cubiques" de l'OCDE
# ### 5.1 Introduction
# #### Objectif
# L'objectif de cette section est l'exploration de données socio-économiques plus complexes. La principale spécificité de ces données est de se présenter sous la forme d'un cube de données ou tableau à trois entrées: le numéro de ligne, le numéro de variable et l'année d'observation de cette variable. Après une description classique, la mise en oeuvre de l'analyse en composantes principales avec python nécessite un effort particulier afin de produire les graphes adaptés à la structure particulière des données.
#
# #### Les données
# Les données sont issues de l'Observatoire de l'OCDE. Pour chaque pays membre et pour chacune des années 1975, 1977, 1979, 1981, on connaît les valeurs prises par les variables suivantes qui sont toutes des \emph{taux}~:
# - Taux brut de natalité,
# - Taux de chômage,
# - Pourcentage d'actifs dans le secteur primaire,
# - Pourcentage d'actifs dans le secteur secondaire,
# - produit intérieur brut (par habitant),
# - Formation brute de capital fixe (par habitant),
# - Hausse des prix,
# - Recettes courantes (par habitant),
# - Mortalité infantile,
# - Consommation de protéines animales (par habitant),
# - Consommation d'énergie (par habitant).
#
# Elles sont disponibles dans le fichier: `ocdeR.dat`.
#
# Les mêmes variables sont donc observées, sur les mêmes pays ou individus à quatre dates différentes. Plusieurs stratégies d'analyse sont possibles (tableau moyen, tableaux concaténés, meilleur compromis ou double ACP).
La plus adaptée pour ces données est de considérer les observations des variables pour chacun des individus: pays $\times$ années.
#
#
# Importation des principales librairies et
# Affichage des graphiques dans le notebook
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# ### 5.2 Lecture des données

ocde=pd.read_csv("https://www.math.univ-toulouse.fr/~besse/Wikistat/Data/ocde.txt",sep='\s+',header=0)
ocde.head()

# ### 5.3 Statistiques élémentaires
# Consulter rapidement ces résultats; Que dire à propos de la symétrie des distributions, de leur normalité, des valeurs atypiques.

ocde.mean()

ocde["CNRJ"].hist(bins=20)
plt.show()

from pandas.plotting import scatter_matrix
scatter_matrix(ocde, alpha=0.2, figsize=(15, 15), diagonal='kde')
plt.show()

# **Q** Quel est le graphique ci-dessous? Que représentent les blocs dagonaux? Que dire des structures de corrélation?

# ### 5.4 [Analyse en composantes principales](http://wikistat.fr/pdf/st-m-explo-acp.pdf)
# Chaque pays étant observé 4 fois, la principale difficulté technique est de faire apparaître cette structure chronologique dans les graphique afin d'illustrer la dynamique économique de la période considérée.
#
# **Q** Justifier la nécessité de réduire.
#
# **Q** Pourquoi toutes les variables sont des taux?
#
# #### Choix de dimension

from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# réduction
ocdeS=scale(ocde)
pca = PCA()
cpOcde = pca.fit_transform(ocdeS)
# Eboulis
plt.plot(pca.explained_variance_ratio_)
plt.show()

plt.boxplot(cpOcde)
plt.show()

# **Q** Quel est le graphe ci-dessus. Que dire de la première composante? Quelle dimension choisir?
#
# #### Représentation des variables

# variable coordinates = eigenvectors scaled by sqrt(eigenvalue)
coord1=pca.components_[0]*np.sqrt(pca.explained_variance_[0])
coord2=pca.components_[1]*np.sqrt(pca.explained_variance_[1])
fig = plt.figure(figsize=(5,5))
ax = fig.add_subplot(1, 1, 1)
for i, j, nom in zip(coord1,coord2, ocde.columns):
    plt.text(i, j, nom)
    plt.arrow(0,0,i,j,color='black')
plt.axis((-1.2,1.2,-1.2,1.2))
# cercle
c=plt.Circle((0,0), radius=1, color='gray', fill=False)
ax.add_patch(c)
plt.show()

# **Q** Interpréter chacun des deux premiers axes.
#
# **Exo** représenter le plan (2,3) et interpréter le 3ème axe.

# #### Représentation basique des individus

plt.figure(figsize=(10,6))
for i, j, nom in zip(cpOcde[:,0], cpOcde[:,1], ocde.index):
#     color = int(i/4)
    plt.text(i, j, nom ,color="blue")
plt.axis((-5,7,-4,4))
plt.show()

# #### Représentation adaptée à ces données
# La structure particulière des données nécessite un graphique adapté. Ceci est en fait le **principal objectif** d'une *bonne exploration des données*: trouver la **représentation graphique** qui permet d'en comprendre toute la structure en une seule vue.

# +
import matplotlib.patheffects as PathEffects

comp_0 = 0
comp_1 = 1

cmap = plt.get_cmap("tab20")

fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(1,1,1)
# Each country occupies 4 consecutive rows (one per year): draw one poly-line
# per country and label its first point with the country name.
for i,k in enumerate(np.arange(0,cpOcde.shape[0],4)):
    country =ocde.index[k]
    xs = cpOcde[k:k+4,comp_0]
    ys = cpOcde[k:k+4, comp_1]
    ax.plot(xs,ys, color=cmap(i), marker=".", markersize=15)
    txt = ax.text(xs[-4], ys[-4], country, horizontalalignment="left", verticalalignment="top",
                  color=cmap(i), fontweight="bold", fontsize=15)
    # Add black line around text
    #txt.set_path_effects([PathEffects.withStroke(linewidth=1, foreground='black')])
ax.set_xlabel("PC%d" %comp_0, fontsize=20)
ax.set_ylabel("PC%d" %comp_1, fontsize=20)
plt.tight_layout()
plt.show()
# -

# **Q** Analyser les évolutions des économies des différents pays. Les replacer dans la période considérée (chocs pétroliers).
ML4IoT-Intro-ACP-AFD.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Vehicle Detection
#
#
# The goals / steps of this project are the following:
#
# * Perform a Histogram of Oriented Gradients (HOG) feature extraction on a labeled training set of images and train a classifier Linear SVM classifier
# * Optionally, you can also apply a color transform and append binned color features, as well as histograms of color, to your HOG feature vector.
# * Note: for those first two steps don't forget to normalize your features and randomize a selection for training and testing.
# * Implement a sliding-window technique and use your trained classifier to search for vehicles in images.
# * Run your pipeline on a video stream (start with the test_video.mp4 and later implement on full project_video.mp4) and create a heat map of recurring detections frame by frame to reject outliers and follow detected vehicles.
# * Estimate a bounding box for vehicles detected.
#
# ---

# ## Import Packages

# +
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
import pickle
import sys, os, time

from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection (available since 0.18).
from sklearn.model_selection import train_test_split
from scipy.ndimage.measurements import label

# %matplotlib inline
# -

# ## Initialization

# +
global show_debug_info
show_debug_info = True

# HOG parameters: orientation bins, pixels per cell, cells per block, channel.
param_hog = {'orient': 9, 'ppc': 8, 'cpb': 2, 'channel': 0}
# Spatial-binning parameters: color space and down-sampled size.
param_spatial_bin = {'color': 'LUV', 'size': (16, 16)}

# y_range = [[[0, 1280], [400, 650]]]
# xy_windows_multi_scale = [(128, 128)]
# Search regions ([x range, y range]) and the window size used in each region.
y_range = [[[20, 600], [400, 650]], [[680, 1260], [400, 650]], [[480, 800], [400, 560]]]
xy_windows_multi_scale = [(90, 90), (90, 90), (128, 128)]

# +
# Extract HoG
def get_hog_feature(img, orient, ppc, cpb, vis=True, feature_vec=True):
    # Compute HOG features for a single-channel image.
    # Returns (features, hog_image) when vis is True, otherwise just features.
    # NOTE(review): the `visualise` keyword was renamed `visualize` in
    # scikit-image 0.16 and removed in 0.19 -- update if the environment
    # runs a recent scikit-image.
    return_list = hog(img, orientations=orient,
                      pixels_per_cell=(ppc, ppc),
                      cells_per_block=(cpb, cpb),
                      block_norm='L2',
                      transform_sqrt=False,
                      visualise=vis, feature_vector=feature_vec)
    if vis:
        hog_features = return_list[0]
        hog_image = return_list[1]
        return hog_features, hog_image
    else:
        hog_features = return_list
        return hog_features

# Extract spatial bins
def bin_spatial(img, color_space='RGB', size=(32, 32)):
    # Down-sample the image and flatten it into a feature vector.
    # (color_space is unused here; the conversion happens in FeatureExtraction.)
    # Use cv2.resize().ravel() to create the feature vector
    features = cv2.resize(img, size).ravel()
    # Return the feature vector
    return features

def color_hist(img, nbins=32, bins_range=(0, 256)):
    # Histogram each color channel and concatenate into one feature vector.
    # Compute the histogram of the color channels separately
    channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
    channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
    channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
    # Concatenate the histograms into a single feature vector
    hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
    # Return the individual histograms, bin_centers and feature vector
    return hist_features

# Extract features
def FeatureExtraction(img, hog_params, bin_spatial_params):
    # Build the full feature vector (HOG + spatial bins + color histogram)
    # for one RGB image patch.
    # convert color
    color_space = bin_spatial_params['color']
    if color_space != 'RGB':
        if color_space == 'HSV':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            img = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        img = np.copy(img)

    # hog
    channel = hog_params['channel']
    f_hog = []
    if channel == 'ALL':
        fs_hog = []
        for c in range(img.shape[2]):
            f_hog, img_hog = get_hog_feature(img[:,:,c], hog_params['orient'], hog_params['ppc'], hog_params['cpb'])
            fs_hog.append(f_hog)
        f_hog = np.ravel(fs_hog)
    else:
        f_hog, img_hog = get_hog_feature(img[:,:,channel], hog_params['orient'], hog_params['ppc'], hog_params['cpb'])

    # 'HSV', size=(16, 16)
    f_spatial = bin_spatial(img, bin_spatial_params['color'], size=bin_spatial_params['size'])

    hist_color = color_hist(img)

    features = []
    features.append(f_hog)
    features.append(f_spatial)
    features.append(hist_color)
    return np.concatenate(features)
#     features = np.concatenate((f_hog, f_spatial, hist_color))
#     return features


# +
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, x_start_stop=(None, None), y_start_stop=(None, None),
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    # Enumerate all sliding-window coordinates over the requested image region.
    # FIX: the defaults were mutable lists that the function mutated in place,
    # so the *default* search region silently became the first image's size on
    # every later call; tuple defaults plus a local copy avoid that.
    x_start_stop = list(x_start_stop)
    y_start_stop = list(y_start_stop)
    # If x and/or y start/stop positions not defined, set to image size
    if x_start_stop[0] is None:
        x_start_stop[0] = 0
    if x_start_stop[1] is None:
        x_start_stop[1] = img.shape[1]
    if y_start_stop[0] is None:
        y_start_stop[0] = 0
    if y_start_stop[1] is None:
        y_start_stop[1] = img.shape[0]
    # Compute the span of the region to be searched
    xspan = x_start_stop[1] - x_start_stop[0]
    yspan = y_start_stop[1] - y_start_stop[0]
    # Compute the number of pixels per step in x/y
    # FIX: np.int was removed in NumPy 1.24; it was a plain alias of builtin int.
    nx_pix_per_step = int(xy_window[0]*(1 - xy_overlap[0]))
    ny_pix_per_step = int(xy_window[1]*(1 - xy_overlap[1]))
    # Compute the number of windows in x/y
    nx_buffer = int(xy_window[0]*(xy_overlap[0]))
    ny_buffer = int(xy_window[1]*(xy_overlap[1]))
    nx_windows = int((xspan-nx_buffer)/nx_pix_per_step)
    ny_windows = int((yspan-ny_buffer)/ny_pix_per_step)
    # Initialize a list to append window positions to
    window_list = []
    # Loop through finding x and y window positions
    # Note: you could vectorize this step, but in practice
    # you'll be considering windows one by one with your
    # classifier, so looping makes sense
    for ys in range(ny_windows):
        for xs in range(nx_windows):
            # Calculate window position
            startx = xs*nx_pix_per_step + x_start_stop[0]
            endx = startx + xy_window[0]
            starty = ys*ny_pix_per_step + y_start_stop[0]
            endy = starty + xy_window[1]
            # Append window position to list
            window_list.append(((startx, starty), (endx, endy)))
    # Return the list of windows
    return window_list

# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def FindCarCandidates(img, windows, clf, scaler):
    # Classify every candidate window; return only those predicted as "car".
    #1) Create an empty list to receive positive detection windows
    on_windows = []
    #2) Iterate over all windows in the list
    for window in windows:
        #3) Extract the test window from original image
#         print(window)
        test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        #4) Extract features for that window using single_img_features()
        features = FeatureExtraction(test_img, param_hog, param_spatial_bin)
        #5) Scale extracted features to be fed to classifier
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        #6) Predict using your classifier
        prediction = clf.predict(test_features)
        #7) If positive (prediction == 1) then save the window
        if prediction == 1:
#             print('matched!')
            on_windows.append(window)
    #8) Return windows for
# NOTE(review): this chunk opens mid-function — the tokens
#     "... positive detections / return on_windows"
# are the tail of a sliding-window search helper whose `def` line lies in an
# earlier chunk; they are preserved here as a comment only because the
# function header is not visible:
#     # positive detections
#     return on_windows


# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with every box in `bboxes` drawn on it.

    Each bbox takes the form ((x1, y1), (x2, y2)).
    """
    # Make a copy of the image so the caller's array is not modified.
    imcopy = np.copy(img)
    # Iterate through the bounding boxes
    for bbox in bboxes:
        # Draw a rectangle given bbox coordinates
        cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
    # Return the image copy with boxes drawn
    return imcopy


# +
# Use a heat map to filter out objects that suddenly appear (false positives).
def add_heat(heatmap, bbox_list):
    """Add +1 to all pixels inside each box of `bbox_list`; mutates and
    returns `heatmap`.

    Assumes each "box" takes the form ((x1, y1), (x2, y2)).
    """
    for box in bbox_list:
        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
    return heatmap


def apply_threshold(heatmap, threshold):
    """Zero out pixels at or below `threshold` (in place); return the map."""
    heatmap[heatmap <= threshold] = 0
    return heatmap


def draw_labeled_bboxes(img, labels):
    """Draw one box per labelled region on `img`.

    `labels` is the (label_array, count) pair returned by the ndimage
    `label` function.
    """
    # Iterate through all detected cars (label values start at 1).
    for car_number in range(1, labels[1] + 1):
        # Find pixels with each car_number label value.
        nonzero = (labels[0] == car_number).nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Define a bounding box based on min/max x and y.
        bbox = ((np.min(nonzerox), np.min(nonzeroy)),
                (np.max(nonzerox), np.max(nonzeroy)))
        cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
    return img
# -


def Process(image, fg_ret_heatmap=False):
    """Run the full detection pipeline on one RGB frame.

    Returns the frame with heat-filtered boxes drawn; when `fg_ret_heatmap`
    is True, returns (filtered image, heatmap, raw-detections image).
    """
    # Multi-scale sliding windows.
    # NOTE(review): x_start_stop is fed from y_range[i][0] — presumably
    # y_range holds (x-range, y-range) pairs despite its name; confirm
    # against its definition earlier in the file.
    windows_multi_scale = []
    for i in range(len(y_range)):
        windows = slide_window(image,
                               x_start_stop=y_range[i][0],
                               y_start_stop=y_range[i][1],
                               xy_window=xy_windows_multi_scale[i],
                               xy_overlap=(0.5, 0.5))
        windows_multi_scale += windows

    # X_scaler is only read here, so no `global` declaration is needed.
    hot_windows = FindCarCandidates(image, windows_multi_scale, svc, X_scaler)

    # a. raw detections, before heatmap filtering
    window_img_a = draw_boxes(image, hot_windows, color=(0, 0, 255), thick=6)

    # BUG FIX: was `np.zeros_like(img[:,:,0])` — `img` is not defined in this
    # function (the code only worked via a leaked global); use the `image`
    # argument. `np.float` was also removed in NumPy 1.24; use builtin float.
    heat = np.zeros_like(image[:, :, 0]).astype(float)
    # Add heat to each box in box list.
    heat = add_heat(heat, hot_windows)
    # Apply threshold to help remove false positives (0 keeps any covered pixel).
    heat = apply_threshold(heat, 0)
    # Visualize the heatmap when displaying.
    heatmap = np.clip(heat, 0, 255)
    # Find final boxes from heatmap using label function.
    labels = label(heatmap)

    # b. detections after heatmap filtering
    window_img = draw_labeled_bboxes(np.copy(image), labels)

    # debug
    if show_debug_info:
        fig = plt.figure()
        plt.subplot(141)
        plt.imshow(window_img)
        plt.title('Car Positions')
        plt.subplot(142)
        plt.imshow(window_img_a)
        plt.title('Detected Objects')
        plt.subplot(143)
        plt.imshow(heatmap, cmap='hot')
        plt.title('Heat Map')
        plt.subplot(144)
        plt.imshow(window_img)
        plt.title('Filtered Objects')
        fig.tight_layout()

    if fg_ret_heatmap is True:
        return window_img, heatmap, window_img_a
    return window_img


# ## Load Dataset

img_filename_v = glob.glob('./dataset/vehicles/**/*.png')
img_filename_nv = glob.glob('./dataset/non-vehicles/**/*.png')
# (typo fix in output strings: "vehicel" -> "vehicle")
print('Dataset size of vehicle:', len(img_filename_v))
print('Dataset size of non-vehicle:', len(img_filename_nv))

# +
features_v = []
features_nv = []
X_train = []
X_test = []
y_train = []
y_test = []

print('Vehicle')
for filename in img_filename_v:
    img = cv2.imread(filename)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    features = FeatureExtraction(img, param_hog, param_spatial_bin)
    features_v.append(features)

print('Non-vehicle')
for filename in img_filename_nv:
    img = cv2.imread(filename)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    features = FeatureExtraction(img, param_hog, param_spatial_bin)
    features_nv.append(features)

print('Feature vector length:', len(features_v[0]))

X = np.vstack((features_v, features_nv)).astype(np.float64)
# Label vehicles 1, non-vehicles 0.
y = np.hstack((np.ones(len(features_v)), np.zeros(len(features_nv))))

# Default shuffle data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
print("Amount:\n", "\tTraining dataset:", len(X_train),
      "\n\tTest dataset:", len(X_test))
# -

# ## Train

# +
# Fit a per-column scaler only on the training data.
# (A module-level `global X_scaler` statement stood here; `global` is a
# no-op at module scope, so it has been dropped.)
plt.show()
plt.plot(X_train[0])
X_scaler = StandardScaler().fit(X_train)
# Apply the scaler to X_train and X_test
X_train = X_scaler.transform(X_train)
X_test = X_scaler.transform(X_test)
plt.show()
plt.plot(X_train[0])

# Use a linear SVC
svc = LinearSVC()
# Check the training time for the SVC
t = time.time()
svc.fit(X_train, y_train)
t2 = time.time()
print(round(t2 - t, 2), 'Seconds to train SVC...')
# Check the score of the SVC
print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))
# Check the prediction time for a single sample
t = time.time()
n_predict = 10
print('My SVC predicts:\t', svc.predict(X_test[0:n_predict]))
print('For these', n_predict, 'labels:\t', y_test[0:n_predict])
t2 = time.time()
print(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC.')
# -

# ## Test

# +
test_imgs = sorted(glob.glob('./test_images/t/*.jpg'))
for img in test_imgs:
    print(img)
    img = cv2.imread(img)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # (removed an unused `heat` buffer here — Process builds its own heatmap,
    # and the old line used the removed `np.float` alias)
    window_img, heatmap, window_img_a = Process(img, fg_ret_heatmap=True)

# +
# show image of 10 random data points
row = 2
col = 5
num = row * col


def _show_grid(filenames, index, to_hog=False):
    """Plot a row x col grid of samples; with to_hog, show HOG renderings."""
    fig, axs = plt.subplots(row, col, figsize=(15, 6))
    fig.subplots_adjust(hspace=.2, wspace=.001)
    axs = axs.ravel()
    for i in range(num):
        image = cv2.imread(filenames[index[i]])
        if to_hog:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            features, img_hog = get_hog_feature(image, 9, 8, 2)
            show = img_hog
        else:
            show = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        axs[i].axis('off')
        axs[i].imshow(show)
        axs[i].set_title(index[i])
    plt.show()


index = np.random.randint(0, high=len(img_filename_v), size=num)
_show_grid(img_filename_v, index)           # raw vehicle samples
_show_grid(img_filename_v, index, True)     # their HOG visualisations
print()
index = np.random.randint(0, high=len(img_filename_nv), size=num)
_show_grid(img_filename_nv, index)          # raw non-vehicle samples
_show_grid(img_filename_nv, index, True)    # their HOG visualisations
# -

# ---
# ## Video Part

show_debug_info = False

# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

output_dir = 'test_videos_output/'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

video_name = 'test_video.mp4'
white_output = output_dir + video_name
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
## clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("./" + video_name)
white_clip = clip1.fl_image(Process)  #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)

HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(white_output))

video_name = 'project_video.mp4'
white_output = output_dir + video_name
## (the same .subclip(start_second, end_second) speed-up tips as above apply)
clip1 = VideoFileClip("./" + video_name)
white_clip = clip1.fl_image(Process)  #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)

HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(white_output))
P5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Assignment 4
#
# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
#
# This assignment requires you to find **at least** two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of **economic activity or measures** (see below) for the region of **Ann Arbor, Michigan, United States**, or **United States** more broadly.
#
# You can merge these datasets with data from different regions if you like! For instance, you might want to compare **Ann Arbor, Michigan, United States** to Ann Arbor, USA. In that case at least one source file must be about **Ann Arbor, Michigan, United States**.
#
# You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property.
#
# Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide English translations. You are welcome to provide multiple visuals in different languages if you would like!
# # As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having as high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight. # # Here are the assignment instructions: # # * State the region and the domain category that your data sets are about (e.g., **Ann Arbor, Michigan, United States** and **economic activity or measures**). # * You must state a question about the domain category and region that you identified as being interesting. # * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages. # * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness. # * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question. # # What do we mean by **economic activity or measures**? For this category you might look at the inputs or outputs to the given economy, or major changes in the economy compared to other regions. # # ## Tips # * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources. # * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources. # * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data. # * This assignment requires you to convert and clean datafiles. 
# Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!
#
# ## Example
# Looking for an example? Here's what our course assistant put together for the **Ann Arbor, MI, USA** area using **sports and athletics** as the topic. [Example Solution File](./readonly/Assignment4_example.pdf)

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# %matplotlib notebook

# use the 'seaborn-colorblind' style
plt.style.use('seaborn-colorblind')


# +
# House Price Index (HPI) data downloaded from FRED, one CSV per metro area.
def _load_hpi(path, fred_column, label):
    """Read one FRED CSV, label its HPI column, and index the frame by date."""
    frame = pd.read_csv(path)
    frame.rename(columns={"DATE": "Date", fred_column: label}, inplace=True)
    frame.set_index('Date', inplace=True)
    return frame


# Ann Arbor, Chapel Hill and Berkeley Housing Price Index files.
dfaam = _load_hpi('Ann_Arbor.csv', "ATNHPIUS11460Q", "HPI-Ann Arbor")
dfcha = _load_hpi('Chapel_Hill.csv', "ATNHPIUS20500Q", "HPI-Chapel Hill")
dfber = _load_hpi('Berkeley.csv', "ATNHPIUS36084Q", "HPI-Berkeley")

# +
# Draw the three series on one pair of axes.
ax = dfaam.plot.line(
    figsize=(10, 9),
    title="Comparing House Price Index, 1995-2019\n Relative to 1995 (1995=100)")
#dflas.plot.line(ax=ax)
dfcha.plot.line(ax=ax)
dfber.plot.line(ax=ax)
# Shade the housing-market-crash quarters and annotate the span.
ax.axvspan(46, 69, color='lightgrey', alpha=0.5)
ax.text(48, 380, 'Housing Market Crash\n (2006 - 2012)')
# -
02_Applied Plotting and Charting Data Representation in Python/Week_4/Assignment4_LZ.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Titanic - P(deceased) - <NAME>

# ## Import libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

# ## Read Data

train = pd.read_csv('DA_titanic_train.csv')

# Checking data
train.head()

# ## Explore Data

# Missing values show up as bright cells in the heatmap.
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train)

sns.set_style('whitegrid')
sns.countplot(x='Survived',data=train,palette='RdBu_r')

sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')

sns.set_style('whitegrid')
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')

train['Age'].hist(bins=30,color='darkred',alpha=0.9)

sns.countplot(x='SibSp',data=train)

# ## Cufflinks for plots

import cufflinks as cf
cf.go_offline()

train['Fare'].iplot(kind='hist',bins=30,color='green')

# ___
# ## Data Cleaning
# Filling in missing age data instead of just dropping the missing age data rows.
# Checking the average age by passenger class and using that to fill in missing ones.

plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')


# We can see the wealthier passengers in the higher classes tend to be older, which makes sense. I'll use these average age values to impute based on Pclass for Age.

def impute_age(cols):
    """Return the Age of a (Age, Pclass) row, imputing a per-class value
    (37 / 29 / 24 for classes 1 / 2 / other) when Age is missing."""
    # Label-based access replaces the deprecated positional Series indexing
    # (cols[0] / cols[1]); behaviour is identical.
    Age = cols['Age']
    Pclass = cols['Pclass']
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        elif Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age


train['Age'] = train[['Age','Pclass']].apply(impute_age,axis=1)

# Now let's check that heat map again!
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

# Dropping Cabin column and the row in Embarked that is NaN.
train.drop('Cabin',axis=1,inplace=True)

sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

train.head()

train.dropna(inplace=True)

sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')

# ## Converting Categorical Features
#
# We'll need to convert categorical features to dummy variables, otherwise our machine learning algorithm won't be able to directly take in those features as inputs.

train.info()

sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)

# NOTE(review): this re-creates `sex` WITHOUT drop_first (both 'female' and
# 'male' columns); the redundant 'female' column is dropped again below, so
# the end result matches the drop_first version.
sex = pd.get_dummies(train['Sex'])
sex

train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)

train = pd.concat([train,sex,embark],axis=1)

train.head()

train.drop(['female'],axis=1,inplace=True)

train.head()

# ## Clean test set
# Cleaning test set similar to train set

test = pd.read_csv('DA_titanic_test.csv')

# BUG FIX: `test.info` without parentheses only echoed the bound method.
test.info()

test.head()

sns.heatmap(test.isnull(),yticklabels=False,cbar=False,cmap='viridis')

test['Age'] = test[['Age','Pclass']].apply(impute_age,axis=1)

test.drop('Cabin',axis=1,inplace=True)

# NOTE(review): dropna stays disabled here; if the test CSV has a missing
# Fare/Embarked value, predict() below will fail on NaNs — confirm the data.
# test.dropna(inplace=True)

# ## Converting categorical features

sex = pd.get_dummies(test['Sex'],drop_first=True)
embark = pd.get_dummies(test['Embarked'],drop_first=True)

test.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)

test = pd.concat([test,sex,embark],axis=1)

test.head()

# # Building a Logistic Regression model
#
# ## Train Test Split

X_train = train.drop('Survived',axis=1)
X_train.head()

X_test = test
X_test.head()

y_train = train['Survived']
y_train.head()

# ## Training and Predicting

from sklearn.linear_model import LogisticRegression

# BUG FIX: the original constructed a throwaway LogisticRegression with
# max_iter=30000 and then fitted a default-configured model (max_iter=100),
# which can stop before lbfgs converges. Keep the iteration cap on the model
# that is actually trained.
logmodel = LogisticRegression(max_iter=30000)
logmodel.fit(X_train,y_train)

predictions = logmodel.predict(X_test)

predictions.size
Titanic-SZ.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np

# #### boolean index

test_array = np.array([1, 4, 0, 2, 3, 8, 9, 7], float)

# Elementwise comparison yields a boolean mask.
test_array > 3

# Masking selects only the elements where the mask is True.
test_array[test_array > 3]

condition = test_array < 3
test_array[condition]

A = np.array([
    [12, 13, 14, 12, 16, 14, 11, 10,  9],
    [11, 14, 12, 15, 15, 16, 10, 12, 11],
    [10, 12, 12, 15, 14, 16, 10, 12, 12],
    [ 9, 11, 16, 15, 14, 16, 15, 12, 10],
    [12, 11, 16, 14, 10, 12, 16, 12, 13],
    [10, 15, 16, 14, 14, 14, 16, 15, 12],
    [13, 17, 14, 10, 14, 11, 14, 15, 10],
    [10, 16, 12, 14, 11, 12, 14, 18, 11],
    [10, 19, 12, 14, 11, 12, 14, 18, 10],
    [14, 22, 17, 19, 16, 17, 18, 17, 13],
    [10, 16, 12, 14, 11, 12, 14, 18, 11],
    [10, 16, 12, 14, 11, 12, 14, 18, 11],
    [10, 19, 12, 14, 11, 12, 14, 18, 10],
    [14, 22, 12, 14, 11, 12, 14, 17, 13],
    [10, 16, 12, 14, 11, 12, 14, 18, 11]])

B = A < 15
# BUG FIX: `np.int` was removed in NumPy 1.24 (it was only an alias of the
# builtin); cast with the builtin `int` instead.
B.astype(int)

# #### fancy index

a = np.array([2, 4, 6, 8], float)
a

a[a>4]

b = np.array([0, 0, 1, 3, 2, 1], int)
# An integer index array selects elements by position (may repeat positions).
a[b]

a.take(b)  # take(): same effect as bracket indexing

a = np.array([[1, 4], [9, 16]], float)
b = np.array([0, 0, 1, 1, 0], int)
c = np.array([0, 1, 1, 1, 1], int)
# b supplies the row indices and c the column indices (like matrix coordinates).
a[b,c]

a = np.array([[1, 4], [9, 16]], float)
# With a single index array, whole rows are selected.
a[b]
Python/ML_basic/2.Data Handling/2.Numpy/8_boolean_fancy_index.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Docstrings and annotations

# ## Docstrings

def func(a, b):
    "Returns the sum of two numbers"
    return a + b


help(func)

# We can define help strings using docstrings and annotations.

func.__doc__


# ## Annotations
#
# We can also add metadata annotations to a function's parameters and return. These metadata annotations can be any expression (string, type, function call, etc)

def fact(n: 'int >= 0') -> int:
    '''Calculates n! (factorial function)

    Inputs:
        n: non-negative integer
    Returns:
        the factorial of n
    '''
    if n < 1:
        'Note that this is not part of the docstring!'
        return 1
    return n * fact(n - 1)


fact.__annotations__

help(fact)


# Annotations will work with default parameters too: just specify the default after the annotation:

def my_func(a: str = 'a', b: int = 1) -> str:
    return a * b


my_func()

# # Lambda expressions

lambda x: x

# It creates a function.


# ## Passing as an argument

def apply_func(fn, *args, **kwargs):
    return fn(*args, **kwargs)


apply_func(lambda x, y: x * y, 2, 3)

apply_func(sum, (2, 3))

# ## Lambdas and sorting

l = ['a', 'B', 'c', 'D']
sorted(l)

# As you can see there is a difference between upper and lower-case characters when sorting strings.

# str.upper as the key is equivalent to `lambda x: x.upper()`.
sorted(l, key=str.upper)

l = ['apple', 'mango', 'banana', 'pear']

# Sort l based on the last letter of the word.
sorted(l, key=lambda word: word[-1])


# # Function introspection

import inspect


def func(astr):
    return astr * 2


inspect.isfunction(func)

inspect.ismethod(func)


# There is a difference between a function and a method! A method is a function that is bound to some object

class MyClass:
    def f_instance(self):
        pass

    @classmethod
    def f_class(cls):
        pass

    @staticmethod
    def f_static():
        pass


# **Instance methods** are bound to the **instance** of a class (not the class itself)
#
# **Class methods** are bound to the **class**, not instances
#
# **Static methods** are not bound either to the class or its instances

print(inspect.isfunction(MyClass.f_instance), inspect.ismethod(MyClass.f_instance))

print(inspect.isfunction(MyClass.f_class), inspect.ismethod(MyClass.f_class))

print(inspect.isfunction(MyClass.f_static), inspect.ismethod(MyClass.f_static))

# # Map, Filter, Zip

# ## Map

map(lambda x: x + 1, [1, 2, 3])

list(map(lambda x: x + 1, [1, 2, 3]))

# ## Filter

list(filter(lambda x: x % 2 == 0, [3, 5, 6]))

# ## Zip

list(zip(range(5), ['a'] * 5, ['app', 'bpp', 'cpp']))

# # Partial
#
# Use case:
# 1. suppose we have points (represented as tuples), and we want to sort them based on the distance of the point from some other fixed point

from functools import partial

origin = (0, 0)
l = [(1, 1), (0, 2), (-3, 2), (0, 0), (10, 10)]


def dist_sq(x, y):
    # Squared Euclidean distance between two 2-D points.
    return (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2


dist_sq((0, 0), (1, 1))

sorted(l, key=partial(dist_sq, (0, 0)))

# # Operator
#
# The operator module exports a set of efficient functions corresponding to the intrinsic operators of Python.

import operator

dir(operator)

# More info can be found [here](https://docs.python.org/3/library/operator.html)
deep_dive/1_functional/3_first_class_functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Parte 1: Clusterização

# Considere o conjunto de dados sobre clientes de um shopping disponibilizado em:
#
# https://raw.githubusercontent.com/tirthajyoti/Machine-Learning-with-Python/master/Datasets/Mall_Customers.csv
#
# Esse conjunto de dados é composto pelas variáveis CustomerID, Gender, Age, Annual Income (k$), Spending Score (1-100). Suponha que o seu trabalho como analista de dados seja encontrar perfis de consumidores.
#
# Considerando as variáveis numéricas, faça a clusterização dos dados, encontrando a quantidade ótima de clusters.
#
# Mostre os valores dos atributos dos centróides de cada cluster.
#
# Que observações podem ser feitas sobre cada cluster? Ou seja, que tipo de cliente cada cluster representa. Discuta.
#
# Extra: crie scatterplots com os dados clusterizados. Considere fazer gráficos dois-a-dois (use a cor para representar gênero):
#
# Annual Income (k$) x Spending Score (1-100)
#
# Annual Income (k$) x Age
#
# Age x Spending Score (1-100)

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

df_shop = pd.read_csv("https://raw.githubusercontent.com/tirthajyoti/Machine-Learning-with-Python/master/Datasets/Mall_Customers.csv")

# 1. ID do cliente: É a ID exclusiva fornecida a um cliente
# 2. Sexo: Sexo do cliente
# 3. Idade: A idade do cliente
# 4. Renda anual (k $): É a receita anual do cliente
# 5. Pontuação de gastos: É a pontuação (em 100) dada a um cliente pelas autoridades do shopping, com base no dinheiro gasto e no comportamento do cliente.

df_shop.describe()

df_shop.info()

# Keep only the numeric columns used for clustering.
df_shop_focus = df_shop[['Age','Annual Income (k$)','Spending Score (1-100)']]

# (duplicate `import matplotlib.pyplot as plt` / `import seaborn as sns`
# lines removed — both modules are already imported above)

plt.figure(figsize=(7,5))
sns.heatmap(df_shop_focus.corr(), annot = True, fmt = '.2f',  # two decimal places
            cmap='Blues')
plt.title('Correlação entre variáveis dos clientes do shopping')
plt.show()

# Visualize the customers' age distribution
plt.figure(figsize=(25,8))
sns.countplot(x="Age",data=df_shop_focus,palette="pastel")
plt.show()

# Visualize the customers' annual income
plt.figure(figsize=(25,8))
sns.countplot(x="Annual Income (k$)",data=df_shop_focus,palette="pastel")
plt.show()

# Visualize the customers' spending scores
plt.figure(figsize=(25,8))
sns.countplot(x="Spending Score (1-100)",data=df_shop_focus,palette="pastel")
plt.show()

df_shop.loc[df_shop['Spending Score (1-100)']==99]

# ## Clustering com base em 2 recursos
# Primeiro, irei trabalhar com apenas dois recursos, que serão: receita anual e pontuação de gastos.

from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler,StandardScaler

X = df_shop_focus[['Annual Income (k$)','Spending Score (1-100)']]
# Scale both features to the [0, 1] range.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)

# Agora calculamos a Soma dos Erros Quadráticos Dentro do Cluster (wcss/sigla do inglês) para diferentes valores de k.
# Em seguida, escolhemos o k para o qual o wcss primeiro começa a diminuir.
# Este valor de K nos dá o melhor número de clusters para fazer a partir dos dados brutos.

wcss=[]
for i in range (1,11):
    kmeans=KMeans(n_clusters=i, random_state=0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)

# n_clusters = O número de clusters a serem formados, bem como o número de centróides a serem gerados.
#
# inicialização (k-means++) = seleciona centros de cluster iniciais para agrupamento de média k de uma maneira inteligente para acelerar a convergência.
#
# max_iter(300) = Número máximo de iterações do algoritmo k-means para uma única execução.
#
# n_init(10) = Número de vezes que o algoritmo k-means será executado com diferentes sementes de centróide. Os resultados finais serão o melhor resultado de n_init execuções consecutivas.
#
# kmeans.inertia_ = Soma das distâncias quadradas das amostras ao centro do cluster mais próximo, ponderada pelos pesos da amostra, se fornecidos. Associado a isso, o .append adiciona valor completo.
#
# A soma dos erros quadrados dentro do cluster:
#
# O erro quadrático para cada ponto é o quadrado da distância do ponto a partir de sua representação, ou seja, seu centro previsto do cluster.
#
# A pontuação do WSS é a soma desses erros quadrados para todos os pontos.

# The elbow curve
plt.figure(figsize=(12,6))
plt.plot(range(1,11),wcss)
plt.plot(range(1,11),wcss, linewidth=2, color="red", marker ="8")
plt.xlabel("Valores de K")
plt.xticks(np.arange(1,11,1))
plt.ylabel("Soma dos Erros Quadráticos")
plt.show()

# ##### Isso é conhecido como gráfico do cotovelo, o eixo x sendo o número de clusters. O número de clusters é obtido no ponto da articulação do cotovelo. Este é o ponto em que criar clusters é mais relevante, pois aqui o valor de WCSS para de diminuir repentinamente. Aqui no gráfico, após 5, a queda é mínima, então consideramos 5 como o número de clusters para este método.
kmeans = KMeans(n_clusters=5,random_state=0).fit(X)

# Predict the cluster label for every input sample.
y_kmeans = kmeans.predict(X)

#kmeans.cluster_centers_

# Plot the clusters and their centroids.
fig = plt.figure(figsize=(25, 10))
cluster_colours = ['red', 'blue', 'green', 'cyan', 'magenta']
for idx, colour in enumerate(cluster_colours):
    members = X[y_kmeans == idx]
    plt.scatter(members[:, 0], members[:, 1], s = 100, c = colour,
                label = 'Cluster {}'.format(idx + 1))
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
            s = 300, c = 'yellow', label = 'Centroids')
plt.title('Clusters de clientes')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()

# ### Analisando os resultados:
# Podemos ver que os clientes do shopping podem ser agrupados de forma ampla em 5 grupos com base em suas compras feitas no shopping:
#
# No cluster 1 (vermelho), são as pessoas com renda alta, mas com scores de gastos baixos. Talvez sejam esses clientes que estão insatisfeitos ou insatisfeitas com os serviços do shopping. Esses podem ser os alvos principais do shopping, pois têm potencial para gastar dinheiro.
#
# No cluster 2 (azul), são as pessoas com renda média e scores de gastos médio.
#
# No cluster 3 (verde), são as pessoas com alta renda e altos scores de gastos, este é o caso ideal para shopping ou lojas, pois essas pessoas são as principais fontes de lucro.
#
# No cluster 4 (ciano), são as pessoas com baixas rendas, mas com scores altos de gastos, são aquelas pessoas que por algum motivo compram produtos com mais frequência, embora tenham uma renda baixa.
# No cluster 5 (magenta), são as pessoas com baixa renda anual e baixos scores de gastos, o que faz sentido, por ter menor salário, compra menos.

# ## Clustering com base em 3 recursos
# Agora irei trabalhar com 3 recursos, que são: a idade dos clientes, receita anual e a pontuação de gastos.

# BUG FIX: .copy() added — a "label" column is assigned to df2 below; without
# the copy, df2 is a slice of df_shop_focus and pandas raises
# SettingWithCopyWarning (and the write may silently fail under copy-on-write).
df2 = df_shop_focus[['Age','Annual Income (k$)','Spending Score (1-100)']].copy()

X_2 = df2

# normalization (intentionally disabled)
#scaler = MinMaxScaler()
#X_2 = scaler.fit_transform(X_2)

# Agora calculamos a Soma dos Erros Quadráticos Dentro do Cluster (wcss/sigla do inglês) para diferentes valores de k.
# Em seguida, escolhemos o k para o qual o wcss primeiro começa a diminuir.
# Este valor de K nos dá o melhor número de clusters para fazer a partir dos dados brutos.

wcss=[]
for i in range (1,11):
    kmeans_2=KMeans(n_clusters=i, random_state=0)
    kmeans_2.fit(X_2)
    wcss.append(kmeans_2.inertia_)

# The elbow curve
plt.figure(figsize=(12,6))
plt.plot(range(1,11),wcss)
plt.plot(range(1,11),wcss, linewidth=2, color="red", marker ="8")
plt.xlabel("Valores de K")
plt.xticks(np.arange(1,11,1))
plt.ylabel("Soma dos Erros Quadráticos")
plt.show()

# #### Assume-se aqui também, que k=5 é um bom valor

kmeans_2 = KMeans(n_clusters=5,random_state=0).fit(X_2)

# Predict the cluster label of every input row.
y_kmeans_2 = kmeans_2.predict(X_2)

# Attach the labels to the data.
df2["label"] = y_kmeans_2

kmeans_2.cluster_centers_

# One box plot per feature, grouped by cluster label.
control = df_shop_focus[['Age','Annual Income (k$)','Spending Score (1-100)']]
for i in control:
    plt.figure(figsize=(6,4))
    ax = sns.boxplot(x='label',y=i,data=df2)
    plt.title('\nBox Plot {}\n'.format(i),fontsize=15)
    plt.show()

# ### Analisando os resultados:
# Podemos ver que os clientes do shopping podem ser agrupados de forma ampla em 5 grupos com base em suas compras feitas no shopping:
#
# No cluster 1 (azul), são as pessoas de faixa etária mais idosa, com renda baixa e com scores de gastos mais baixos. O que faz sentido.
# No cluster 2 (laranja), são as pessoas na faixa dos 30 anos, com renda alta e com scores de gastos altos. Este é o caso ideal para shopping ou lojas, pois essas pessoas são as principais fontes de lucro.
#
# No cluster 3 (verdes), é o agrupamento com pessoas na faixa mais abrangente, com renda mais média e scores de gastos médios.
#
# No cluster 4 (vermelho), são as pessoas que em sua maioria estão na faixa entre 35-45, com rendas altas mas com scores de gastos baixos. Talvez sejam esses clientes que estão insatisfeitos ou insatisfeitas com os serviços do shopping. Mas também pode ser, analisando a maioria das idades dos clientes, os mesmos podem ter gastos mais latentes, como por exemplo com filhos na infância ou adolescência.
#
# No cluster 5 (roxo), são as pessoas mais jovens, com renda baixa mas com scores de gastos altos. O que pode explicar é a falta de uma maior maturidade e controle da vida financeira.

# Plot in 3-D since the clustering used three input features.
fig = plt.figure(figsize=(30,20))
ax = fig.add_subplot(111, projection='3d')
cluster_colours = ['purple', 'red', 'blue', 'green', 'yellow']
for lbl, colour in enumerate(cluster_colours):
    members = df2.label == lbl
    ax.scatter(df2.Age[members],
               df2["Annual Income (k$)"][members],
               df2["Spending Score (1-100)"][members],
               c=colour, s=60)
ax.scatter(kmeans_2.cluster_centers_[:, 0],
           kmeans_2.cluster_centers_[:, 1],
           kmeans_2.cluster_centers_[:, 2],
           s = 300, c = 'black', label = 'Centroids')
ax.view_init(35, 185)
plt.xlabel("Age")
plt.ylabel("Annual Income (k$)")
ax.set_zlabel('Spending Score (1-100)')
plt.show()

# ### Extra
#
Atividade 7/Atividade 7 - K-means.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # imports #standard from collections import deque # extra import numpy as np import pandas as pd from sklearn.metrics import confusion_matrix # local from feature_helpers import * # display settings pd.set_option('display.max_columns', 21) pd.set_option('display.max_colwidth', 400) pd.set_option('display.precision', 4) pd.set_option('display.width', 1600) # - recipes = load_clean_data() recipes.strings = remove_dupes(recipes.strings) recipes.ingredients = remove_dupes(recipes.ingredients) recipes.strings = add_combos(recipes.strings) recipes.ingredients = add_combos(recipes.ingredients) string_counts = make_counts(recipes, field='strings') string_merged, string_renamed = merge_rare_features(string_counts, 1, 3, 'raretype') string_merged = string_merged.drop(columns=['test']) string_rates = scale_counts(string_merged, recipes.cuisine.value_counts()) string_rates.loc['rarecombotype'] = 0.0 recipes.strings = remove_dupes(recipes.strings) string_props = get_proportions(string_rates) string_props.loc['rarecombotype'] = 0.0 recipes.strings = recipes.strings.map(lambda ings: update_names(ings, string_renamed)) recipes.strings = remove_dupes(recipes.strings) string_prop_weights = { 'brazilian': [0.98, 0.4], 'british': [1.00, 0.5], 'cajun_creole': [0.89, 0.5], 'chinese': [0.99, 0.4], 'filipino': [1.03, 0.4], 'french': [1.07, 0.4], 'greek': [0.97, 0.4], 'indian': [0.93, 0.4], 'irish': [0.93, 0.4], 'italian': [1.05, 0.3], 'jamaican': [0.86, 0.4], 'japanese': [1.16, 0.3], 'korean': [0.95, 0.4], 'mexican': [1.16, 0.4], 'moroccan': [0.86, 0.5], 'russian': [0.92, 0.5], 'southern_us': [1.07, 0.4], 'spanish': [1.02, 0.4], 'thai': [0.97, 0.4], 'vietnamese': [0.93, 0.4] } string_points = reweight(string_props, string_prop_weights) string_scores = 
recipes.apply(lambda recipe: string_points.loc[recipe.strings].mean(), axis='columns') string_ranks = string_scores.apply(rank, axis='columns') string_ratios = string_scores.apply(ratio, axis='columns') counts = make_counts(recipes) rare_merged, renamed = merge_rare_features(counts, 1, 3, 'raretype') rare_merged = rare_merged.drop(columns=['test']) rates = scale_counts(rare_merged, recipes.cuisine.value_counts()) rates.loc['rarecombotype'] = 0.0 recipes.ingredients = remove_dupes(recipes.ingredients) props = get_proportions(rates) props.loc['rarecombotype'] = 0.0 recipes.ingredients = recipes.ingredients.map(lambda ings: update_names(ings, renamed)) recipes.ingredients = remove_dupes(recipes.ingredients) prop_weights = { 'brazilian': [0.99, .4], 'british': [1.02, .5], 'cajun_creole': [0.83, .5], 'chinese': [0.97, .4], 'filipino': [1.04, .4], 'french': [1.08, .4], 'greek': [0.93, .4], 'indian': [0.91, .5], 'irish': [0.96, .4], 'italian': [1.04, .3], 'jamaican': [0.97, .5], 'japanese': [1.08, .3], 'korean': [0.96, .4], 'mexican': [1.04, .4], 'moroccan': [0.87, .5], 'russian': [0.97, .5], 'southern_us': [1.06, .4], 'spanish': [0.99, .4], 'thai': [0.91, .4], 'vietnamese': [0.97, .5], } points = reweight(props, prop_weights) scores = recipes.apply(lambda recipe: make_scores(recipe, points), axis='columns') ranks = scores.iloc[:, 0:20].apply(rank, axis='columns') top_points = points[points >= .2].fillna(0) top_scores = recipes.apply(lambda recipe: top_points.loc[recipe.ingredients].mean(), axis='columns') top_rates = top_scores * scores.iloc[:, 0:20].applymap(inverse) ing_weights = { 'brazilian': [0.99, .4], 'british': [1.0, .5], 'cajun_creole': [0.86, .5], 'chinese': [0.97, .4], 'filipino': [1.04, .4], 'french': [1.08, .4], 'greek': [0.95, .4], 'indian': [0.94, .5], 'irish': [0.94, .4], 'italian': [1.05, .3], 'jamaican': [0.96, .5], 'japanese': [1.1, .3], 'korean': [0.96, .4], 'mexican': [1.05, .4], 'moroccan': [0.85, .5], 'russian': [0.95, .5], 'spanish': [0.99, .4], 
'southern_us': [1.05, .4], 'thai': [0.91, .4], 'vietnamese': [0.98, .5], } ing_points = reweight(ings_only(props), ing_weights) ing_scores = recipes.apply(lambda recipe: ing_points.loc[recipe.ingredients].mean(), axis='columns') ing_ranks = ing_scores.apply(rank, axis='columns') standardize_ranks = lambda ranks: pd.Series(np.arange(0, 1, 1/len(ranks)), index=ranks.sort_values().index) std_ranks = ranks.apply(standardize_ranks, axis='columns') group_scores = make_group_features(ranks) comp_ratios = make_comparison_features(ranks) ranks = ranks.add_prefix('rank_') string_scores = string_scores.add_prefix('str_') string_ranks = string_ranks.add_prefix('strrk_') string_ratios = string_ratios.add_prefix('strrt_') std_ranks = std_ranks.add_prefix('stdr_') top_scores = top_scores.add_prefix('top_') top_rates = top_rates.add_prefix('topr_') ing_scores = ing_scores.add_prefix('ing_') ing_ranks = ing_ranks.add_prefix('ingr_') features = pd.concat([string_scores, string_ranks, string_ratios, scores, ranks, std_ranks, top_scores, top_rates, ing_scores, ing_ranks, group_scores, comp_ratios], axis='columns') output = pd.concat([recipes, features], axis='columns') output.head() last_e = deque(6*[0], 6) output = pd.concat([recipes, string_scores], axis='columns') def get_preds(recipe): maxpos = recipe.iloc[3:23].values.argmax() pred = recipe.index[maxpos + 3] return (pred, recipe.cuisine) preds = output.query('cuisine != "test"').apply(get_preds, axis='columns', result_type='expand') preds.columns = ['falpos', 'falneg'] e = preds.query('falpos != falneg') errs = len(e) last_e.appendleft(errs) print(errs, 1 - errs / len(output.query('cuisine != "test"')), last_e) # 6900, strings:6700 labels = sorted(e.falneg.unique()) cnf = confusion_matrix(e.falneg, e.falpos, labels=labels) plot_cnf(cnf, labels) falpos_counts, falneg_counts = e.falpos.value_counts(), e.falneg.value_counts() pd.concat([falpos_counts, falneg_counts, falpos_counts + falneg_counts, output.query('cuisine != 
"test"').cuisine.value_counts()], axis='columns', sort=False) save_output(output)
make_features.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline import sys, os base_path = os.getcwd()[0:os.getcwd().rfind('Watermark')] + "Watermark/" sys.path.append(base_path) import matplotlib.pyplot as plt from src.asiaccs_main import asiaccs_whitebox from src.models import get_deep_cnn_for_cifar, get_lenet_model_for_mnist from src.preprocess_data import load_cifar_images, load_mnist_images # - # # Description # In this notebook we are running a surrogate model attack. The attacker and owner data is disjoint. surr_model, all_history = asiaccs_whitebox( load_dataset_func=load_cifar_images, dataset_label="CIFAR", load_wm_model_func=get_deep_cnn_for_cifar, wm_type="gaussian", owner_data_size=25000, total_owner_data_size=100000, key_length=10000, key_length_test=1000, attacker_data_size=25000, total_attacker_data_size=100000, attacker_data_size_reg=3000, epochs_embed=20, epochs_reg=30, epochs_surr=15, freeze_first_layers=0, early_stopping_wm_reg=0.1, lr_surr=0.001, reg_whitebox=0.003, reg_surr=0, batchsize_reg=64, batchsize_surr=64, cache_embed_wm="asiaccs_gaussian_cifar", cache_reg_model=None, cache_surr_model=None, verbose=True ) # + embed_history, reg_history, surr_history = all_history for history in all_history: print(history.history.keys()) plt.figure(figsize=(20,10)) params = {'legend.fontsize': 20, 'legend.handlelength': 2, 'font.size': 16} test_acc_color = "navy" linestyle_test_acc = "x-" linestyle_watermark = "x--" watermark_ret_color = "green" watermark_ret_color2 = "green" linewidth = 3.0 markersize = 12 # Merge all times time_arr = embed_history.history['time'] x_axis_time = [] for i in range(0, len(time_arr)): t = time_arr[i] for j in range(0,i): t += time_arr[j] x_axis_time.append(t/60) offset = x_axis_time[-1] time_arr2 = reg_history.history['time'] for i in range(0, 
len(time_arr2)): t = time_arr2[i] for j in range(0,i): t += time_arr2[j] x_axis_time.append(t/60+offset) offset2 = x_axis_time[-1] time_arr3 = surr_history.history['time'] for i in range(0, len(time_arr3)): t = time_arr3[i] for j in range(0,i): t += time_arr3[j] x_axis_time.append(t/60+offset2) # Merge all values y_axis_acc = embed_history.history['val_acc'] + reg_history.history['val_acc'] + surr_history.history['val_acc'] y_axis_wm = embed_history.history['watermark_val'] + reg_history.history['watermark_val'] + surr_history.history['watermark_val'] plt.xlabel('Time in min', fontsize=26) plt.ylabel('Accuracy', fontsize=26) lh1, lh2, lh3 = len(embed_history.history['val_acc']), len(reg_history.history['val_acc']), len(surr_history.history['val_acc']) plt.plot(x_axis_time, y_axis_acc, linestyle_test_acc, linewidth=linewidth, markersize=markersize, color=test_acc_color) plt.plot(x_axis_time, y_axis_wm, linestyle_watermark, linewidth=linewidth, markersize=markersize, color=watermark_ret_color2) plt.axvline(x_axis_time[lh1], linestyle=':', color='red') plt.axvline(x_axis_time[lh1+lh2], linestyle=':', color='red') # Annotate accuracy data points dat_y = y_axis_acc dat_x = x_axis_time ctr = 0 for xy in zip(dat_x, dat_y): if ctr % 3 == 0 or ctr == len(dat_y): plt.annotate("{:.3f}".format(xy[1]), xy=(xy[0], xy[1]+0.02), textcoords='data', fontsize=14) # <-- ctr += 1 # Annotate wm data points dat_y = y_axis_wm dat_x = x_axis_time ctr = 0 for xy in zip(dat_x, dat_y): if ctr % 3 == 0 or ctr == len(dat_y): plt.annotate("{:.3f}".format(xy[1]), xy=(xy[0], xy[1]+0.02), textcoords='data', fontsize=14) # <-- ctr += 1 plt.ylim(0,1.05) plt.xlim(0) plt.grid() plt.legend(['test accuracy', 'wm retention'], loc='lower left') plt.show() # -
notebooks/backdoor_watermark/CIFAR-10/.ipynb_checkpoints/asiaccs_gaussian_whitebox_cifar-checkpoint.ipynb
def import_df(path, prefix, separator='_', col_name='file'):
    """Import all CSV files whose name starts with *prefix* and concatenate them.

    A column *col_name* is added to each frame holding the token found
    between *separator* and the ``.csv`` extension in the file name
    (e.g. ``calendar_boston.csv`` -> ``boston``).

    Parameters
    ----------
    path : str
        Directory containing the CSV files. Must end with a path separator,
        because file names are appended with ``+`` (unchanged from the
        original call convention).
    prefix : str
        File-name prefix to select (e.g. ``'calendar'``).
    separator : str, default '_'
        Separator between the prefix and the extracted token.
    col_name : str, default 'file'
        Name of the column that receives the extracted token.

    Returns
    -------
    pd.DataFrame
        All matching files concatenated with a fresh integer index;
        empty DataFrame when no file matches (as before).
    """
    # BUG FIX: the original hard-coded '_' in the regex, silently ignoring
    # the `separator` parameter. re.escape keeps arbitrary separators safe.
    pattern = re.escape(separator) + r'|\.csv'
    frames = []
    for file in listdir(path):
        if file.endswith('.csv') and file.startswith(prefix):
            df_temp = pd.read_csv(path + file)
            # Second element of the split = the token; extension removed.
            df_temp[col_name] = re.split(pattern, file)[1]
            frames.append(df_temp)
            print('Read file: {} Shape: {}'.format(file, df_temp.shape))
    # Concatenate once at the end instead of quadratically inside the loop.
    return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
# * Review scores review_scores_value, review_scores_rating and more # # ## Data Preparation # We need to clean the data first before trying to answer our question # + #Our Boston listing dataframe have 3 more columns than the Seattle one, and #both have a lot of columns that we don't need, so get only the columns that #are related to our question (location, property characteristics, price and reviews) listings_cols = ['id', 'street', 'neighbourhood', 'neighbourhood_cleansed', 'neighbourhood_group_cleansed', 'city', 'state', 'zipcode', 'market', 'smart_location', 'country', 'latitude', 'longitude', 'is_location_exact', 'property_type', 'room_type', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'bed_type', 'amenities', 'square_feet', 'price', 'weekly_price', 'monthly_price', 'security_deposit', 'cleaning_fee', 'guests_included', 'extra_people', 'minimum_nights', 'maximum_nights', 'number_of_reviews', 'review_scores_rating', 'review_scores_accuracy', 'review_scores_cleanliness', 'review_scores_checkin', 'file', 'review_scores_communication', 'review_scores_location', 'review_scores_value', 'cancellation_policy', 'availability_365', 'calendar_last_scraped', 'availability_30', 'availability_60', 'availability_90'] df_listings = df_listings[listings_cols] #Diving a little deeper into the data, we can see that we have 3 columns for neighbourhood #so we will keep only the one with fewer missing values missing_neighbourhood = df_listings[['neighbourhood','neighbourhood_cleansed', 'neighbourhood_group_cleansed']].count() / df_listings.shape[0] print('Missing Values froom Neighbourhood Columns:\n{}'.format(missing_neighbourhood)) df_listings.drop(columns=['neighbourhood', 'neighbourhood_group_cleansed'], inplace=True) #Since there is a lot of other cities in the "city" and "smart_location" columns, we will replace it by the #values of the "file" column and drop it print('\nCity Unique Values:\n{}'.format(df_listings['city'].unique())) df_listings['city'] = 
df_listings['file'] df_listings.drop(columns=['file', 'smart_location'], inplace=True) #Convert price related columns to number price_cols = ['price', 'weekly_price', 'monthly_price', 'security_deposit', 'cleaning_fee', 'extra_people'] df_listings[price_cols] = df_listings[price_cols].replace('[\$,]','', regex=True) df_listings[price_cols] = df_listings[price_cols].astype(float) # + #The reviews dataframe is a little easier, let just keep the columns that we want. #The comments column have very useful information, but since we won't do any text analysis #we won't keep it reviews_cols = ['listing_id', 'id', 'date', 'reviewer_id', 'file'] df_reviews = df_reviews[reviews_cols] #And rename the "file" for "city" also df_reviews.rename(columns={'file' : 'city'}, inplace=True) # + #For the calendar, we will convert the "available" flag for 1(true) and 0(false) df_calendar['available'] = (df_calendar['available'] == 't')*1 #And rename the "file" for "city" also df_calendar.rename(columns={'file' : 'city'}, inplace=True) # - # ### Calculating occupancy # # Inside Airbnb have an occupancy model that we will replicate, it consist mainly of these 3 characteristics: # # * **Convert review rate to estimated booking** # # Accordingly with Airbnb, 72% of the bookings leave reviews, the Budget and Legislative Analyst's Office made their analysis with this value in addition to other of 30.5%. We will go with Inside Airbnb and consider that 50% of the bookings lead to reviews # # * **Avarage length of stay** # # We will consider an avarage length of stay of 3 days. 
If the minimum nights of a listing is higher, then this value is used # # * **Occupancy rate was capped at 70%** # # # font: http://insideairbnb.com/about.html # + df_booking = df_reviews.copy() df_booking['month'] = pd.DatetimeIndex(df_booking['date']).month df_booking['year'] = pd.DatetimeIndex(df_booking['date']).year df_booking['days_in_month'] = pd.DatetimeIndex(df_booking['date']).daysinmonth df_booking = df_booking.groupby(['listing_id', 'city', 'month', 'year', 'days_in_month'], as_index=False).agg({'id' : 'count'}) df_booking.rename(columns={'id' : 'reviews'}, inplace=True) df_booking['booking'] = df_booking['reviews'] * 2 #only 50% leave reviews df_listings2 = df_listings[['id', 'neighbourhood_cleansed', 'property_type', 'room_type', 'accommodates', 'bathrooms', 'bedrooms', 'beds', 'bed_type', 'square_feet', 'price', 'cancellation_policy', 'minimum_nights']] df_revenue = df_booking.merge(df_listings2, how='left', left_on='listing_id', right_on='id') df_revenue['minimum_nights'].where(df_revenue['minimum_nights'] > 3, 3, inplace=True) df_revenue['max_days'] = df_booking['days_in_month'] * 0.7 #capped occupancy df_revenue['days'] = df_revenue['booking'] * df_revenue['minimum_nights'] df_revenue['days'] = df_revenue[['days', 'max_days']].min(axis=1) df_revenue['occupancy_rate'] = df_revenue['days'] / df_booking['days_in_month'] df_revenue['estimated_revenue'] = df_revenue['days'] * df_revenue['price'] # - # ### Analysing the data to answer questions 1 and 2 # Visualizing the data def plot_revenue_neighbourhood(year = None): ''' Plot stacked barchart of Top 10 Seattle and Boston Neighbourhood by Property Type year = filter specific year, if nothing is passed it doesn't filter any year ''' if year is not None: df_filtered = df_revenue[df_revenue['year'] == year] year_title = year else: df_filtered = df_revenue year_title = 'All Years' df_revenue_plot = df_filtered.groupby(['neighbourhood_cleansed', 'city', 'property_type' ]).agg({'estimated_revenue' : 'sum', 
'listing_id' : 'count'}) df_revenue_plot.rename(columns={'listing_id' : 'listing_quantity'}, inplace=True) df_revenue_plot.reset_index(inplace=True) #ranking and top 10 for each city df_revenue_temp = df_filtered.groupby(['neighbourhood_cleansed', 'city' ]).agg({'estimated_revenue' : 'sum'}) df_revenue_temp.reset_index(inplace=True) df_revenue_temp.sort_values('estimated_revenue', ascending=False, inplace=True) df_revenue_temp['rank'] = df_revenue_temp.groupby(['city'])['estimated_revenue'].rank(ascending=False, method='dense') df_revenue_temp.drop(columns=['estimated_revenue'], inplace=True) df_revenue_plot = df_revenue_plot.merge(df_revenue_temp, how='left', on=['city','neighbourhood_cleansed']) df_revenue_plot.sort_values('rank', inplace=True) df_revenue_plot_boston = df_revenue_plot[(df_revenue_plot['city'] == 'boston') & (df_revenue_plot['rank'] <= 10)] df_revenue_plot_seattle = df_revenue_plot[(df_revenue_plot['city'] == 'seattle') & (df_revenue_plot['rank'] <= 10)] #get xlabels boston_labels = list(df_revenue_plot_boston['neighbourhood_cleansed'].drop_duplicates()) seattle_labels = list(df_revenue_plot_seattle['neighbourhood_cleansed'].drop_duplicates()) df_pivot_boston = df_revenue_plot_boston.pivot_table(index=['rank'], columns=['property_type'], values=['estimated_revenue'], fill_value=0, aggfunc='sum', margins=True) df_pivot_boston.sort_values(by='All', ascending=False, axis=1, inplace=True) #sort columns by total value df_pivot_boston.drop(columns=['All'], level=1, inplace=True) df_pivot_boston.drop('All', axis=0, inplace=True) df_pivot_boston = df_pivot_boston.droplevel(0,axis=1) df_pivot_seattle = df_revenue_plot_seattle.pivot_table(index=['rank'], columns=['property_type'], values=['estimated_revenue'], fill_value=0, aggfunc='sum', margins=True) df_pivot_seattle.sort_values(by='All', ascending=False, axis=1, inplace=True) df_pivot_seattle.drop(columns=['All'], level=1, inplace=True) df_pivot_seattle.drop('All', axis=0, inplace=True) df_pivot_seattle 
= df_pivot_seattle.droplevel(0,axis=1) #plotting fig, ax = plt.subplots(1,2, figsize=(15,7), sharey=True) df_pivot_seattle.plot.bar(stacked=True, ax=ax[0], legend=True, colormap='tab20') df_pivot_boston.plot.bar(stacked=True, ax=ax[1], legend=False, colormap='tab20') #adjusting appearance and labels ax[0].legend() ax[1].legend() ax[0].set_xticklabels(seattle_labels, rotation=70, fontsize=11) ax[1].set_xticklabels(boston_labels, rotation=70, fontsize=11) ax[0].set_ylabel('Estimated Revenue in Million USD', fontsize=13) ax[0].set_xlabel('Neighbourhood', fontsize=13) ax[1].set_xlabel('') ax[0].set_title('Seattle by Property Type', fontsize=16) ax[1].set_title('Boston by Property Type', fontsize=16) ax[0].xaxis.set_label_coords(1.05, -0.3) ax[0].spines['right'].set_visible(False) ax[0].spines['top'].set_visible(False) ax[1].spines['right'].set_visible(False) ax[1].spines['top'].set_visible(False) fig.suptitle('Top 10 Neighbourhood ' + str(year_title), fontsize=20) plt.tight_layout() plt.show() #answering question 1: #lets see a 2015 comparison since is the last full year of both cities plot_revenue_neighbourhood(2015) # Here we can see that Broadway is the leader with it’s estimated revenue above 2.5 million USD followed by South End. # # We can identify that Seattle’s revenue is concentrated in two main neighbourhoods, Boston on the other hand is a little more uniformly distributed. 
#and for all years plot_revenue_neighbourhood() # This indicates to us that Boston has proably had a headstart with Airbnb rentals comparing with Seattle, since most of the neighbourhoods have a larger estimated revenue def top_neighbourhood(year = None): ''' Return DataFrame with top 10 neighbourhood and the top 1 property type for each city year = filter specific year, if nothing is passed it doesn't filter any year ''' if year is not None: df_filtered = df_revenue[df_revenue['year'] == year] year_title = year else: df_filtered = df_revenue year_title = 'All Years' df_neighbourhood = df_filtered.groupby(['neighbourhood_cleansed', 'city', 'property_type' ]).agg({'estimated_revenue' : 'sum', 'listing_id' : 'count'}) df_neighbourhood.rename(columns={'listing_id' : 'listing_quantity'}, inplace=True) df_neighbourhood.reset_index(inplace=True) #ranking for each city and top 10 df_neighbourhood_temp = df_filtered.groupby(['neighbourhood_cleansed', 'city' ]).agg({'estimated_revenue' : 'sum'}) df_neighbourhood_temp.reset_index(inplace=True) df_neighbourhood_temp.sort_values('estimated_revenue', ascending=False, inplace=True) df_neighbourhood_temp['rank'] = df_neighbourhood_temp.groupby(['city'])['estimated_revenue'].rank(ascending=False, method='dense') df_neighbourhood_temp.drop(columns=['estimated_revenue'], inplace=True) df_neighbourhood = df_neighbourhood.merge(df_neighbourhood_temp, how='left', on=['city','neighbourhood_cleansed']) df_neighbourhood.sort_values('rank', inplace=True) df_neighbourhood_boston = df_neighbourhood[(df_neighbourhood['city'] == 'boston') & (df_neighbourhood['rank'] <= 10)] df_neighbourhood_seattle = df_neighbourhood[(df_neighbourhood['city'] == 'seattle') & (df_neighbourhood['rank'] <= 10)] idx_boston = df_neighbourhood_boston.groupby(['neighbourhood_cleansed', 'city'])['estimated_revenue'].transform(max) == df_neighbourhood_boston['estimated_revenue'] df_neighbourhood_boston = df_neighbourhood_boston[idx_boston] 
df_neighbourhood_boston.drop(columns=['city'], inplace=True) df_neighbourhood_boston['Average Revenue'] = (df_neighbourhood_boston['estimated_revenue'] / df_neighbourhood_boston['listing_quantity']).map('${:,.2f}'.format) df_neighbourhood_boston['estimated_revenue'] = df_neighbourhood_boston['estimated_revenue'].map('${:,.2f}'.format) df_neighbourhood_boston.rename(columns={'neighbourhood_cleansed' : 'Boston Neighbourhood', 'property_type' : 'Boston Property Type', 'estimated_revenue' : 'Boston Estimated Revenue', 'listing_quantity' : 'Boston Listing Quantity', 'Average Revenue' : 'Boston Average Revenue', 'rank' : 'Rank ' + str(year_title)}, inplace=True) df_neighbourhood_boston.set_index('Rank ' + str(year_title), inplace=True) idx_seattle = df_neighbourhood_seattle.groupby(['neighbourhood_cleansed', 'city'])['estimated_revenue'].transform(max) == df_neighbourhood_seattle['estimated_revenue'] df_neighbourhood_seattle = df_neighbourhood_seattle[idx_seattle] df_neighbourhood_seattle.drop(columns=['city'], inplace=True) df_neighbourhood_seattle['Average Revenue'] = (df_neighbourhood_seattle['estimated_revenue'] / df_neighbourhood_seattle['listing_quantity']).map('${:,.2f}'.format) df_neighbourhood_seattle['estimated_revenue'] = df_neighbourhood_seattle['estimated_revenue'].map('${:,.2f}'.format) df_neighbourhood_seattle.rename(columns={'neighbourhood_cleansed' : 'Seattle Neighbourhood', 'property_type' : 'Seattle Property Type', 'estimated_revenue' : 'Seattle Estimated Revenue', 'listing_quantity' : 'Seattle Listing Quantity', 'Average Revenue' : 'Seattle Average Revenue', 'rank' : 'Rank ' + str(year_title)}, inplace=True) df_neighbourhood_seattle.set_index('Rank ' + str(year_title), inplace=True) df_neighbourhood_return = pd.concat([df_neighbourhood_seattle, df_neighbourhood_boston], axis=1) return df_neighbourhood_return #answering question 2: #getting the top property type for 2015 top_neighbourhood(2015) #and for all years top_neighbourhood() # ### Now lets see 
if we can create a model to predict future revenue # # For this, we need to modify our dataset to include our main features that contribute to the prediction # # our target variable will be the estimated_revenue # + df_revenue_ml = df_revenue.drop(columns=['listing_id', 'id', 'occupancy_rate', 'reviews', 'booking']) #we won't have occupancy rate, reviews or booking for new listings #dealing with missing data print('Columns missing count: \n{}'.format(df_revenue_ml.isnull().sum())) print('\nSquare_feet % of missing: {:.2f}'.format(df_revenue_ml['square_feet'].isnull().sum() / df_revenue_ml.shape[0])) #since only 5% of the square_feet have data, we will drop it df_revenue_ml.drop(columns=['square_feet'], inplace=True) #for property_type, we will create e new classification "not_specified" df_revenue_ml['property_type'].fillna('Not Specified', inplace=True) #filling the na for bathrooms with the mean considering property type #we are filling this way because different properties may have different means #for example, is very likely that a house have more bathrooms than an camper, so #by doing this we tend to preserve the features differences between them and still #keep it simple to do and understand bathroom_dict = df_revenue_ml[['property_type', 'bathrooms']].groupby(['property_type']).median()['bathrooms'].to_dict() df_revenue_ml['bathrooms'].fillna(df_revenue_ml['property_type'].map(bathroom_dict), inplace=True) #same for bedrooms bedroom_dic = df_revenue_ml[['property_type', 'bedrooms']].groupby(['property_type']).median()['bedrooms'].to_dict() df_revenue_ml['bedrooms'].fillna(df_revenue_ml['property_type'].map(bedroom_dic), inplace=True) #and for beds beds_dic = df_revenue_ml[['property_type', 'beds']].groupby(['property_type']).median()['beds'].to_dict() df_revenue_ml['beds'].fillna(df_revenue_ml['property_type'].map(beds_dic), inplace=True) #verifying if all missing values have been filled print('\nColumns missing count after filling: 
def grid_lasso():
    """Grid-search Lasso hyperparameters on the module-level X_train / y_train.

    Returns
    -------
    pd.Series
        The ``cv_results_`` row with the best mean R^2 test score:
        mean fit time, winning hyperparameters and the mean test score.
    """
    grid_values = {'alpha': [0.01, 0.1, 1, 10, 100],
                   'warm_start': [True, False],
                   'positive': [True, False],
                   'selection': ['cyclic', 'random']}
    lasso = Lasso(random_state=0)
    # Renamed from `grid_lasso`: the original local shadowed the function name.
    search = GridSearchCV(lasso, param_grid=grid_values, scoring='r2', cv=3).fit(X_train, y_train)
    df_grid = pd.DataFrame(search.cv_results_)
    df_grid = df_grid[['mean_fit_time', 'param_alpha', 'param_positive',
                       'param_selection', 'param_warm_start', 'mean_test_score']]
    df_grid.sort_values('mean_test_score', ascending=False, inplace=True)
    return df_grid.iloc[0]


def grid_ridge():
    """Grid-search Ridge hyperparameters on the module-level X_train / y_train.

    BUG FIX: the original docstring was copy-pasted from ``grid_lasso`` and
    wrongly claimed this tuned a Lasso regression.

    Returns
    -------
    pd.Series
        The ``cv_results_`` row with the best mean R^2 test score.
    """
    grid_values = {'alpha': [0.01, 0.1, 1, 10, 100],
                   'solver': ['auto', 'svd', 'cholesky', 'sparse_cg', 'lsqr', 'sag', 'saga']}
    ridge = Ridge(random_state=0)
    search = GridSearchCV(ridge, param_grid=grid_values, scoring='r2', cv=3).fit(X_train, y_train)
    df_grid = pd.DataFrame(search.cv_results_)
    df_grid = df_grid[['mean_fit_time', 'param_alpha', 'param_solver', 'mean_test_score']]
    df_grid.sort_values('mean_test_score', ascending=False, inplace=True)
    return df_grid.iloc[0]
train score: {}\nR2 test score: {}'.format(r2_train_ridge, r2_test_ridge)) #using cross validation to estimate scores cv_scores_ridge = cross_val_score(ridge, X, y, cv=5, scoring='r2') print('Cross Validation R2 mean: {}\nCross Validation R2 standard deviation: {}'.format(cv_scores_ridge.mean(),cv_scores_ridge.std()))
Udacity_Airbnb.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Selenium - Basic # #### 다룰 내용 # 1. Selenium이란 # 2. Selenium 설치 # 3. Selenium 기본 사용법 # 4. Selenium 파일 업로드 # ## 1. Selenium이란 # - Selenium documentation (http://www.seleniumhq.org/) # - 웹 서비스를 테스트 하기 위해 만들어진 브라우저 자동화 라이브러리 (다른 목적으로도 사용할 수 있음) # - 동적인 웹페이지 크롤링에 이용 # - 여러가지 브라우저를 지원하지만 Firefox는 최신 버전 지원 X하는 등 문제가 있을 수도 # - 우리는 가장 많이 사용하는 Chrome driver 사용 # - 여러가지 OS, 다른 언어에서도 사용 가능 # ## 2. Selenium 설치 # ### 2.1 Install chrome driver # - [다운로드 링크](https://sites.google.com/a/chromium.org/chromedriver/home) ← 여기에서 다운로드 # - Mac: chrome driver를 글로벌하게 사용하기 위해 경로 변경 # - `$ mv ~/Donwloads/chromedriver /usr/local/bin` # - Windows: 환경변수 추가 # - 환경변수 설정이 잘 되지 않을 때는 같은 폴더에 chromedriver.exe를 넣거나 코드에 path를 넣어주면 됨 # # # ### 2.2 Install selenium # - Mac: `$ sudo pip3 install selenium` # - Windows: `$ conda install -c conda-forge selenium` # # from selenium import webdriver # ## 3. 
Selenium 기본 사용법 # ### 3.1 브라우저 open driver = webdriver.Chrome() # 객체를 만들어서 driver에 넣어줌 # ### 3.2 페이지 이동 # - url에 꼭 http를 포함해야 함 driver.get("http://naver.com") # ### 3.3 window size 조절 # - 반응형 웹페이지 테스트에 이용할 수 있음 driver.set_window_size(600, 800) # ### 3.4 브라우저 스크롤 조절 # - 함수가 없는 경우에는 javascript 코드를 실행해서 컨트롤 script = "window.scrollTo(200,300);" driver.execute_script(script) # ### 3.5 윈도우 객체 저장 # - 윈도우 객체를 저장하면 브라우저에 여러 개의 탭이 있을 때 저장되어 있는 윈도우 객체의 탭으로 돌아올 수 있음 main_window = driver.current_window_handle main_window # ### 3.6 새로운 윈도우 탭 열기 # javascript 코드로 새로운 탭을 열 수 있음 script = "window.open('http://google.com');" driver.execute_script(script) # 새로운 탭을 열었지만 아래와 같이 current_window(포커스)가 자동으로 바뀌지는 않음 driver.current_window_handle # ### 3.7 현재 열려있는 윈도우 객체 확인 windows = driver.window_handles windows # ### 3.8 윈도우 포커스 변경 # - current window가 자동으로 바뀌지 않았기 때문에 바꿔주기 # - 이 방법으로 탭을 이동하면서 작업할 수 있음 driver.switch_to_window(windows[1]) driver.current_window_handle driver.switch_to_window(main_window) # ### 3.9 페이지 새로고침 script = "location.reload();" driver.execute_script(script) # ### 3.10 alert messege 처리 # alert messege 만들기 script = "alert('selenium test');" driver.execute_script(script) # switch_to_alert 함수로 alert 변수에 alert 객체를 저장할 수 있음 # alert 객의 text 값을 확인해보면 alert의 메세지를 문자열로 확인 가능 alert = driver.switch_to_alert() alert.text # 확인 버튼 누르기 alert.accept() # ### 3.11 confirm 처리 # - confirm은 alert과 달리 확인/취소 버튼이 함께 있음 script = "confirm('data')" driver.execute_script(script) confirm = driver.switch_to_alert() confirm.text # confirm.accept() # 확인 버튼 클릭 confirm.dismiss() # 취소 버튼 클릭 # ### 3.12 input창에 문자열 입력하기 # - send_keys()를 이용하여 웹페이지의 input 태그에 문자열을 입력할 수 있음 # - find_element_by_css_selector # - html 코드의 태그 위치에 접근하는 함수 # - 파라미터로 css selector를 넣어주면 해당 selector의 태그 위치로 접근 # 네이버 검색창에 "데이터사이언스" 입력하기 driver.find_element_by_css_selector("#query").send_keys("데이터사이언스") # ### 3.13 버튼 클릭하기 (click event 발생) # 검색 버튼 눌러 "데이터사이언스" 검색 결과 확인하기 driver.find_element_by_css_selector("#search_btn").click() # ### 3.14 
# Ending the session
# #### Close the current window/tab
driver.close()

# #### Quit the browser entirely (all windows)
driver.quit()

# ## 4. Uploading a file with Selenium
# - upload an image file to the Google Vision API demo page
# - the demo widget lives inside an iframe

# ### 4.1 Start a webdriver and open the Google Vision page
url = "https://cloud.google.com/vision/"
driver = webdriver.Chrome()
driver.get(url)
driver.set_window_size(800, 800)
script = "window.scrollTo(0,1200);"
driver.execute_script(script)

# ### 4.2 Move focus into the iframe
# - content inside an iframe can only be scraped after switching focus to it!
# NOTE: switch_to_frame()/find_element_by_css_selector() were removed in
#       Selenium 4; driver.switch_to.frame() and find_element("css selector", ...)
#       work in both Selenium 3 and 4.
iframe = driver.find_element("css selector", '#vision_demo_section > iframe')
driver.switch_to.frame(iframe)

# +
## to return from the iframe to the main document:
# driver.switch_to.default_content()
# -

# ### 4.3 Upload the file
# - the path must be absolute
file_path = "/Users/hyeshinoh/Workspace/Study_Web/dog.png"
driver.find_element("css selector", '#input').send_keys(file_path)

# ### 4.4 Inspect the analysis results
# #### (1) Web results
# select the Web tab of the image-analysis results
driver.find_element("css selector", '#webDetection').click()

# collect the result rows (name + confidence score)
web_results = driver.find_elements("css selector", '#card > .container > .row')[:15]
for web_result in web_results:
    print(
        web_result.find_element("css selector", ".name").text,
        web_result.find_element("css selector", ".score").text
    )

# #### (2) Document results (text found in the image)
# select the Document tab of the image-analysis results
driver.find_element("css selector", "#fullTextAnnotation").click()

# fetch the OCR text blocks
document_results = driver.find_elements("css selector", "#card > fieldset > div > fieldset")
len(document_results)

documents = []
for document_result in document_results:
    words = document_result.find_elements("css selector", ".container .word")
    print(len(words))
    sentence_list = []
    for word in words:
        word = word.text.replace(" ", "")
        sentence_list.append(word)
    documents.append(" ".join(sentence_list))
documents

driver.quit()

# #### References
# - FastCampus, "Data Science School (8th)" course material
web_08_selenium_basic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# +
# note - you will need to be able to run the UNIX ffmpeg utility to run this notebook

# +
#pip install google-cloud-storage
# #!pip install google-cloud-language
# #!pip install google-cloud-speech
# -

import urllib.request
import os
import glob
import subprocess
from google.cloud import storage
from google.cloud import speech
from google.protobuf.json_format import MessageToDict
import yaml
import json
import time

# +
# properties.yaml holds deployment-specific settings (credentials path, bucket
# name) so they are not hard-coded into the notebook
with open('properties.yaml') as file:
    properties = yaml.full_load(file)

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = properties['google_application_credentials']
# -

bucket_name = properties['bucket_name']
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)

# source video: a public recording from archive.org
url = "https://ia600702.us.archive.org/22/items/tobacco_dwu03f00/170500121_512kb.mp4"
source_file_name = url.split('/')[-1]            # e.g. 170500121_512kb.mp4
source_file_stem = source_file_name.split('.')[0]

start_time = time.time()
print("downloading file")
urllib.request.urlretrieve(url, 'video_files/' + source_file_name)
print("run time:", time.time() - start_time)

start_time = time.time()
blob = bucket.blob("video_files/" + source_file_name)
blob.upload_from_filename('video_files/' + source_file_name)

# convert locally to flac format
print("converting to flac")
# subprocess.run with an argument list avoids shell-quoting issues, and
# check=True makes the script fail fast if ffmpeg is missing or the
# conversion fails (os.system silently ignored a non-zero exit status)
subprocess.run(
    ['ffmpeg', '-v', 'quiet', '-i', 'video_files/' + source_file_name,
     '-c:a', 'flac', 'flac_files/' + source_file_stem + '.flac'],
    check=True,
)
print("run time:", time.time() - start_time)

start_time = time.time()
print("uploading flac file to cloud")
# (a stray `blob = bucket.blob(source_file_name)` was removed here — it was
# immediately overwritten by the next assignment and never used)
blob = bucket.blob("flac_files/" + source_file_stem + '.flac')

# upload
blob.upload_from_filename('flac_files/' + source_file_stem + '.flac')
print("run time:", time.time() - start_time)

start_time = time.time()

# extract transcript
# +
print("extracting transcript")
client = speech.SpeechClient()

# location of the flac file that the previous cells uploaded into the bucket
gcs_uri = "gs://" + bucket_name + "/flac_files/" + source_file_stem + ".flac"

#https://cloud.google.com/speech-to-text/docs/encoding
#You are not required to specify the encoding and sample rate for WAV or FLAC files.
#If omitted, Speech-to-Text automatically determines the encoding and sample rate for
#WAV or FLAC files based on the file header.
#If you specify an encoding or sample rate value that does not match the value in the
#file header, then Speech-to-Text returns an error.

# model='video' is not required, costs more, but might lead to better transcription
audio = speech.RecognitionAudio(uri=gcs_uri)
config = speech.RecognitionConfig(
    #encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
    #sample_rate_hertz=16000,
    audio_channel_count=2,
    language_code="en-US",
    use_enhanced=True,
    model='video',
    enable_word_time_offsets=True
)

# long_running_recognize returns an operation handle; .result() blocks until
# the asynchronous transcription job has finished
operation = client.long_running_recognize(config=config, audio=audio)
response = operation.result()

print("run time:", time.time() - start_time)
start_time = time.time()
# -

# convert the protobuf response into a plain dict for easy inspection
result_dict = MessageToDict(response.__class__.pb(response))
print(result_dict)

# the first alternative of the first result holds the top-ranked transcript chunk
print(result_dict['results'][0]['alternatives'][0]['transcript'])

# clean up: delete the intermediate blobs in the bucket and the local files
bucket.blob("video_files/" + source_file_name).delete()
bucket.blob("flac_files/" + source_file_stem + '.flac').delete()

os.remove("video_files/" + source_file_name)
os.remove("flac_files/" + source_file_stem + '.flac')
Extract_Transcript_From_MP4.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Adjusting the model
#
# In the previous notebook we tweaked hyperparameters for SVM in order to boost its accuracy.
#
# Here we will explore two other models, Random Forests and Logistic Regression in a bid to achieve more accuracy in our disease prediction question.
#
# The original paper explores SVM, NN, Decision Trees and Naive Bayes.

# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from pprint import pprint

from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix,accuracy_score,f1_score,precision_score,recall_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
# -

def evaluate_model(model, model_name, test_features, test_labels):
    """Evaluate a fitted model on the test data and print its accuracy.

    Parameters
    ----------
    model : fitted estimator exposing .predict()
    model_name : str, label used in the printed report
    test_features : feature matrix of the test set
    test_labels : true labels of the test set
    """
    predictions = model.predict(test_features)
    # accuracy as a percentage, rounded to two decimals
    accuracy = round(100*accuracy_score(test_labels, predictions),2)
    print("The accuracy of the {} model is {}%".format(model_name, accuracy))

# # Prepare data for the ML model

# +
# Import the data and build the pandas dataframe
# Split into class/features
features = load_breast_cancer()
labels = features['target']
features = pd.DataFrame(data=features['data'], columns=features['feature_names'])

# Split into train/test
train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.2, random_state=1)
# -

# ## Feature Scaling, Min-Max Scaler

# +
scaler = MinMaxScaler(feature_range=(0,1))

train_features_scaled = pd.DataFrame(scaler.fit_transform(train_features))
train_features_scaled.columns = train_features.columns
train_features_scaled.index = train_features.index.values

# BUGFIX: the test set must be transformed with the scaler fitted on the
# TRAINING data (transform, not fit_transform) — refitting on the test set
# leaks test-set statistics into the preprocessing.
test_features_scaled = pd.DataFrame(scaler.transform(test_features))
test_features_scaled.columns = test_features.columns
test_features_scaled.index = test_features.index.values
# -

# # SVM
#
# Lets quickly rebuild what was done in the previous notebook

# +
# Train the model using the hyperparameters discovered in the last notebook
svm_model = SVC(C=8, kernel='linear', random_state=1)
svm_model.fit(train_features_scaled, train_labels)

# Test & Evaluate the model
evaluate_model(svm_model, 'SVM', test_features_scaled, test_labels)
# -

# # Random Forests
#
# Random Forests do not require data normalization.

# +
# Train the model
rf_model = RandomForestClassifier(random_state=1)
rf_model.fit(train_features, train_labels)

# Test & evaluate the model
evaluate_model(rf_model, 'RF', test_features, test_labels)
# -

# ## Hyperparameter tuning
#
# **95.61%** is not bad for a starter. Lets do some hyperparameter tuning and see what we come up with.
# # We shall work with the following hyperparameters ([docs](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) explaining them):
# - n_estimators: number of trees in the forest
# - max_features: number of features taken into consideration while building the tree
# - max_depth: the maximum depth that the tree can take
# - min_samples_split: the minimal number of samples required if a node is to be split further
# - min_samples_leaf: the minimal number of samples before a node is declared to be a leaf
# - bootstrap: random forests consist of two randomized processes. If this is False then the whole dataset is used to build each tree
#
# Let's first see the current state of hyperparameters.

pprint(rf_model.get_params())

# +
# Candidate values for the randomized search, assembled directly into the grid.
random_grid = {
    'n_estimators': list(range(200, 2200, 200)),
    'max_features': ['auto', 'log2'],
    'max_depth': list(range(10, 110, 10)) + [None],
    'min_samples_split': [2, 4, 9],
    'min_samples_leaf': [1, 2, 3, 4],
    'bootstrap': [True, False],
}
pprint(random_grid)
# -

# So the RandomizedSearchCV will pick different settings out of a total of 5808 combinations. We will try out 200 out of these.
# + rf = RandomForestClassifier() # scoring: method of evaluating the predictions on the test set rf_model_rand = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=200, scoring='neg_mean_absolute_error', cv=3, verbose=3, random_state=1, n_jobs=-1) rf_model_rand.fit(train_features, train_labels); # - rf_model_rand.best_params_ # Test and evaluate the model evaluate_model(rf_model_rand.best_estimator_, "RF with hyperparameter improvement 1", test_features, test_labels) # Refine search with [GridSearchCV](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html?highlight=gridsearchcv#sklearn.model_selection.GridSearchCV) # + # Try out 720 different parameter sets grid_param = { 'n_estimators':[1000,1200,1400,1600], 'min_samples_split':[2,3,4,5,6,8], 'min_samples_leaf':[2,3,4,5,6,8], 'max_features': ['log2'], 'max_depth': [80, 100, 120, 140, 200] } rf = RandomForestClassifier() rf_model_grid = GridSearchCV(estimator=rf, param_grid=grid_param, cv=5, n_jobs=3, verbose=3) rf_model_grid.fit(train_features, train_labels); # - rf_model_grid.best_params_ # We can stop here and see the final performance of the RF model as compared to the SVM model. evaluate_model(rf_model, 'RF no hyperparameters', test_features, test_labels) evaluate_model(rf_model_grid.best_estimator_, "RF with hyperparameter tuning", test_features, test_labels) # And we got diminishing returns... # # Our efforts in hyperparameter tuning for RF were not successful # ## Variable Importances # # Moreover we can check variable importances and see if we can boost the RF model's performance in this way. 
# +
# Per-feature importances as computed by the fitted random forest.
importances = list(rf_model.feature_importances_)

# plot
plt.figure(figsize=(10, 8))
plt.bar(list(features.columns), importances)
plt.xticks(rotation='vertical')
plt.xlabel("Features");
plt.ylabel("Importances");
plt.title("Variable Importances");
plt.show()
# -

# ### Build a model with the top 8 most important variables
#
# Since we see that not all variables are equally important, lets pick the top 8 most important and build a model out of that.

# +
n_features = 8

# Pair each feature name with its importance, rank descending, keep the top 8.
ranked = sorted(zip(features.columns, importances), key=lambda pair: pair[1], reverse=True)
top_feat = [name for name, _ in ranked[:n_features]]
top_feat

# +
important_features_train = train_features[top_feat]
important_features_test = test_features[top_feat]

# Train the new model on the reduced feature set
rf_impt_feat = RandomForestClassifier(random_state=1)
rf_impt_feat.fit(important_features_train, train_labels)

evaluate_model(rf_impt_feat, "RF only with important features", important_features_test, test_labels)
# -

# The accuracy does not increase. Despite our efforts it seems that we cannot further boost this model. We now move to logistic regressions.

# # [Logistic Regression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html)
#
# We saw a little bit in the Random Forest model that not all features are equally important. Some are much more important than others. Therefore we shall use Lasso Regression so that the algorithm discards potentially useless features. Lasso Regression can shrink the slope all the way to 0 thereby removing redundant features.
# ## Scaling
# ### Min-Max

# +
# Train
# l1 penalty (Lasso) can shrink uninformative coefficients to exactly zero,
# effectively discarding redundant features; liblinear supports l1.
lr1 = LogisticRegression(random_state=1, penalty='l1', solver='liblinear')
lr1.fit(train_features_scaled, train_labels)

# Test & Evaluate
evaluate_model(lr1, "Logistic Regression", test_features_scaled, test_labels)
# -

# ### Standard Scaler

# +
scaler2 = StandardScaler()

train_features_scaled2 = pd.DataFrame(scaler2.fit_transform(train_features))
train_features_scaled2.columns = train_features.columns
train_features_scaled2.index = train_features.index.values

# BUGFIX: the test set must be transformed with the scaler fitted on the
# TRAINING data (transform, not fit_transform) — refitting on the test set
# leaks test-set statistics into the preprocessing.
test_features_scaled2 = pd.DataFrame(scaler2.transform(test_features))
test_features_scaled2.columns = test_features.columns
test_features_scaled2.index = test_features.index.values

# Train model
lr2 = LogisticRegression(random_state=1, penalty='l1', solver='liblinear')
lr2.fit(train_features_scaled2, train_labels)

# Test & Evaluate
evaluate_model(lr2, "Logistic Regression", test_features_scaled2, test_labels)
# -

# So, Standard Scaler seems to perform much better for logistic regression, namely **97.37%** as opposed to the **95.61%** of the Min-Max Scaler.

# ## Evaluation of the Model using Cross Validation

lr_accuracy = cross_val_score(estimator=lr2, X=train_features_scaled2, y=train_labels, cv=10)
# BUGFIX: the model under evaluation is a Logistic Regression (the message
# previously said "Linear Regression")
print("Accuracy of the Logistic Regression model with a 10-fold CV:", round(100*np.mean(lr_accuracy), 2))

# We do not notice any improvement...

# # Conclusions

evaluate_model(svm_model, 'SVM', test_features_scaled, test_labels)

evaluate_model(lr2, "Logistic Regression", test_features_scaled2, test_labels)

evaluate_model(rf_model, 'RF', test_features, test_labels)

# We saw 3 models applied on our dataset on a bid to increase the accuracy. The scoring is as follows:
#
# 1. SVM: **98.25%**
# 2. Logistic Regression: **97.37%**
# 3. Random Forests: **95.61%**
#
# The referenced [paper](https://www.sciencedirect.com/science/article/pii/S1877050916302575) has these results:
# 1. SVM: **97.13%**
# 2. Naive Bayes: **95.99%**
# 3. k-NN: **95.27%**
# 4.
# Decision Trees: **95.13%**
#
# As can be seen, our SVM model performs better than the paper's, and this can probably be credited to the kernel choice resulting from the hyperparameter tuning, as we saw in the last notebook (we use a linear instead of an rbf kernel).
#
# Moreover, the Logistic Regression model also performs better than the paper's SVM.
#
# Random Forests have a disappointing performance when compared to SVM.
#
Disease Predictor/Model Improvement.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# reload edited modules automatically while developing
# %load_ext autoreload
# %autoreload 2

import os,sys
sys.path.insert(0,"..")  # so the in-tree torchxrayvision package is importable
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchxrayvision as xrv

# +
import torchvision, torchvision.transforms

# deterministic preprocessing: center-crop then resize to 224x224
transform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),xrv.datasets.XRayResizer(224)])

# augmentation pipeline (random affine jitter); defined here but not passed to
# the datasets below
data_transforms = torchvision.transforms.Compose([
    xrv.datasets.ToPILImage(),
    torchvision.transforms.RandomAffine(45, translate=(0.15, 0.15), scale=(0.85, 1.15)),
    torchvision.transforms.ToTensor()
])
# -

#COVID-19 Dataset (PA view only)
d_covid_pa = xrv.datasets.COVID19_Dataset(imgpath="/Users/ieee8023/git/covid-chestxray-dataset/images",
                                          csvpath="/Users/ieee8023/git/covid-chestxray-dataset/metadata.csv",
                                          views=["PA"],
                                          transform=transform)

#COVID-19 Dataset (AP views only)
d_covid_ap = xrv.datasets.COVID19_Dataset(imgpath="/Users/ieee8023/git/covid-chestxray-dataset/images",
                                          csvpath="/Users/ieee8023/git/covid-chestxray-dataset/metadata.csv",
                                          views=["AP", "AP Supine"],
                                          transform=transform)

# +
d1 = d_covid_pa
d2 = d_covid_ap

# CovariateDataset mixes the two source datasets ("sites") with a controlled
# ratio, simulating covariate shift between training distributions.
covariate_train05 = xrv.datasets.CovariateDataset(d1 = d1,
                                                  d1_target = d1.labels[:,d1.pathologies.index("Viral")],
                                                  d2 = d2,
                                                  d2_target = d2.labels[:,d2.pathologies.index("Viral")],
                                                  mode="train",
                                                  verbose=True,
                                                  ratio=0.5)

covariate_train09 = xrv.datasets.CovariateDataset(d1 = d1,
                                                  d1_target = d1.labels[:,d1.pathologies.index("Viral")],
                                                  d2 = d2,
                                                  d2_target = d2.labels[:,d2.pathologies.index("Viral")],
                                                  mode="train",
                                                  verbose=True,
                                                  ratio=0.9)
# -

def get_label_images(d, num = 10):
    """Sum a fixed random sample of images for each class of dataset `d`.

    Returns a pair (img_0, img_1): the pixel-wise sums of sampled images
    whose label (column 0 of d.labels) is 0 and 1 respectively.
    NOTE(review): the `if i > num: break` guard fires AFTER the item has
    been added, so num+2 images are summed rather than num — confirm this
    off-by is intended.
    """
    np.random.seed(10)  # fixed seed so repeated calls sample the same images
    site_0 = np.where(d.labels[:,0] == 0)[0]
    #print(site_0)
    site_1 = np.where(d.labels[:,0] == 1)[0]
    #print(site_1)
    img_0 = np.zeros((224, 224))
    img_1 = np.zeros((224, 224))
    np.random.shuffle(site_0)
    for i, idx in enumerate(site_0):
        e = d[idx]
        img_0 += e["img"][0]
        if i > num:
            break
    np.random.shuffle(site_1)
    for i, idx in enumerate(site_1):
        e = d[idx]
        img_1 += e["img"][0]
        if i > num:
            break
    return (img_0, img_1)

# average-image summaries for the heavily skewed (ratio=0.9) mixture
train_0, train_1 = get_label_images(covariate_train09, 60)

# +
fig, ((ax1, ax2, ax3)) = plt.subplots(figsize=(10,10), nrows=1, ncols=3) #, (ax4, ax5, ax6)

ax1.imshow(train_0, cmap="Greys_r")
ax1.set_title('Train Class 0')
ax1.axis('off')
ax2.imshow(train_1, cmap="Greys_r")
ax2.set_title('Train Class 1')
ax2.axis('off')

# difference image, symmetric colour scale around zero
dif = train_0-train_1
lim = np.max(np.abs(dif))
ax3.imshow(dif, vmin=-lim,vmax=lim)
ax3.set_title('Train Class 0 - Train Class 1');
ax3.axis('off')
plt.tight_layout()
# -

# same summaries for the balanced (ratio=0.5) mixture
train_0, train_1 = get_label_images(covariate_train05, 60)

# +
fig, ((ax1, ax2, ax3)) = plt.subplots(figsize=(10,10), nrows=1, ncols=3) #, (ax4, ax5, ax6)

ax1.imshow(train_0, cmap="Greys_r")
ax1.set_title('Train Class 0')
ax1.axis('off')
ax2.imshow(train_1, cmap="Greys_r")
ax2.set_title('Train Class 1')
ax2.axis('off')

dif = train_0-train_1
#lim = np.max(np.abs(dif)) # use existing limit (same scale as the previous figure)
ax3.imshow(dif, vmin=-lim,vmax=lim)
ax3.set_title('Train Class 0 - Train Class 1');
ax3.axis('off')
plt.tight_layout()
# -
scripts/xray_datasets-CovariateShift.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Scraping
#     language: python
#     name: scraping
# ---

# ## HTML Essentials

# + language="html"
# <html>
# <head>
# <title>Page Title</title>
# </head>
# <body>
# <h1>This is a Heading</h1>
# <p>This is a paragraph.</p>
# <a href="https://www.w3schools.com/">This is a link to tutorials</a>
# </body>
# </html>
# -

# ## CSS Classes

# + language="html"
# <p class="center medium">This paragraph refers to two classes.</p>
# -

html_string = r"""<html>
<head>
<title>Page Title</title>
</head>
<body>
<h1>This is a Heading</h1>
<p>This is a paragraph.</p>
<a href="https://www.w3schools.com/">This is a link to tutorials</a>
</body>
</html>"""

from bs4 import BeautifulSoup

# parse the literal document above with the stdlib html parser
html_soup = BeautifulSoup(html_string, 'html.parser')
print(html_soup)

html_soup.h1
html_soup.h1.text
html_soup.body
html_soup.a
html_soup.a.get('href')

# ### Requests & Responses

import requests

response = requests.get('https://www.bundes-telefonbuch.de/nuernberg/firma/karosseriefachbetrieb-hofer-tb1150222')
print(response.text)

response.status_code

# ### Beautiful Soup

entry_soup = BeautifulSoup(response.text, 'html.parser')
print(entry_soup)

entry_soup.h1
entry_soup.h1.text.strip()
entry_soup.find('a', {'class':'detail-email'})
entry_soup.find('a', {'class':'detail-email'}).get('href')

address_tag = entry_soup.find('div', {'class':'detail-address'})
print(address_tag)

address_tag.text.strip()
# stripped_strings is a generator over the tag's text fragments
address_tag.stripped_strings
address_parts = list(address_tag.stripped_strings)
print(address_parts)
' '.join(address_parts)

# ## Create Scrapy Project
# NOTE: the following are shell commands, not Python — run them in a terminal.
# (kept as comments so this script remains valid Python)
# scrapy startproject yellow_pages
# cd yellow_pages
# scrapy genspider telefonbuch bundes-telefonbuch.de

# ## Scrapy Shell for debugging
# scrapy shell 'www.bundes-telefonbuch.de'

# ### Scrapy: Fill Out Forms
# fetch() is a scrapy-shell helper; the lines below are meant to be executed
# inside `scrapy shell`, where `response` and `scrapy` are provided.
fetch(scrapy.FormRequest.from_response(response, formid='searchForm', formdata={'what':'auto', 'where':'Muenchen'}))

response.css('div.companyBox')

results_soup = BeautifulSoup(response.text,'html.parser')
results_soup.prettify()

# ### Extract entries

entry = results_soup.find('div',{'class':'companyBox'})

# BUGFIX: the soup variable is named results_soup (was: result_soup -> NameError)
entries = results_soup.find_all('div', {'class':'companyBox'})
len(entries)

# ### Paging

paging = results_soup.find('div',{'id':'pagination'})
pages = paging.find_all('a')
len(pages)

# the "next page" link is labelled with a down-chevron character
next_page = None
for page in pages:
    if page.text.strip()=='∨':
        next_page = page.get('href')
        break

start_urls = ['http://bundes-telefonbuch.de/']
if next_page:
    next_page = start_urls[0] + next_page
print(next_page)

# ### Get the details of the entry

entry_url = entry.a.get('href')
fetch(scrapy.http.Request(start_urls[0] + entry_url))

entry_soup = BeautifulSoup(response.text,'html.parser')

name = entry_soup.h1.text.strip()
print(name)

address_div = entry_soup.find('div', {'class':'detail-address'})
print(address_div.text)
print(address_div.stripped_strings)
address_parts = list(address_div.stripped_strings)
print(address_parts)
address = ' '.join(address_parts)
print(address)

email = entry_soup.find('a',{'class':'detail-email'}).get('href')
print(email)

website = entry_soup.find('a',{'class':'detail-homepage'}).get('href')
print(website)

# BUGFIX: the selectors were swapped — 'detail-phone' holds the telephone
# number and 'detail-fax' holds the fax number
tel = entry_soup.find('span', {'class':'detail-phone'}).text.strip()
fax = entry_soup.find('span', {'class':'detail-fax'}).text.strip()
print(tel, fax)

# ## Run crawler
# (shell command — run in a terminal)
# scrapy crawl telefonbuch -a sector="Auto" -a city="Muenchen" -o "Auto_Muenchen.csv"
Scraping_demo-live.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
def my_for(iterable, func):
    """Hand-rolled for-loop: drive the iterator protocol explicitly.

    Calls func on every item of iterable and prints a marker once the
    iterator is exhausted (StopIteration).
    """
    iterator = iter(iterable)
    while True:
        try:
            thing = next(iterator)
        except StopIteration:
            print("\nEND OF ITERATOR\n\n")
            break
        else:
            func(thing)


def square(x):
    """Print the square of x."""
    print(x**2)


my_for('SubhadeepBanerjee', print)
my_for([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], square)


# +
class Counter(object):
    """Iterator yielding the integers from low (inclusive) to high (exclusive)."""

    def __init__(self, low, high):
        self.current = low
        self.high = high

    def __iter__(self):
        # the object is its own iterator
        return self

    def __next__(self):
        if self.current < self.high:
            num = self.current
            self.current += 1
            return num
        raise StopIteration


for x in Counter(20, 50):
    print(x)


# +
from random import shuffle


class Card:
    """A single playing card (suit + value)."""

    def __init__(self, suit, value):
        self.suit = suit
        self.value = value

    def __repr__(self):
        return f"{self.value} of {self.suit}"


class Deck:
    """A standard 52-card deck supporting iteration, dealing and shuffling."""

    # The suit/value universe lives in one place so that __init__ and reset()
    # cannot drift apart (they previously duplicated these two lists).
    SUITS = ['Hearts', 'Diamonds', 'Clubs', 'Spades']
    VALUES = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']

    def __init__(self):
        self.cards = self._fresh_cards()

    @classmethod
    def _fresh_cards(cls):
        """Return a brand-new, ordered list of all 52 cards."""
        return [Card(suit, value) for suit in cls.SUITS for value in cls.VALUES]

    def __repr__(self):
        return f"Deck of {self.count()} cards."

    def __iter__(self):
        # delegate to the underlying list's iterator
        return iter(self.cards)

    # VERSION USING A GENERATOR FUNCTION
    # (covered in the next video)
    # def __iter__(self):
    #     for card in self.cards:
    #         yield card

    def reset(self):
        """Restore the deck to a full, ordered 52-card state."""
        self.cards = self._fresh_cards()
        return self

    def count(self):
        """Number of cards currently left in the deck."""
        return len(self.cards)

    def _deal(self, num):
        """Remove and return up to num cards from the end of the deck.

        Raises ValueError if the deck is empty.
        BUGFIX: dealing zero cards used to return the WHOLE deck and empty
        it, because self.cards[-0:] slices the entire list; it now returns [].
        """
        count = self.count()
        actual = min([num, count])  # make sure we don't try to over-deal
        if count == 0:
            raise ValueError("All cards have been dealt")
        if actual <= 0:
            return []
        cards = self.cards[-actual:]       # slice off the end
        self.cards = self.cards[:-actual]  # adjust cards
        return cards

    def shuffle(self):
        """Shuffle a full deck in place; partial decks cannot be shuffled."""
        if self.count() < 52:
            raise ValueError("Only full decks can be shuffled")
        shuffle(self.cards)
        return self

    def deal_card(self):
        """ Returns a single Card """
        return self._deal(1)[0]

    def deal_hand(self, hand_size):
        """ Returns a list of Cards """
        return self._deal(hand_size)


my_deck = Deck()
# my_deck.shuffle()

for card in my_deck:
    print(card)
# -
Iterators.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.7.12 ('decimer_segmentation_package')
#     language: python
#     name: python3
# ---

# +
import os
import numpy as np
from pdf2image import convert_from_path
from PIL import Image
from decimer_segmentation import segment_chemical_structures, segment_chemical_structures_from_file
# -

# ## Define pdf path and convert to image
#
# Please uncomment according to the system you are using.
# If you are using Windows, make sure, poppler is installed properly.
# On Linux and MacOS, this should work properly without any problems.
#
# #### Uncomment below according to your needs.

# +
path = os.path.abspath('./Validation/test_page.pdf')

#If you are using Windows, make sure to have poppler installed and show pdf2image where to find it.
poppler_path = "C:\\Program Files (x86)\\poppler-0.68.0\\bin" # CHANGE PATH
# render the PDF at 300 dpi; returns one PIL image per page
pages = convert_from_path(path, 300, poppler_path = poppler_path)

# If you are using Linux or MacOS, this should hopefully have been installed via conda/pip.
# In that case, you just run:
#pages = convert_from_path(path, 500)

pages[0]
# -

# ## Run the detection using the Mask R CNN model

# detect chemical-structure regions on the first page (no mask expansion)
segments = segment_chemical_structures(np.array(pages[0]), expand=False, visualization=True)

# ## Run DECIMER Segmentation with the mask expansion

# segments = segment_chemical_structures(np.array(pages[0]), expand=True, visualization=True)

# ## Run DECIMER Segmentation directly on a file
#
# If you run it on a pdf_file on Windows, you may need to adapt the poppler_path argument of segment_chemical_structures_from_file. If the poppler installation is not found, it tries to find it at "C:\\Program Files (x86)\\poppler-0.68.0\\bin". If you have installed poppler elsewhere, please adapt the path.

path = os.path.abspath('./Validation/test_page.pdf')
# convenience entry point: handles the PDF-to-image conversion internally
segments = segment_chemical_structures_from_file(path, expand=True, poppler_path=None)

# Show first segment
Image.fromarray(segments[0])
DECIMER_Segmentation_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Gruppe3VDL/3D_computer_vision/blob/master/Test_Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="ey9TTcl5rwMT" colab_type="code" outputId="4b5e973a-b623-409d-b992-14510f783be7" colab={"base_uri": "https://localhost:8080/", "height": 445}
import os, sys
import urllib.request
import numpy as np
import cv2 as cv
import scipy.io as io


def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):
    """Project points from 3d world coordinates to 2d image coordinates.

    Works both with and without lens distortion.

    Parameters
    ----------
    X : (N, 3) array of 3D points in world coordinates.
    K : (3, 3) camera intrinsic matrix.
    R : (3, 3) rotation matrix, world -> camera.
    T : (3, 1) translation vector, world -> camera.
    distortion_flag : apply the lens distortion model when True.
    distortion_params : the five OpenCV distortion coefficients
        (k1, k2, p1, p2, k3); required when distortion_flag is True.

    Returns
    -------
    (N, 2) array of pixel coordinates (u, v).
    """
    X = np.asarray(X, dtype=float)
    # world -> camera coordinates, shape (3, N)
    x_cam = R @ X.T + np.reshape(T, (3, 1))
    # perspective division onto the normalized image plane
    x = x_cam[0] / x_cam[2]
    y = x_cam[1] / x_cam[2]
    if distortion_flag:
        # OpenCV radial (k1, k2, k3) + tangential (p1, p2) distortion model
        k1, k2, p1, p2, k3 = np.ravel(distortion_params)[:5]
        r2 = x * x + y * y
        radial = 1.0 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3
        x_dist = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x)
        y_dist = y * radial + p1 * (r2 + 2.0 * y * y) + 2.0 * p2 * x * y
        x, y = x_dist, y_dist
    # apply the intrinsics (including any skew term K[0, 1])
    u = K[0, 0] * x + K[0, 1] * y + K[0, 2]
    v = K[1, 1] * y + K[1, 2]
    return np.stack([u, v], axis=-1)


def project_and_draw(img, X_3d, K, R, T, distortion_flag, distortion_parameters):
    """Project each view's 3D points, draw them on its image, save to ./results.

    img : (V, H, W, 3) stack of images, one per view.
    X_3d : (V, N, 3) 3D points per view; K, R, T as in project_points
        (R and T indexed per view).
    """
    # results are saved in a separate folder named "results"
    os.makedirs('results', exist_ok=True)
    for view in range(len(img)):
        pts_2d = project_points(X_3d[view], K, R[view], T[view],
                                distortion_flag, distortion_parameters)
        canvas = img[view].copy()
        for u, v in pts_2d:
            # filled red dot at each projected point (BGR colour order)
            cv.circle(canvas, (int(round(u)), int(round(v))), 3, (0, 0, 255), -1)
        cv.imwrite(os.path.join('results', str(view).zfill(5) + '.jpg'), canvas)


if __name__ == '__main__':
    base_folder = './data/'

    # Consider distorition
    dist_flag = True

    # Load the data
    # There are 25 views/or images/ and 40 3D points per view
    # BUGFIX: scipy.io.loadmat cannot read directly from a URL —
    # download the .mat file locally first, then load it.
    mat_url = 'https://github.com/Gruppe3VDL/3D_computer_vision/blob/master/ex_1_data.mat?raw=true'
    local_mat = 'ex_1_data.mat'
    if not os.path.exists(local_mat):
        urllib.request.urlretrieve(mat_url, local_mat)
    data = io.loadmat(local_mat)

    # 3D points in the world coordinate system
    X_3D = data['x_3d_w']  # shape=[25, 40, 3]
    # Translation vector: as the world origin is seen from the camera coordinates
    TVecs = data['translation_vecs']  # shape=[25, 3, 1]
    # Rotation matrices: project from world to camera coordinate frame
    RMats = data['rot_mats']  # shape=[25, 3, 3]
    # five distortion parameters
    dist_params = data['distortion_params']
    # K matrix of the cameras
    Kintr = data['k_mat']  # shape 3,3

    imgs_list = [cv.imread(base_folder + str(i).zfill(5) + '.jpg') for i in range(TVecs.shape[0])]
    imgs = np.asarray(imgs_list)

    project_and_draw(imgs, X_3D, Kintr, RMats, TVecs, dist_flag, dist_params)

# + id="jZZTkV2jLxJc" colab_type="code" outputId="8946d601-bc1a-42e5-bc89-f876d5d94657" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
Exercise2/Test_Notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ![alt text](images/Kusto.jpg "Kusto Magic")

# # Kqlmagic Quick Start
# 1. Press F11 to switch the browser to full-screen mode
# 2. If you are running JupyterLab, in the View tab select 'Single-Document Mode' and 'Presentation Mode' (Ctrl + Shift + D)
# 3. In the Kernel tab, select 'Restart Kernel and Clear All Outputs...' to start fresh
# 4. Repeat: press Shift+Enter to run the cells sequentially

# ## Make sure that you have the latest version of Kqlmagic
# Download Kqlmagic from PyPI and install/update
# (if the latest version is already installed you can skip this step)

# +
# #!pip install Kqlmagic --no-cache-dir --upgrade
# -

# ## If you want to pre-set defaults, set environment variables before activating the notebook, or within the notebook
# - **KQLMAGIC_NOTEBOOK_APP** - set the notebook application (default **jupyternotebook**)
# - **KQLMAGIC_LOAD_MODE** - set the kqlmagic load mode (default **verbose**). If silent, it will load without welcome output
# - **KQLMAGIC_CONFIGURATION** - modify the default configuration, key=value pairs must be separated by a **semicolon**
#
# - **KQLMAGIC_CONNECTION_STR** - set the default connection string (default **None**).
If set it will connect to it on Kqlmagic load # + # you can try it within the notebook, by setting the environment variable using %env magic # #%env KQLMAGIC_NOTEBOOK_APP=jupyterlab # #%env KQLMAGIC_LOAD_MODE=silent # #%env KQLMAGIC_CONFIGURATION="show_query_time=False;plot_package='plotly';display_limit=100" # #%env KQLMAGIC_CONNECTION_STR=appinsights://appid='DEMO_APP';appkey='DEMO_KEY' # #%env KQLMAGIC_LOG_LEVEL=DEBUG # #%env KQLMAGIC_LOG_FILE_MODE=Append # #%env KQLMAGIC_LOG_FILE=michael.log # #%env KQLMAGIC_LOG_FILE_PREFIX=myLog # - # ## Add Kqlmagic to notebook magics # %reload_ext Kqlmagic # %kql --usage # %kql --help "kql" # - ### <span style="color:#82CAFA">*Note: information is included in the Kqlmagic banner, and in the note below it, with version and location of the open source code*</span> # - ### <span style="color:#82CAFA">*Note: after Kqlmagic was loaded, kql reference was added to Help menu (Help tab)*</span> # ## Get help # %kql --help "help" -pw # ## Get Application Insights reference # %kql --help "ApplicationInsights" # ## Connect to appid@appinsights # there are few options to authenticate with Apllication Insights:<br> # ```python # # %kql appinsights://appid='<app-id>';appkey='<app-key>';alias='<appid-friendly-name>' # ``` # ```python # # %kql appinsights://code;appid='<app-id>';alias='<appid-friendly-name>' # ``` # ```python # # %kql appinsights://tenant='<tenant-id>';clientid='<aad-appid>';clientsecret='<aad-appkey>';appid='<app-id>';alias='<appid-friendly-name>' # ``` # ```python # # %kql appinsights://username='<username>';password='<password>';appid='<app-id>';alias='<appid-friendly-name>' # ``` # ```python # # %kql appinsights://anonymous;appid='<app-id>';alias='<appid-friendly-name>' # ``` # # Notes:<br> # (1) username/password works only on corporate network.<br> # (2) alias is optional.<br> # (3) if credentials are missing, and a previous connection was established the credentials will be inherited.<br> # (4) if secret (password / 
clientsecret / appkey) is missing, user will be prompted to provide it.<br> # (5) if tenant is missing, and a previous connection was established the tenant will be inherited.<br> # (6) **a not quoted value, is a python expression, that is evaluated and its result is used as the value. This is how you can parametrize the connection string** # <br>(7) anonymous authentication, is NO authentication, for the case that your cluster is local. # %kql appinsights://appid='DEMO_APP';appkey='DEMO_KEY' # - ### <span style="color:#82CAFA">*Note - popup window that opened with the appid@appinsights schema*</span> # - ### <span style="color:#82CAFA">*Note - Help menu contains the appid@appinsights schema*</span> # ## Get help on connection string, how to authenticate and get access to you data resource # %kql --help "conn" -popup_window # ## Once connected you can query the current appid@appinsights # %kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | limit 10 # ## Python code can access the result by the \_ variable _ # ## Converts result data to a dataframe df = _.to_dataframe() df df.head() df.client_City df.min() df.to_json() df.columns # ## Excution of another cell overrides \_ , However, \_kql\_raw\_result\_ python variable holds last kql query result _ _kql_raw_result_ _ # ## Show a chart, render the query to the proper chart # %kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | limit 10 | render piechart title='my apple pie' # - ### <span style="color:#82CAFA">*Note: the chart is interactive, hoover to the right upper corner to get the menu, click on the legend to toggle facets, hoover on the chart to get value and percentage*</span> # ## Assign kql query result to a python variable # %kql my_bar_chart << pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | limit 10 | render barchart title='my bar chart' # - ### <span style="color:#82CAFA">*Note: 
nothing is displayed when the result is assigned to a variable*</span>
It can include only kql code*</span> # ## Show last chart result as a table _.show_table() # ## Show last result in a popup window _.popup() # ## Resize poup window, and to adjust the chart inside the window press F5 # - ### <span style="color:#82CAFA">*Note: when you F5, you loose the latest chart display state*</span> # ## Show last chart result as a table in a popup window _kql_raw_result_.popup_table() # ## More complex chart # %%kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | extend count2 = count_*count_ | limit 10 | render piechart title='my pie chart #2' # ## Convert result data to dictionary of columns dict_of_columns = _kql_raw_result_.to_dict() print(dict_of_columns) _kql_raw_result_.to_csv('csv_file.csv') # #%cat csv_file.csv # # Get last kql query attributes # ## Query string _kql_raw_result_.query # ## Query render title _kql_raw_result_.title # ## Query render visualization _kql_raw_result_.visualization # ## Number of records in query response _kql_raw_result_.records_count # ## List of columns in query table response _kql_raw_result_.columns_name # ## Kqlmagic time metric (in seconds) _kql_raw_result_.start_time _kql_raw_result_.end_time _kql_raw_result_.elapsed_timespan # ## Query connection database@cluster _kql_raw_result_.connection # ## Query Options (settings) _kql_raw_result_.options # ## Completion query information (query metadata) _kql_raw_result_.completion_query_info # ## Completion query resource consumption (query metadata) _kql_raw_result_.completion_query_resource_consumption _kql_raw_result_.completion_query_resource_consumption['resource_usage']['cache']['memory']['hits'] # ## Completion query data set info (query metadata) _kql_raw_result_.dataSetCompletion # ## Query raw json response _kql_raw_result_.raw_json _kql_raw_result_.raw_json['Tables'][1]['TableName'] _kql_raw_result_.raw_json['Tables'][1]['Columns'][0]['ColumnName'] _kql_raw_result_.raw_json['Tables'][3]['Rows'][0][1] # # 
Advanced part # ## Kql query with option -c2lv (-columns_to_local_vars) # %%kql -c2lv pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | limit 10 | render columnchart title='my column chart' # - ### <span style="color:#82CAFA">*Note: option are specified before the query*</span> # - ### <span style="color:#82CAFA">*Note: c2lv is abbreviation of columns_to_local_vars options*</span> # - ### <span style="color:#82CAFA">*Note: columns_to_local_vars option returns data result in local variables from column names*</span> # ### result data is assigned to local variables with columns names client_City count_ # - ### <span style="color:#82CAFA">*Note - current appid@appinsights is always shown*</span> # ## Kql query with negated option -f (-feedback) # %%kql -!feedback pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | limit 10 | render barchart title='my bar chart' # - ### <span style="color:#82CAFA">*Note: the feedback message 'Done..... records' was not displayed because of the negated feedback option*</span> # - ### <span style="color:#82CAFA">*Note: option is negated by adding \! 
before the option*</span> # - ### <span style="color:#82CAFA">*Note: full option name instead of abbreviation can be used too*</span> # - ### <span style="color:#82CAFA">*Note: feedback option is on by default, so to turn it off, it must be negated*</span> # ## Get default value of option feedback (f) # %config Kqlmagic.feedback # - ### <span style="color:#82CAFA">*Note: options have defaults, and can be accessed to read or modify using %config magic*</span> # ## Set default value of option feedback (f) to False # %config Kqlmagic.feedback=False # %config Kqlmagic.feedback # %%kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | extend count2 = count_*count_ | limit 5 # - ### <span style="color:#82CAFA">*Note: feedback message was not displayed, because the default feedback option was set to False*</span> # %config Kqlmagic.feedback=True # %config Kqlmagic.feedback # ## Show all option defaults with help # %config Kqlmagic # ## Show query results in a popup window # %%kql -pw let randn = () {rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand() + rand()}; range c from 1 to 1000 step 1 | limit 1000 | extend e1 = randn() | extend e2 = randn() | extend e3 = randn() | extend e4 = randn() | extend e5 = randn() | project e1,e2,e3,e4,e5 | render scatterchart title="Michael's chart" # - ### <span style="color:#82CAFA">*Note: popup_window option (abbr: pw) cause the result to be displayed in a popup window*</span> # ## Empty table # %%kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | extend count2 = count_*count_ | limit 0 # %%kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | extend count2 = count_*count_ | limit 0 | render piechart title='my chart' len(_kql_raw_result_) _kql_raw_result_.popup() _kql_raw_result_.show_table() df = _kql_raw_result_.to_dataframe() df df.to_json() 
_kql_raw_result_.raw_json # ## Kql request with fork - return multiple tables # %%kql pageViews | where client_City != '' | summarize count() by client_City | sort by count_ | extend count2 = count_*count_ | fork (limit 10) (project client_City | limit 5) _kql_raw_result_.fork_result(1) # ## Submit same query _kql_raw_result_.submit() _kql_raw_result_.fork_result(1) # ## Refresh query _kql_raw_result_.fork_result(1).refresh() _kql_raw_result_.fork_result(0) _kql_raw_result_
notebooks/QuickStartAI.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.5 64-bit ('env') # metadata: # interpreter: # hash: d57cf295867dfcf8afaf623a452fd288426cc9eed0701f0d455475125110c7a6 # name: Python 3.8.5 64-bit ('env') # --- import sys sys.version # + tags=[] import json from matplotlib import pyplot as plt import pandas as pd pd.set_option('max_columns', 0) def set_colors(fg='w', bg='#222'): plt.rcParams.update({ 'xtick.color': fg, 'ytick.color': fg, 'text.color': fg, 'axes.labelcolor': fg, 'axes.edgecolor': fg, 'axes.facecolor': bg, 'figure.facecolor': bg, }) set_colors() # - # ## new output # -> 10x smaller without confirmed (2.7M) # -> 7x smaller with confirmed (3.4M) # -> 25x smaller with new, d* (935k) # !ls -lh ../output/table.json # !ls -lh ../output/info.json with open('../output/table.json') as f: table = json.load(f) with open('../output/info.json') as f: info = json.load(f) def fillnarows(table): return [row if row is not None else [None] * 3 for row in table] pd.DataFrame(fillnarows(table[-1]), index=info['rows'], columns=info['cols']) df = pd.DataFrame( [row for _ in map(fillnarows, table) for row in _], index=pd.MultiIndex.from_product((info['dates'], info['rows'])), columns=info['cols'], ) df.index = df.index.set_names(('date', 'country')) print(df.values.shape) df.head() df.loc[(slice(info['dates'][0], info['dates'][-1]), 'Afghanistan'), :] df.loc[(slice(None), 'Afghanistan'), :] idx = pd.IndexSlice df.loc[idx[:, 'Afghanistan'], :] df.xs('Afghanistan', level=1) # drop_level=True df.unstack().rolling(7).mean().loc[:, idx['new', ('Switzerland', 'Germany')]].plot(); # ## old output path = '../output/bydate.json' bydate = json.load(open(path)) # !ls -lh $path len(bydate) # level: country, state, county (not currently) row = bydate[-1] data = row['data'] row.keys(), row['date'], list(data.keys())[0], data['Afghanistan'] def 
get_country(country): return pd.DataFrame([ dict(date=row['date'], **row['data'][country]) for row in bydate if country in row['data'] ]).set_index('date') country = get_country('Afghanistan') # so confirmed is basically == cumsum(new) (country.confirmed - country.new.cumsum()).value_counts() # so recovered is basically == confirmed - deaths - active (country.confirmed - country.deaths - country.active - country.recovered).value_counts() keys = pd.DataFrame(({ 'key': key, 'key0': key.split(',')[0] if ',' in key else None, 'level': value['level'] } for key, value in data.items())) keys.level.value_counts() keys.key0.value_counts() sizes = pd.DataFrame(({'date': _['date'], 'kb_per_day': len(json.dumps(_['data']))/1024} for _ in bydate)) sizes.set_index('date').kb_per_day.plot(); bydate_countries = [ dict( date=row['date'], data={ k: v for k, v in row['data'].items() if v['level'] == 'country' } ) for row in bydate ] len(json.dumps(bydate_countries)) / len(json.dumps(bydate)) # ## covid_19 : CH data # + tags=[] df = pd.read_csv('covid_19/COVID19_Fallzahlen_CH_total_v2.csv') print(len(df)) df.tail() # - ddf = df.set_index(['date', 'abbreviation_canton_and_fl']).loc[ :, ['ncumul_conf', 'ncumul_deceased', 'ncumul_released']].rename( columns=dict(ncumul_conf='confirmed', ncumul_deceased='deaths', ncumul_released='recovered') ) ddf.unstack().tail() # "recovered" often missing, unfortunately ... 
ddf.recovered.unstack() ddf.loc[(slice(None), ('AG',)), 'confirmed'].plot() print('\n'.join(sorted(df.columns))) # + tags=[] # new ZH cases plt.figure(figsize=(15, 5)) df[df.abbreviation_canton_and_fl == 'ZH'].set_index('date').ncumul_conf.diff().plot(); # - # new CH cases (filled) plt.figure(figsize=(15, 5)) df.set_index(['date', 'abbreviation_canton_and_fl']).ncumul_conf.unstack().fillna(method='ffill').stack().reset_index().rename(columns={0: 'ncumul_conf'}).groupby('date').ncumul_conf.sum().diff().plot(); # latest new cases by canton (filled) df.set_index(['date', 'abbreviation_canton_and_fl']).ncumul_conf.unstack().fillna(method='ffill').diff().tail(10).astype(int) df[df.date == df.date.max()] # ## ZH data # Unfortunately only until July. df = pd.read_csv('covid_19/fallzahlen_kanton_alter_geschlecht_csv/COVID19_Fallzahlen_Kanton_ZH_alter_geschlecht.csv') df.AgeYear.hist() print(len(df), ','.join(df.columns), 'latest date', df.Date.max()) col = 'AgeYear' # limitcol = 'AgeYear' # limits = (10, 40, 60) limitcol = 'Date' limits = ('2020-05-01', '2020-06-01', '2020-07-01') _, axs = plt.subplots(len(limits) + 1, 2, figsize=(2 * 3, (len(limits) + 1) * 3)) for y in range(len(limits) + 1): for x, gender in enumerate('FM'): ax = axs[y][x] sel = df.Gender == gender title = f'gender={gender} ' if y == len(limits): sel &= df[limitcol] > limits[-1] title += f'{limitcol} > {limits[-1]}' elif y > 0: sel &= (limits[y - 1] <= df[limitcol]) & (df[limitcol] < limits[y]) title += f'{limits[y - 1]} <= {limitcol} < {limits[y]}' else: sel &= df[limitcol] < limits[0] title += f'{limitcol} < {limits[0]}' ax.hist(df.loc[sel, col]) ax.set_title(title)
inputs/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## PCA # + import warnings warnings.filterwarnings('ignore') # %matplotlib inline # - import numpy as np from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d import proj3d from matplotlib.patches import FancyArrowPatch # + import matplotlib.pyplot as plt import matplotlib.colors as mcolors plt.style.use('fivethirtyeight') plt.rcParams['font.family'] = 'sans-serif' plt.rcParams['font.serif'] = 'Ubuntu' plt.rcParams['font.monospace'] = 'Ubuntu Mono' plt.rcParams['font.size'] = 10 plt.rcParams['axes.labelsize'] = 10 plt.rcParams['axes.labelweight'] = 'bold' plt.rcParams['axes.titlesize'] = 10 plt.rcParams['xtick.labelsize'] = 8 plt.rcParams['ytick.labelsize'] = 8 plt.rcParams['legend.fontsize'] = 10 plt.rcParams['figure.titlesize'] = 12 plt.rcParams['image.cmap'] = 'jet' plt.rcParams['image.interpolation'] = 'none' plt.rcParams['figure.figsize'] = (16, 8) plt.rcParams['lines.linewidth'] = 2 plt.rcParams['lines.markersize'] = 8 colors = ['#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09', '#008fd5', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b', '#810f7c', '#137e6d', '#be0119', '#3b638c', '#af6f09'] cmap = mcolors.LinearSegmentedColormap.from_list("", ["#82cafc", "#069af3", "#0485d1", colors[0], colors[8]]) # + mu_vec1 = np.array([0,0,0]) cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]]) class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20).T mu_vec2 = np.array([1,1,1]) cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]]) class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20).T # + fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') plt.rcParams['legend.fontsize'] = 10 ax.plot(class1_sample[0,:], 
class1_sample[1,:], class1_sample[2,:], 'o', markersize=8, color=colors[0], alpha=0.7, label='class1') ax.plot(class2_sample[0,:], class2_sample[1,:], class2_sample[2,:], 'o', markersize=8, alpha=0.7, color=colors[1], label='class2') plt.title('Samples for class 1 and class 2') ax.legend(loc='upper right') plt.show() # - all_samples = np.concatenate((class1_sample, class2_sample), axis=1) all_samples.shape # + mean_x = np.mean(all_samples[0,:]) mean_y = np.mean(all_samples[1,:]) mean_z = np.mean(all_samples[2,:]) mv = np.array([[mean_x],[mean_y],[mean_z]]) print('Mean Vector:') print(mv) # - S = np.dot((all_samples-mv),(all_samples-mv).T) print('Scatter Matrix:') print(S) cov_mat = np.cov(all_samples) print('Covariance Matrix:') print(cov_mat) # + # eigenvectors and eigenvalues from the scatter matrix eig_val_sc, eig_vec_sc = np.linalg.eig(S) # eigenvectors and eigenvalues for the from the covariance matrix eig_val_cov, eig_vec_cov = np.linalg.eig(cov_mat) for i in range(len(eig_val_sc)): eigvec_sc = eig_vec_sc[:,i].reshape(1,3).T eigvec_cov = eig_vec_cov[:,i].reshape(1,3).T print('Autovettore {0:d}: {1:s}'.format(i+1, str(eigvec_sc.T))) print('Autovalore {0:d} da scatter matrix: {1:12f}'.format(i+1, eig_val_sc[i])) print('Autovalore {0:d} da covariance matrix: {1:.12f}'.format(i+1, eig_val_cov[i])) print('Rapporto: {0:.12f}'.format(eig_val_sc[i]/eig_val_cov[i])) print(40 * '-') # + class Arrow3D(FancyArrowPatch): def __init__(self, xs, ys, zs, *args, **kwargs): FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs) self._verts3d = xs, ys, zs def draw(self, renderer): xs3d, ys3d, zs3d = self._verts3d xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M) self.set_positions((xs[0],ys[0]),(xs[1],ys[1])) FancyArrowPatch.draw(self, renderer) fig = plt.figure(figsize=(8,8)) ax = fig.add_subplot(111, projection='3d') ax.plot(all_samples[0,:], all_samples[1,:], all_samples[2,:], 'o', markersize=8, color=colors[0], alpha=0.5) ax.plot([mean_x], [mean_y], 
[mean_z], 'o', markersize=10, color=colors[1], alpha=0.5) for v in eig_vec_sc.T: a = Arrow3D([mean_x, v[0]], [mean_y, v[1]], [mean_z, v[2]], mutation_scale=20, lw=2, arrowstyle="-|>", color=colors[7]) ax.add_artist(a) ax.set_xlabel('x_values') ax.set_ylabel('y_values') ax.set_zlabel('z_values') plt.title('Eigenvectors') plt.show() # + # Make a list of (eigenvalue, eigenvector) tuples eig_pairs = [(np.abs(eig_val_sc[i]), eig_vec_sc[:,i]) for i in range(len(eig_val_sc))] # Sort the (eigenvalue, eigenvector) tuples from high to low eig_pairs.sort(key=lambda x: x[0], reverse=True) # Visually confirm that the list is correctly sorted by decreasing eigenvalues for i in eig_pairs: print(i[0], i[1]) # - matrix_w = np.hstack((eig_pairs[0][1].reshape(3,1), eig_pairs[1][1].reshape(3,1))) print('Matrix W:') print(matrix_w) Xp = matrix_w.T.dot(all_samples-mv) # + fig = plt.figure(figsize=(16,8)) plt.plot(Xp[0,0:20], Xp[1,0:20], 'o', markersize=7, color=colors[0], alpha=0.7, label='class1') plt.plot(Xp[0,20:40], Xp[1,20:40], 'o', markersize=7, color=colors[1], alpha=0.7, label='class2') plt.xlim([-4,4]) plt.ylim([-4,4]) plt.xlabel('x_values') plt.ylabel('y_values') plt.legend() plt.title('Transformed samples with class labels') plt.show() # + from sklearn.decomposition import PCA as sklearnPCA sklearn_pca = sklearnPCA(n_components=2) sklearn_transf = -sklearn_pca.fit_transform((all_samples-mv).T) sklearn_transf # + fig = plt.figure(figsize=(16,8)) plt.plot(sklearn_transf[0:20,0],sklearn_transf[0:20,1], 'o', markersize=7, color=colors[0], alpha=0.7, label='class1') plt.plot(sklearn_transf[20:40,0], sklearn_transf[20:40,1], 'o', markersize=7, color=colors[1], alpha=0.7, label='class2') plt.xlabel('x_values') plt.ylabel('y_values') plt.xlim([-4,4]) plt.ylim([-4,4]) plt.legend() plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()') plt.show() # + sklearn_pca1 = sklearnPCA(n_components=1) sklearn_transf1 = -sklearn_pca1.fit_transform((all_samples-mv).T) 
sklearn_transf1 # + fig = plt.figure(figsize=(16,8)) plt.plot(sklearn_transf[0:20,0],-np.ones(20), 'o', markersize=7, color=colors[0], alpha=0.7, label='class1') plt.plot(sklearn_transf[20:40,0], np.ones(20), 'o', markersize=7, color=colors[1], alpha=0.7, label='class2') plt.xlabel('x_values') plt.ylabel('y_values') plt.xlim([-4,4]) plt.ylim([-1.5,1.5]) plt.legend() plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()') plt.show() # -
codici/pca.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # # Quality control of RAW data # # In this chapter, we will download some RAW acquistions obtained in core facility and we will look at some specific MS information such as Total Ion Chromatorams, Base Peak Chromatograms, Extract Ion Chromatograms, number of fragmented precursor ions and so on. # This will help us to check for eventual issues related to the sample prepration and the MS acquisition. # # ### Getting the data # # From your terminal, go to a specific folder that will use as a workspace. # Note: on this computer the workspace path is defined as "/mnt/". So please change this prefix according to your configuration in the future sections of this tutorial. # # From your workspace folder type to download the raw data: # # `wget http://proteomique.ipbs.fr/ibip19/raw_data_ibip19.7z` # # TODO: wget http://proteomique.ipbs.fr/ibip19/OECPI190330_43.raw.7z or update the initial archive # # # Then to extract the files type: # # ` # sudo apt install p7zip-full # 7z x raw_data_ibip19.7z # ` # # The first command will install the 7zip software and the second one will use this tool to unzip the data. # # Once unzipped should see the following file hierarchy on your system: # # ``` # mnt # │ # └───cytoc # │ │ OVKAC141014_60.raw.mzDB # │ │ OVKAC141014_88.raw.mzDB # │ │ QEKAC141027_67.raw.mzDB # │ │ QEKAC141106_158.raw.mzDB # │ │ QEKAC141106_170.raw.mzDB # └───hela # │ │ OECCF190314_03.raw.mzDB # │ │ OVKAC190318_11.raw.mzDB # │ │ OVMMA190417_13.raw.mzDB # └───samples # │ OECPI181123_10.raw.mzDB # │ OECPI190322_06.raw.mzDB # │ OECPI190322_09.raw.mzDB # ``` # # This mzDB files correspond to the raw data LC-MS/MS acquisitions from different Thermo instruments (OV = Orbitrap Velos, QE = OE = Orbitrap Velos ETD, QExactive+). 
These files were obtained by converting the input .raw files into the open-source mzDB file format (https://github.com/mzdb/mzdb-specs, https://www.mcponline.org/content/early/2014/12/11/mcp.O114.039115.abstract). # # We will use some R packages to query these mzDB files and perform some basic quality control. # ### Installing the packages # # We will use two packages to read the mzDB files: RSQLite and rmzdb. Execute the following R commands to install them: # + #install.packages("RSQLite") #install.packages("https://github.com/mzdb/rmzdb/releases/download/0.1.1/rmzdb_0.1.1.tar.gz", type="source", repo=NULL, INSTALL_opts=c("--no-multiarch")) # - # ### Loading the packages # # To load the packages run: library(RSQLite) library(rmzdb) # ### Browsing mzDB files # # mzDB files are implemented as SQLite database files. If you want to see the full database schema you can have a look at this file https://github.com/mzdb/mzdb-specs/blob/master/version_0.7.0/mzDB_0.7.0_schema_picture.pdf). To open the files run: # + OECCF190314_03 <- "/mnt/hela/OECCF190314_03.raw.mzDB" OVKAC190318_11 <- "/mnt/hela/OVKAC190318_11.raw.mzDB" OVMMA190417_13 <- "/mnt/hela/OVMMA190417_13.raw.mzDB" sqlite_OECCF190314_03 <- dbConnect(RSQLite::SQLite(), OECCF190314_03) sqlite_OVKAC190318_11 <- dbConnect(RSQLite::SQLite(), OVKAC190318_11) sqlite_OVMMA190417_13 <- dbConnect(RSQLite::SQLite(), OVMMA190417_13) # - # The next two lines we allow us to inspect the tables present in an mzDB file and also the field present in the spectrum table (it should be consistent with what you have seen in 'mzDB_0.7.0_schema_picture.pdf'): dbListTables(sqlite_OECCF190314_03) dbListFields(sqlite_OECCF190314_03,"spectrum") # The spectrum table is main of the mzDB file. It contains the meta-data of MS and MS/MS spectrum. Some columns (param_tree,scan_list,precursor_list,product_list) contains information stored in the XML format. Since it is not easy to query this information we will not use it. 
Columns finishing with 'id' are also not very useful. Useful columns are thus: title, cycle, time, ms_level, activation_type, tic, base_peak_mz, base_peak_intensity, main_precursor_mz, main_precursor_charge, data_points_count. # ### Comparing HeLa complex mixtures # # The 3 provided files (OECCF190314_03, OVKAC190318_11, OVMMA190417_13) correspond to an HeLa cell lysate which has been injected at different time points. This kind of human mixture is very complex and thus quite challenging for our mass spectrometers. It's a perfect way to assess the "good health" of our instruments. Note that all the anlyses we will perform in this section could also be applied on "real samples" (i.e. on your own MS acquistions). # # To verify if the MS acquistions were performed correctly, we will make some basic plots. # We will use simple SQL queries to access these data. We will start to have a look at the number of MS/MS events: # + OECCF190314_03_ms2_count <- dbGetQuery(sqlite_OECCF190314_03, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") OVKAC190318_11_ms2_count <- dbGetQuery(sqlite_OVKAC190318_11, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") OVMMA190417_13_ms2_count <- dbGetQuery(sqlite_OVMMA190417_13, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") hela_ms2_count <- c(OECCF190314_03_ms2_count$n,OVKAC190318_11_ms2_count$n,OVMMA190417_13_ms2_count$n) barplot(hela_ms2_count, main="Comparison of MS/MS events", xlab="Number of MS/MS spectra", names.arg=c("OECCF190314_03", "OVKAC190318_11", "OVMMA190417_13") ) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>How similar are the MS acquisitions in terms of the number of MS/MS events?</ins> # # _Answer_ # # ##### Question II: <ins>Since we know the same sample was injected thre times, would you expect this result?</ins> # # _Answer_ # # ##### Question III: <ins>Is it easy to draw a conclusion from this single figure?</ins> # # _Answer_ # # # Try to 
perform the same analysis at the MS level using the next cell of code (you can get inspiration from the last example): # #### Add your answers here # (double-click here to edit the cell) # # ##### Question: <ins>Are these results in agreement with your previous observations?</ins> # # _Answer_ # # # To have a better idea of what went wrong, you will execute the following lines to create a Total Ion Chromatgram (TIC) of each acquisition file: OECCF190314_03_ms1_tic <- dbGetQuery(sqlite_OECCF190314_03, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") OVKAC190318_11_ms1_tic <- dbGetQuery(sqlite_OVKAC190318_11, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") OVMMA190417_13_ms1_tic <- dbGetQuery(sqlite_OVMMA190417_13, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") # Then to plot the retrieved TICs run: plot(OVMMA190417_13_ms1_tic$rt,OVMMA190417_13_ms1_tic$intensity, type="l", col="green", lwd=1, xlab="Retention time (min)", ylab="MS Intensity", main = "MS1 total ion chromatograms") lines(OECCF190314_03_ms1_tic$rt,OECCF190314_03_ms1_tic$intensity, type="l", col="red", lwd=1) lines(OVKAC190318_11_ms1_tic$rt,OVKAC190318_11_ms1_tic$intensity, type="l", col="blue", lwd=1) legend("topleft",c("OECCF190314_03","OECCF190314_03", "OVKAC190318_11"), lwd=c(1,1), col=c("red","blue","green")) # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>How similar are the chromatograms?</ins> # # _Answer_ # # ##### Question II: <ins>Considering these results and the previous ones, which MS acquisition would you consider as a valid one?</ins> # # _Answer_ # # ##### Question III: <ins>What are your hypotheses regarding issues which may have occurred in the other acquistions?</ins> # # _Answer_ # # # Use the next cell to do the same analysis at the MS2 level in order to verify these observations: # Finally disconnect from the mzDB files to free allocated 
computer resources. ### Disconnect from database ### dbDisconnect(sqlite_OECCF190314_03) dbDisconnect(sqlite_OVKAC190318_11) dbDisconnect(sqlite_OVMMA190417_13) # ### Doing QC on standard samples # # In the last section we have made some quality control of HeLa cells mixture acquistions. While this is a popular solution, another one consists in injecting periodically a digest of a given standard protein such as the Bovine serum albumin, the Cytochrome C or the Myoglobin. The main advantage of this kind of sample is not it can be interleaved with your sample of interest without "polluting" too much the liquid chromatography system (carry over issue). It's thus a nice way to monitor your instrument in almost real-time. # # In this section we will use the rmzdb library to query some Cytochrome C sample acquisitions and then assess the health of the LC-MS chain. # # Let's start by opening the files: # + QEKAC141027_67 <- "/mnt/cytoc/QEKAC141027_67.raw.mzDB" OVKAC141014_60 <- "/mnt/cytoc/OVKAC141014_60.raw.mzDB" OVKAC141014_88 <- "/mnt/cytoc/OVKAC141014_88.raw.mzDB" QEKAC141106_158 <- "/mnt/cytoc/QEKAC141106_158.raw.mzDB" QEKAC141106_170 <- "/mnt/cytoc/QEKAC141106_170.raw.mzDB" mzdb_QEKAC141027_67 <- new(MzDb, QEKAC141027_67) mzdb_OVKAC141014_60 <- new(MzDb, OVKAC141014_60) mzdb_OVKAC141014_88 <- new(MzDb, OVKAC141014_88) mzdb_OVKAC141014_158 <- new(MzDb, QEKAC141106_158) mzdb_OVKAC141014_170 <- new(MzDb, QEKAC141106_170) # - # In the next cell we define the m/z values of the Cytochrome C peptide ions. We will use these values later to peform extract some chromatograms. 
KYPGTK_mz <- 403.7422 KIFVQK_mz <- 381.7473 KTGQAPGFSYTDANK_mz_z2 <- 792.8863 KTGQAPGFSYTDANK_mz_z3 <- 528.9266 KGEREDLIAYLK_mz <- 717.9012 TGPNLHGLFGR_mz_z2 <- 584.8147 TGPNLHGLFGR_mz_z3 <- 390.2122 MIFAGIK_mz <- 390.2278 # To generate the chromatograms we need to create an extractor for each mzDB file, as follow: chromx_QEKAC141027_67 <- new (MzDbExtractor, mzdb_QEKAC141027_67) chromx_OVKAC141014_60 <- new (MzDbExtractor, mzdb_OVKAC141014_60) chromx_OVKAC141014_88 <- new (MzDbExtractor, mzdb_OVKAC141014_88) chromx_OVKAC141014_158 <- new (MzDbExtractor, mzdb_OVKAC141014_158) chromx_OVKAC141014_170 <- new (MzDbExtractor, mzdb_OVKAC141014_170) # We will then call the "get_xic" method to extract the signals. This method takes 6 parameters: # * the ion m/z value # * the ion m/z tolerance # * the minimum retention time # * the maximum retention time # * the MS level # * the SWATH/DIA precursor m/z value (not useful here, so we put -1) # Let's start with a simple XIC example on the file named QEKAC141027_67: # + KYPGTK_67_chrom <- chromx_QEKAC141027_67$get_xic(KYPGTK_mz,0.001,500,1500,1,-1) plot(KYPGTK_67_chrom[,1],KYPGTK_67_chrom[,2], type="l", col="dodgerblue", lwd=1, xlab="Retention time (s)", ylab="MS Intensity",xlim=c(500,1500), main="Chromatogram of KYPGTK in QEKAC141027_67") # - # The previous plot corresponds to what we could expect when there is no LC-MS issue. You can use this a reference. # # Let's make some quality control other mzDB file. 
We will query the same ion: # + KYPGTK_60_chrom <- chromx_OVKAC141014_60$get_xic(KYPGTK_mz,0.001,500,1500,1,-1) KYPGTK_88_chrom <- chromx_OVKAC141014_88$get_xic(KYPGTK_mz,0.001,500,1500,1,-1) KYPGTK_158_chrom <- chromx_OVKAC141014_158$get_xic(KYPGTK_mz,0.001,500,1500,1,-1) KYPGTK_170_chrom <- chromx_OVKAC141014_170$get_xic(KYPGTK_mz,0.001,500,1500,1,-1) ### Before/after needle change ### plot(KYPGTK_170_chrom[,1],KYPGTK_170_chrom[,2], type="l", col="chocolate4", lwd=1, xlim=c(800,1500), xlab="Retention time (s)", ylab="MS Intensity", main = "XICs of KYPGTK") lines(KYPGTK_158_chrom[,1],KYPGTK_158_chrom[,2], type="l", col="darkorchid", lwd=1) lines(KYPGTK_88_chrom[,1],KYPGTK_88_chrom[,2], type="l", col="chocolate1", lwd=1) lines(KYPGTK_60_chrom[,1],KYPGTK_60_chrom[,2], type="l", col="firebrick1", lwd=1) legend("topright",c("OVKAC141014_170","OVKAC141014_158", "OVKAC141014_88", "OVKAC141014_60"), lwd=c(1,1), col=c("chocolate4","darkorchid","chocolate1","firebrick1")) # - # Let's query another ion in the reference file: # + TGPNLHGLFGR_67_chrom <- chromx_QEKAC141027_67$get_xic(TGPNLHGLFGR_mz_z2,0.002,500,3000,1,-1) plot(TGPNLHGLFGR_67_chrom[,1],TGPNLHGLFGR_67_chrom[,2], type="l", col="dodgerblue", lwd=1, xlab="Retention time (s)", ylab="MS Intensity",xlim=c(1000,2000), main="Chromatogram of TGPNLHGLFGR 2+ in QEKAC141027_67") # - # Let's query this second ion in other files: # + TGPNLHGLFGR_60_chrom <- chromx_OVKAC141014_60$get_xic(TGPNLHGLFGR_mz_z2,0.002,500,3000,1,-1) TGPNLHGLFGR_88_chrom <- chromx_OVKAC141014_88$get_xic(TGPNLHGLFGR_mz_z2,0.002,500,3000,1,-1) TGPNLHGLFGR_158_chrom <- chromx_OVKAC141014_158$get_xic(TGPNLHGLFGR_mz_z2,0.002,500,3000,1,-1) TGPNLHGLFGR_170_chrom <- chromx_OVKAC141014_170$get_xic(TGPNLHGLFGR_mz_z2,0.002,500,3000,1,-1) plot(TGPNLHGLFGR_170_chrom[,1],TGPNLHGLFGR_170_chrom[,2], type="l", col="chocolate4", lwd=1, xlim=c(1000,2000), xlab="Retention time (s)", ylab="MS Intensity", main = "XICs of TGPNLHGLFGR") 
lines(TGPNLHGLFGR_158_chrom[,1],TGPNLHGLFGR_158_chrom[,2], type="l", col="darkorchid", lwd=1) lines(TGPNLHGLFGR_88_chrom[,1],TGPNLHGLFGR_88_chrom[,2], type="l", col="chocolate1", lwd=1) lines(TGPNLHGLFGR_60_chrom[,1],TGPNLHGLFGR_60_chrom[,2], type="l", col="firebrick1", lwd=1) legend("topright",c("OVKAC141014_170","OVKAC141014_158", "OVKAC141014_88", "OVKAC141014_60"), lwd=c(1,1), col=c("chocolate4","darkorchid","chocolate1","firebrick1")) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>How similar are the XICs between the different mzDB files?</ins> # # _Answer_ # # ##### Question II: <ins>Considering these chromatograms observed in the reference file, which MS acquisition would you consider as a valid one?</ins> # # _Answer_ # # ##### Question III: <ins>What are your hypotheses regarding issues which may have occurred in the other acquistions?</ins> # # _Answer_ # # # ### Doing QC on real samples # # In the last two sections we have used two kind of samples dedicated to quality control. This helped us to monitor our LC-MS setup and to be sure that we performed our acquisitions in optimal conditions. # # Now we are ging inspect the RAW data of real samples coming from an immunoprecipitation experiment. The goal of the project is to do find partners of the JAL34 protein in "Arabidopsis thaliana" cells. A first experiment was performed in November 2018 (OECPI181123_10.raw) and led to the identification of 288 proteins. The same expriment was done again in March 2019 (OECPI190322_06.raw) and we only identified/validated 2 proteins is this second analysis. And HeLa cell mixture was also analyzed in parallel to verify that the instrument was working correctly (file OECPI190322_09.raw). # # We will reuse our skills in raw data quality control to try to understand what could explain this decrease in terms of protein identification. 
# # Let's start by opening the files: # + sample1811 <- "/mnt/samples/OECPI181123_10.raw.mzDB" sample1903 <- "/mnt/samples/OECPI190322_06.raw.mzDB" sqlite1811 <- dbConnect(RSQLite::SQLite(), sample1811) sqlite1903 <- dbConnect(RSQLite::SQLite(), sample1903) # + sample1811_ms2_count <- dbGetQuery(sqlite1811, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") sample1903_ms2_count <- dbGetQuery(sqlite1903, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") hela_ms2_count <- c(sample1811_ms2_count$n,sample1903_ms2_count$n) barplot(hela_ms2_count, main="Comparison of MS/MS events", xlab="Number of MS/MS spectra", names.arg=c("OECPI181123_10", "OECPI190322_06")) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>Do you observe a signficative difference in terms of the number of MS/MS events?</ins> # # _Answer_ # # Since we have the raw data of an HeLa mixture we could compare it to our previous HeLa reference (OVMMA190417_13): # + helaSample1903 <- "/mnt/samples/OECPI190322_09.raw.mzDB" OVMMA190417_13 <- "/mnt/hela/OVMMA190417_13.raw.mzDB" sqliteHela1903 <- dbConnect(RSQLite::SQLite(), helaSample1903) sqlite_hela_OVMMA190417_13 <- dbConnect(RSQLite::SQLite(), OVMMA190417_13) hela1903_ms2_count <- dbGetQuery(sqliteHela1903, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") hela1904_ms2_count <- dbGetQuery(sqlite_hela_OVMMA190417_13, "SELECT count(*) as n FROM spectrum WHERE ms_level = 2") hela_ms2_count <- c(hela1903_ms2_count$n,hela1904_ms2_count$n) barplot(hela_ms2_count, main="Comparison of MS/MS events", xlab="Number of MS/MS spectra", names.arg=c("HeLa mix 1903", "HeLa mix 1904 (ref)")) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>Can you draw any conclusion from these results?</ins> # # _Answer_ # # Let's compare the total ion chromatograms to see if can get more information there. 
We can start with the HeLa mixtures and then also do it for our samples. # + hela1903_ms1_tic <- dbGetQuery(sqliteHela1903, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") hela1904_ms1_tic <- dbGetQuery(sqlite_hela_OVMMA190417_13, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") plot(hela1903_ms1_tic$rt,hela1903_ms1_tic$intensity, type="l", col="purple", lwd=1, xlab="Retention time (min)", ylab="MS Intensity") lines(hela1904_ms1_tic$rt,hela1904_ms1_tic$intensity, type="l", col="blue", lwd=1) legend("topleft",c("MS1 TIC of HeLa mix 1903","MS1 TIC of HeLa mix 1904 (ref)"), lwd=c(1,1), col=c("purple","blue")) # - # Let's do the same plot on our samples of interest: # + df1811_ms1_tic <- dbGetQuery(sqlite1811, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") df1903_ms1_tic <- dbGetQuery(sqlite1903, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 1") plot(df1811_ms1_tic$rt,df1811_ms1_tic$intensity, type="l", col="blue", lwd=1, xlab="Retention time (min)", ylab="MS Intensity") lines(df1903_ms1_tic$rt,df1903_ms1_tic$intensity, type="l", col="red", lwd=1) legend("topleft",c("OECPI181123_10 MS1 TIC","OECPI190322_06 MS1 TIC"), lwd=c(1,1), col=c("blue","red")) ### This commented code can be used to perform the Base Peak Chromatogram ### #df1811_ms1_bpc <- dbGetQuery(sqlite1811, "SELECT time,base_peak_intensity as intensity FROM spectrum WHERE ms_level = 1") #df1903_ms1_bpc <- dbGetQuery(sqlite1903, "SELECT time,base_peak_intensity as intensity FROM spectrum WHERE ms_level = 1") #plot(df1811_ms1_bpc$time,df1811_ms1_bpc$intensity, type="l", col="blue", lwd=1, xlab="time", ylab="intensity") #lines(df1903_ms1_bpc$time,df1903_ms1_bpc$intensity, type="l", col="red", lwd=1) #legend("topleft",c("OECPI181123_10 MS1 BPC","OECPI190322_06 MS1 BPC"), lwd=c(1,1), col=c("blue","red")) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question 
I: <ins>Do you see significative differences between the compared TICs?</ins> # # _Answer_ # # ##### Question II: <ins>Can you draw any conclusion from these results?</ins> # # _Answer_ # # Let's check if the MS/MS TICs are more informative. We will start with the comparison of HeLa mixtures: # + hela1903_ms2_tic <- dbGetQuery(sqliteHela1903, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 2") hela1904_ms2_tic <- dbGetQuery(sqlite_hela_OVMMA190417_13, "SELECT (time / 60) as rt, tic as intensity FROM spectrum WHERE ms_level = 2") plot(hela1903_ms2_tic$rt,hela1903_ms2_tic$intensity, type="l", col="purple", lwd=1, xlab="Retention time (min)", ylab="MS2 Intensity") lines(hela1904_ms2_tic$rt,hela1904_ms2_tic$intensity, type="l", col="blue", lwd=1) legend("topleft",c("MS2 TIC of HeLa mix 1903","MS2 TIC of HeLa mix 1904 (ref)"), lwd=c(1,1), col=c("purple","blue")) # - # Try to use the next cell to do the same MS2 TIC analysis on the compared samples: # From the MS/MS TIC we can see very intense and frequently fragmented species. We will now query to mzDB files to search for the 10 most intense precursor ions. df1903_top10_highest_precs <- dbGetQuery(sqlite1903, "SELECT main_precursor_mz as mz, (time / 60) as rt FROM spectrum WHERE ms_level = 2 ORDER BY tic DESC LIMIT 10") df1903_top10_highest_precs # Remark: from this table we can see that one precursor is both intense and frequently selected. # #### Add your answers here # (double-click here to edit the cell) # # ##### Question: <ins>Do you have you an idea of the peptide that could correspond to this m/z value? Search in google with "peptide XXX" where XXX is the m/z value of this precursor ion (take only 3 decimals).</ins> # # _Answer_ # # # Let's try to do extract chromatograms for this precursor ion in different files. 
mzdb1811 <- new(MzDb, sample1811) mzdb1903 <- new(MzDb, sample1903) mzdbHela1903 <- new(MzDb, helaSample1903) chromX1811 <- new (MzDbExtractor, mzdb1811) chromX1903 <- new (MzDbExtractor, mzdb1903) chromXHela1903 <- new (MzDbExtractor, mzdbHela1903) # Set the precursor m/z value in the next cell before executing it: # + precursor_mz <- 421.759 ### replace this zero value by the m/z of precursor ion you previously selected precursor_chrom_1811 <- chromX1811$get_xic(precursor_mz,0.001,2000,6000,1,-1) precursor_chrom_1903 <- chromX1903$get_xic(precursor_mz,0.001,2000,6000,1,-1) precursor_chrom_hela <- chromXHela1903$get_xic(precursor_mz,0.001,2000,6000,1,-1) plot(precursor_chrom_1811[,1],precursor_chrom_1811[,2], type="l", col="blue", lwd=1, xlab="time", ylab="intensity") lines(precursor_chrom_1903[,1],precursor_chrom_1903[,2], type="l", col="red", lwd=1) lines(precursor_chrom_hela[,1],precursor_chrom_hela[,2], type="l", col="green", lwd=1) # - # #### Add your answers here # (double-click here to edit the cell) # # ##### Question I: <ins>What do you think about the shape of XIC of the selected precursor in the sample where we had identification issues?</ins> # # _Answer_ # # ##### Question II: <ins>What should be done to solve these issues?</ins> # # _Answer_ # # The experimentalist tried to change the conditions of sample preparation by using another kind of gel. # # Try to use the next cells to analyze the file "OECPI190330_43.raw.mzDB" and check if the issues remained in the new acquisition: sample190330 <- "/mnt/samples/OECPI190330_43.raw.mzDB" sqlite190330 <- dbConnect(RSQLite::SQLite(), sample190330) ### Finally disconnect all databases ### dbDisconnect(sqlite1811) dbDisconnect(sqlite1903) dbDisconnect(sqlite190330) dbDisconnect(sqliteHela1903) dbDisconnect(sqlite_hela_OVMMA190417_13)
pages/qc/.ipynb_checkpoints/raw-data-qc-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6
#     language: python
#     name: python36
# ---

# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.

# # Automated Machine Learning
# _**Classification with Local Compute**_
#
# ## Contents
# 1. [Introduction](#Introduction)
# 1. [Setup](#Setup)
# 1. [Data](#Data)
# 1. [Train](#Train)
# 1. [Results](#Results)
# 1. [Test](#Test)
#

# ## Introduction
#
# In this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.html#optical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.
#
# Make sure you have executed the [configuration](../../../configuration.ipynb) before running this notebook.
#
# Please find the ONNX related documentations [here](https://github.com/onnx/onnx).
#
# In this notebook you will learn how to:
# 1. Create an `Experiment` in an existing `Workspace`.
# 2. Configure AutoML using `AutoMLConfig`.
# 3. Train the model using local compute with ONNX compatible config on.
# 4. Explore the results and save the ONNX model.

# ## Setup
#
# As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.

# +
import logging

from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets

import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig

# +
# Load the workspace definition saved by the configuration notebook (config.json).
ws = Workspace.from_config()

# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-classification-onnx'
project_folder = './sample_projects/automl-classification-onnx'

experiment = Experiment(ws, experiment_name)

# Summarize the environment in a one-row dataframe for display.
output = {
    'SDK version': azureml.core.VERSION,
    'Subscription ID': ws.subscription_id,
    'Workspace Name': ws.name,
    'Resource Group': ws.resource_group,
    'Location': ws.location,
    'Project Directory': project_folder,
    'Experiment Name': experiment.name,
}
# Show full cell contents: None means "no truncation" (the old -1 sentinel
# is deprecated since pandas 1.0).
pd.set_option('display.max_colwidth', None)
outputDf = pd.DataFrame(data=output, index=[''])
outputDf.T
# -

# ## Data
#
# This uses scikit-learn's [load_digits](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) method.

# +
digits = datasets.load_digits()

# Exclude the first 100 rows from training so that they can be used for test.
X_train = digits.data[100:,:]
y_train = digits.target[100:]
# -

# ## Train with enable ONNX compatible models config on
#
# Instantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
#
# Set the parameter enable_onnx_compatible_models=True, if you also want to generate the ONNX compatible models. Please note, the forecasting task and TensorFlow models are not ONNX compatible yet.
#
# |Property|Description|
# |-|-|
# |**task**|classification or regression|
# |**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>|
# |**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
# |**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
# |**X**|(sparse) array-like, shape = [n_samples, n_features]|
# |**y**|(sparse) array-like, shape = [n_samples, ], Multi-class targets.|
# |**enable_onnx_compatible_models**|Enable the ONNX compatible models in the experiment.|
# |**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|

automl_config = AutoMLConfig(task='classification',
                             debug_log='automl_errors.log',
                             primary_metric='AUC_weighted',
                             iteration_timeout_minutes=60,
                             iterations=10,
                             verbosity=logging.INFO,
                             X=X_train,
                             y=y_train,
                             enable_onnx_compatible_models=True,
                             path=project_folder)

# Call the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.
# In this example, we specify `show_output = True` to print currently running iterations to the console.

local_run = experiment.submit(automl_config, show_output=True)

local_run

# ## Results

# #### Widget for Monitoring Runs
#
# The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. The widget will refresh once per minute, so you should see the graph update as child runs complete.
#
# **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details.

from azureml.widgets import RunDetails
RunDetails(local_run).show()

# ### Retrieve the Best ONNX Model
#
# Below we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
# # Set the parameter return_onnx_model=True to retrieve the best ONNX model, instead of the Python model. best_run, onnx_mdl = local_run.get_output(return_onnx_model=True) # ### Save the best ONNX model from azureml.train.automl._vendor.automl.client.core.common.onnx_convert import OnnxConverter onnx_fl_path = "./best_model.onnx" OnnxConverter.save_onnx_model(onnx_mdl, onnx_fl_path)
how-to-use-azureml/automated-machine-learning/classification-with-onnx/auto-ml-classification-with-onnx.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Introduction
#
# Use of machine learning in the quantitative investment field is, by all indications, skyrocketing. The proliferation of easily accessible data - both traditional and alternative - along with some very approachable frameworks for machine learning models - is encouraging many to explore the arena.
#
# However, these financial ML explorers are learning that there are many ways in which using ML to predict financial time series differs greatly from labeling cat pictures or flagging spam. Among these differences is that traditional model performance metrics (RSQ, MSE, accuracy, F1, etc...) can be misleading and incomplete.
#
# Over the past several years, I've developed a set of metrics which have proved useful for comparing and optimizing financial time series models. These metrics attempt to measure models' _predictive power_ but also their _trade-ability_, critically important for those who actually intend to _use_ their models in the real world.
#
# In this post, I will present a general outline of my approach and will demonstrate a few of the most useful metrics I've added to my standard "scorecard". I look forward to hearing how others may think to extend the concept. If you'd like to replicate and experiment with the below code, _you can download the source notebook for this post by right-clicking on the below button and choosing "save link as"_
#
# <a style="text-align: center;" href="https://github.com/convergenceIM/alpha-scientist/blob/master/content/05_model_scoring.ipynb"><img src="images/button_ipynb-notebook.png" title="download ipynb" /></a>
#
# If you haven't already checked out the previous four installments in this tutorial, you may want to review those first. Many of the coding patterns used below are discussed at length:
#
# * [Part 1: Data Management](data_management.html)
# * [Part 2: Feature Engineering](feature_engineering.html)
# * [Part 3: Feature Selection](feature_selection.html)
# * [Part 4: Walk-forward model building](walk_forward_model_building.html)
#
#
# ### Preparing sample data
#
# I will illustrate this metrics methodology using a simple example of synthetically generated data (see previous posts in this tutorial for explanations of the below method of creating data).

# +
## Remove / Replace this code with a link to your quandl key, if you have one
import sys
sys.path.append('/anaconda/')
import config
quandl_key = config.quandl_key

from IPython.core.display import Image

import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like # May be necessary to fix below issue
# https://github.com/pydata/pandas-datareader/issues/534
import pandas_datareader.data as web

# %matplotlib inline


def get_symbols(symbols, data_source, quandl_key=None, begin_date=None, end_date=None):
    """Download adjusted OHLCV history for each symbol and stack the results
    into one dataframe indexed by (date, symbol).

    NOTE(review): quandl_key is accepted but never forwarded to DataReader --
    pandas-datareader reads the Quandl API key from its own configuration;
    confirm before relying on this argument.
    """
    frames = []
    for symbol in symbols:
        df = web.DataReader(symbol, data_source, begin_date, end_date)\
            [['AdjOpen', 'AdjHigh', 'AdjLow', 'AdjClose', 'AdjVolume']].reset_index()
        df.columns = ['date', 'open', 'high', 'low', 'close', 'volume']
        df['symbol'] = symbol  # add symbol col so we can keep all in the same dataframe
        frames.append(df.set_index(['date', 'symbol']))
    if not frames:  # preserve the old behavior of returning an empty frame
        return pd.DataFrame()
    # Concatenate once at the end: repeatedly concat-ing inside the loop
    # re-copies all previously collected rows on every iteration (quadratic).
    return pd.concat(frames, axis=0).sort_index()


prices = get_symbols(['AAPL','CSCO','AMZN','YHOO','MSFT'],
                     data_source='quandl',quandl_key=quandl_key,begin_date='2012-01-01',end_date=None)

# Note: we're only using real price data to generate an index set.
# We will make synthetic features and outcomes below instead of deriving from price
# -

# The below code generates several features then _synthetically generates_ an outcome series from them (along with noise). This guarantees that the features will be informative, since the outcome has been constructed to ensure a relationship.

# +
num_obs = prices.close.count()


def add_memory(s, n_days=50, mem_strength=0.1):
    """Add autoregressive behavior (an EWM "memory") to a series of data.

    Blends each observation with the exponentially weighted mean of its own
    symbol's history: (1 - mem_strength) * x + mem_strength * ewm(x).
    """
    add_ewm = lambda x: (1 - mem_strength) * x + mem_strength * x.ewm(n_days).mean()
    out = s.groupby(level='symbol').apply(add_ewm)
    return out


# generate feature data (unseeded random draws -- each run differs)
f01 = pd.Series(np.random.randn(num_obs), index=prices.index)
f01 = add_memory(f01, 10, 0.1)
f02 = pd.Series(np.random.randn(num_obs), index=prices.index)
f02 = add_memory(f02, 10, 0.1)
f03 = pd.Series(np.random.randn(num_obs), index=prices.index)
f03 = add_memory(f03, 10, 0.1)
f04 = pd.Series(np.random.randn(num_obs), index=prices.index)
f04 = f04  # no memory

features = pd.concat([f01, f02, f03, f04], axis=1)

## now, create response variable such that it is related to features
# f01 becomes increasingly important, f02 becomes decreasingly important,
# f03 oscillates in importance, f04 is stationary,
# and finally a noise component is added

outcome = f01 * np.linspace(0.5, 1.5, num_obs) + \
          f02 * np.linspace(1.5, 0.5, num_obs) + \
          f03 * pd.Series(np.sin(2*np.pi*np.linspace(0, 1, num_obs)*2)+1, index=f03.index) + \
          f04 + \
          np.random.randn(num_obs) * 3
outcome.name = 'outcome'
# -

# ### Generating models and predictions
# Imagine that we created a simple linear model (such as below) and wanted to measure its effectiveness at prediction.
#
# >Note: we'll follow the walk-forward modeling process described in the [previous post](walk_forward_model_building.html). If you don't understand the below code snippet (and want to...) please check out that post.
# +
from sklearn.linear_model import LinearRegression

## fit models for each timestep on a walk-forward basis
# Quarterly re-fit dates; the final date is dropped because there is no
# out-of-sample window after it to predict.
recalc_dates = features.resample('Q',level='date').mean().index.values[:-1]

# dtype=object: this Series stores fitted estimator objects, not numbers.
# (Assigning objects into a default float64 Series relies on silent upcasting,
# which modern pandas deprecates.)
models = pd.Series(index=recalc_dates, dtype=object)
for date in recalc_dates:
    # Train only on data up to (and including) the recalc date.
    X_train = features.xs(slice(None,date),level='date',drop_level=False)
    y_train = outcome.xs(slice(None,date),level='date',drop_level=False)

    model = LinearRegression()
    model.fit(X_train,y_train)
    models.loc[date] = model

## predict values walk-forward (all predictions out of sample)
begin_dates = models.index
# Each model predicts from its fit date until the next fit date; a far-future
# sentinel closes the last window.
end_dates = models.index[1:].append(pd.to_datetime(['2099-12-31']))

predictions = pd.Series(index=features.index, dtype=float)
for i,model in enumerate(models): #loop thru each models object in collection
    X = features.xs(slice(begin_dates[i],end_dates[i]),level='date',drop_level=False)
    p = pd.Series(model.predict(X),index=X.index)
    predictions.loc[X.index] = p
# -

# ### Traditional model evaluation
# So we've got a model, we've got a sizeable set of (out of sample) predictions. Is the model any good? Should we junk it, tune it, or trade it? Since this is a regression model, I'll throw our data into `scikit-learn`'s metrics package.
# +
import sklearn.metrics as metrics

# make sure we have 1-for-1 mapping between pred and true
common_idx = outcome.dropna().index.intersection(predictions.dropna().index)
y_true = outcome[common_idx]
y_true.name = 'y_true'
y_pred = predictions[common_idx]
y_pred.name = 'y_pred'

# dtype=float avoids the deprecated object-dtype default of an empty Series.
standard_metrics = pd.Series(dtype=float)

standard_metrics.loc['explained variance'] = metrics.explained_variance_score(y_true, y_pred)
standard_metrics.loc['MAE'] = metrics.mean_absolute_error(y_true, y_pred)
standard_metrics.loc['MSE'] = metrics.mean_squared_error(y_true, y_pred)
standard_metrics.loc['MedAE'] = metrics.median_absolute_error(y_true, y_pred)
standard_metrics.loc['RSQ'] = metrics.r2_score(y_true, y_pred)

print(standard_metrics)
# -

# <img src="images/confused_scientist.jpg" width="400">
#
# These stats don't really tell us much by themselves. You may have an intuition for r-squared so that may give you a level of confidence in the models. However, even this metric [has problems](https://onlinecourses.science.psu.edu/stat501/node/258/) not to mention does not tell us much about the practicality of this signal from a trading point of view.
#
# True, we could construct some trading rules around this series of predictions and perform a formal backtest on that. However, that is quite time consuming and introduces a number of extraneous variables into the equation.

# ### A better way... Creating custom metrics
# Instead of relying on generic ML metrics, we will create several custom metrics that will hopefully give a more complete picture of strength, reliability, and practicality of these models.
#
# I'll work through an example of creating an extensible _scorecard_ with about a half dozen custom-defined _metrics_ as a starting point. You can feel free to extend this into a longer scorecard which is suited to your needs and beliefs. In my own trading, I use about 25 metrics in a standard "scorecard" each time I evaluate a model. You may prefer to use more, fewer, or different metrics but the process should be applicable.
#
# I'll focus only on regression-oriented metrics (i.e., those which use a continuous prediction rather than a binary or classification prediction). It's trivial to re-purpose the same framework to a classification-oriented environment.

# ### Step 1: Preprocess data primitives
# Before implementing specific metrics we need to do some data pre-processing. It'll become clear why doing this first will save considerable time later when calculating aggregate metrics.
#
# To create these intermediate values, you'll need the following inputs:
# * __y_pred:__ the _continuous variable_ prediction made by your model for each timestep, for each symbol
# * __y_true:__ the _continuous variable_ actual outcome for each timestep, for each symbol.
# * __index:__ this is the unique identifier for each prediction or actual result. If working with a single instrument, then you can simply use date (or time or whatever). If you're using multiple instruments, a multi-index with (date/symbol) is necessary.
#
# In other words, if your model is predicting one-day price changes, you'd want your y_pred to be the model's predictions made as of March 9th (for the coming day), indexed as `2017-03-09` and you'd want the actual _future_ outcome which will play out in the next day also aligned to Mar 9th. This "peeking" convention is very useful for working with large sets of data across different time horizons. It is described ad nauseum in [Part 1: Data Management](data_management.html).
#
# The raw input data we need to provide might look something like this:

print(pd.concat([y_pred,y_true],axis=1).tail())

# We will feed this data into a simple function which will return a dataframe with the y_pred and y_true values, along with several other useful derivative values. These derivative values include:
#
# * __sign_pred:__ positive or negative sign of prediction
# * __sign_true:__ positive or negative sign of true outcome
# * __is_correct:__ 1 if sign_pred == sign_true, else 0
# * __is_incorrect:__ opposite
# * __is_predicted:__ 1 if the model has made a valid prediction, 0 if not. This is important if models only emit predictions when they have a certain level of confidence
# * __result:__ the profit (loss) resulting from betting one unit in the direction of the sign_pred. This is the continuous variable result of following the model

# +
def make_df(y_pred,y_true):
    """Combine predictions and outcomes into one dataframe and add the
    derivative columns described above (signs, hit/miss flags, unit P&L)."""
    y_pred.name = 'y_pred'
    y_true.name = 'y_true'

    df = pd.concat([y_pred,y_true],axis=1)

    # np.sign applied to the whole Series is vectorized (the original
    # .apply(np.sign) called it once per element); NaNs stay NaN.
    df['sign_pred'] = np.sign(df.y_pred)
    df['sign_true'] = np.sign(df.y_true)
    df['is_correct'] = 0
    df.loc[df.sign_pred * df.sign_true > 0 ,'is_correct'] = 1 # only registers 1 when prediction was made AND it was correct
    df['is_incorrect'] = 0
    df.loc[df.sign_pred * df.sign_true < 0,'is_incorrect'] = 1 # only registers 1 when prediction was made AND it was wrong
    df['is_predicted'] = df.is_correct + df.is_incorrect
    df['result'] = df.sign_pred * df.y_true
    return df

df = make_df(y_pred,y_true)
print(df.dropna().tail())
# -

# ### Defining our metrics
# With this set of intermediate variables pre-processed, we can more easily calculate metrics. The metrics we'll start with here include things like:
# * __Accuracy:__ Just as the name suggests, this measures the percent of predictions that were _directionally_ correct vs. incorrect.
# * __Edge:__ perhaps the most useful of all metrics, this is the expected value of the prediction over a sufficiently large set of draws. Think of this like a blackjack card counter who knows the expected profit on each dollar bet when the odds are at a level of favorability
# * __Noise:__ critically important but often ignored, the noise metric estimates how dramatically the model's predictions vary from one day to the next. As you might imagine, a model which abruptly changes its mind every few days is much harder to follow (and much more expensive to trade) than one which is a bit more steady.
#
# The below function takes in our pre-processed data primitives and returns a scorecard with `accuracy`, `edge`, and `noise`.

# +
def calc_scorecard(df):
    """Scorecard v1: directional accuracy (%), edge (mean unit P&L per
    prediction), and noise (mean day-over-day change in the prediction)."""
    scorecard = pd.Series(dtype=float)

    # building block metrics
    scorecard.loc['accuracy'] = df.is_correct.sum()*1. / (df.is_predicted.sum()*1.)*100
    scorecard.loc['edge'] = df.result.mean()
    scorecard.loc['noise'] = df.y_pred.diff().abs().mean()

    return scorecard

calc_scorecard(df)
# -

# Much better. I now know that we've been directionally correct about two-thirds of the time, and that following this signal would create an edge of ~1.5 units per time period.
#
# Let's keep going. We can now easily combine and transform things to derive new metrics. The below function shows several examples, including:
# * __y_true_chg__ and __y_pred_chg:__ The average magnitude of change (per period) in y_true and y_pred.
# * __prediction_calibration:__ A simple ratio of the magnitude of our predictions vs. magnitude of truth. This gives some indication of whether our model is properly tuned to the size of movement in addition to the direction of it.
# * __capture_ratio:__ Ratio of the "edge" we gain by following our predictions vs. the actual daily change. 100 would indicate that we were _perfectly_ capturing the true movement of the target variable.

# +
def calc_scorecard(df):
    """Scorecard v2: adds magnitude-calibration metrics on top of the
    building blocks."""
    scorecard = pd.Series(dtype=float)

    # building block metrics
    scorecard.loc['accuracy'] = df.is_correct.sum()*1. / (df.is_predicted.sum()*1.)*100
    scorecard.loc['edge'] = df.result.mean()
    scorecard.loc['noise'] = df.y_pred.diff().abs().mean()

    # derived metrics
    scorecard.loc['y_true_chg'] = df.y_true.abs().mean()
    scorecard.loc['y_pred_chg'] = df.y_pred.abs().mean()
    scorecard.loc['prediction_calibration'] = scorecard.loc['y_pred_chg']/scorecard.loc['y_true_chg']
    scorecard.loc['capture_ratio'] = scorecard.loc['edge']/scorecard.loc['y_true_chg']*100

    return scorecard

calc_scorecard(df)
# -

# Additionally, metrics can be easily calculated for only long or short predictions (for a two-sided model) or separately for positions which ended up being winners and losers.
# * __edge_long__ and __edge_short:__ The "edge" for only long signals or for short signals.
# * __edge_win__ and __edge_lose:__ The "edge" for only winners or for only losers.

# +
def calc_scorecard(df):
    """Scorecard v3 (final): also breaks the edge down by trade direction
    (long/short) and by outcome (win/lose), each relative to the mean move."""
    scorecard = pd.Series(dtype=float)

    # building block metrics
    scorecard.loc['accuracy'] = df.is_correct.sum()*1. / (df.is_predicted.sum()*1.)*100
    scorecard.loc['edge'] = df.result.mean()
    scorecard.loc['noise'] = df.y_pred.diff().abs().mean()

    # derived metrics
    scorecard.loc['y_true_chg'] = df.y_true.abs().mean()
    scorecard.loc['y_pred_chg'] = df.y_pred.abs().mean()
    scorecard.loc['prediction_calibration'] = scorecard.loc['y_pred_chg']/scorecard.loc['y_true_chg']
    scorecard.loc['capture_ratio'] = scorecard.loc['edge']/scorecard.loc['y_true_chg']*100

    # metrics for a subset of predictions
    scorecard.loc['edge_long'] = df[df.sign_pred == 1].result.mean() - df.y_true.mean()
    scorecard.loc['edge_short'] = df[df.sign_pred == -1].result.mean() - df.y_true.mean()

    scorecard.loc['edge_win'] = df[df.is_correct == 1].result.mean() - df.y_true.mean()
    scorecard.loc['edge_lose'] = df[df.is_incorrect == 1].result.mean() - df.y_true.mean()

    return scorecard

calc_scorecard(df)
# -

# From this slate of metrics, we've gained much more insight than we got from MSE, R-squared, etc...
# * The model is predicting with a strong directional accuracy
# * We are generating about 1.4 units of "edge" (expected profit) each prediction, which is about half of the total theoretical profit
# * The model makes more on winners than it loses on losers
# * The model is equally valid on both long and short predictions
#
# If this were real data, I would be rushing to put this model into production!
#
# ### Metrics over time
# Critically important when considering using a model in live trading is to understand (a) how consistent the model's performance has been, and (b) whether its current performance has degraded from its past. Markets have a way of discovering and eliminating past sources of edge.
#
# Here, a two line function will calculate each metric by year:

# +
def scorecard_by_year(df):
    """Compute the scorecard separately for each calendar year.

    Groups on the year extracted from the 'date' index level. Unlike the
    original version, this does not add a 'year' column to the caller's
    dataframe as a side effect.
    """
    years = df.index.get_level_values('date').year
    return df.groupby(years).apply(calc_scorecard).T

print(scorecard_by_year(df))
# -

# It's just as simple to compare performance across symbols (or symbol groups, if you've defined those):

# +
def scorecard_by_symbol(df):
    """Compute the scorecard separately for each symbol in the index."""
    return df.groupby(level='symbol').apply(calc_scorecard).T

print(scorecard_by_symbol(df))
# -

# ### Comparing models
# The added insight we get from this methodology comes when wanting to make comparisons between models, periods, segments, etc...
#
# To illustrate, let's say that we're comparing two models, a linear regression vs. a random forest, for performance on a training set and a testing set (pretend for a moment that we didn't adhere to [Walk-forward model building](walk_forward_model_building.html) practices...).
# +
from sklearn.model_selection import train_test_split
# FIX: LinearRegression is instantiated below but was never imported — the
# original cell imported only ElasticNetCV, Lasso and Ridge (unused here),
# so running it raised NameError.  Keep the originals for compatibility.
from sklearn.linear_model import LinearRegression, ElasticNetCV, Lasso, Ridge
from sklearn.ensemble import RandomForestRegressor

# Hold out the final 20% of rows; shuffle=False preserves time ordering so
# the test set is strictly "in the future" relative to training.
X_train, X_test, y_train, y_test = train_test_split(features, outcome, test_size=0.20, shuffle=False)

# model 1: linear regression
model1 = LinearRegression().fit(X_train, y_train)
model1_train = pd.Series(model1.predict(X_train), index=X_train.index)
model1_test = pd.Series(model1.predict(X_test), index=X_test.index)

# model 2: random forest ensemble
model2 = RandomForestRegressor().fit(X_train, y_train)
model2_train = pd.Series(model2.predict(X_train), index=X_train.index)
model2_test = pd.Series(model2.predict(X_test), index=X_test.index)

# create dataframes for each (make_df is defined earlier in this notebook)
model1_train_df = make_df(model1_train, y_train)
model1_test_df = make_df(model1_test, y_test)
model2_train_df = make_df(model2_train, y_train)
model2_test_df = make_df(model2_test, y_test)

# score each model/segment combination and label the columns
s1 = calc_scorecard(model1_train_df)
s1.name = 'model1_train'
s2 = calc_scorecard(model1_test_df)
s2.name = 'model1_test'
s3 = calc_scorecard(model2_train_df)
s3.name = 'model2_train'
s4 = calc_scorecard(model2_test_df)
s4.name = 'model2_test'

print(pd.concat([s1, s2, s3, s4], axis=1))
# -

# This quick and dirty scorecard comparison gives us a great deal of useful information. We learn that:
# * The relatively simple linear regression (model1) does a very good job of prediction, correct about 68% of the time, capturing >50% of available price movement (this is very good) during training
# * Model1 holds up very well out of sample, performing nearly as well on test as train
# * Model2, a more complex random forest ensemble model, appears _far_ superior on the training data, capturing 90%+ of available price action, but appears quite overfit and does not perform nearly as well on the test set.
#
# ### Summary
# In this tutorial, we've covered a framework for evaluating models in a market prediction context and have demonstrated a few useful metrics.
However, the approach can be extended much further to suit your needs. You can consider: # * Adding new metrics to the standard scorecard # * Comparing scorecard metrics for subsets of the universe. For instance, each symbol or grouping of symbols # * Calculating and plotting performance metrics across time to validate robustness or to identify trends # # In the final post of this series, I'll present a unique framework for creating an _ensemble model_ to blend together the results of your many different forecasting models. # # Please feel free to add to the comment section with your good ideas for useful metrics, with questions/comments on this post, and topic ideas for future posts. # ### One last thing... # # If you've found this post useful, please follow [@data2alpha](https://twitter.com/data2alpha) on twitter and forward to a friend or colleague who may also find this topic interesting. # # Finally, take a minute to leave a comment below - either to discuss this post or to offer an idea for future posts. Thanks for reading!
content/05_Model_Scoring.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Observations and Insights # # ## Dependencies and starter code # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import scipy.stats as st # Study data files mouse_metadata = "data/Mouse_metadata.csv" study_results = "data/Study_results.csv" # Read the mouse data and the study results mouse_metadata = pd.read_csv(mouse_metadata) study_results = pd.read_csv(study_results) # Combine the data into a single dataset mice = pd.merge(study_results, mouse_metadata, on = "Mouse ID", how = 'left') mice.head() # - # ## Summary statistics len(mice["Mouse ID"].unique()) #Duplicate Mice duplicates = mice.loc[mice.duplicated(subset = ["Mouse ID", "Timepoint"]),"Mouse ID"].unique() duplicates #Clean data Frame by droping duplicates mice = mice[mice["Mouse ID"].isin(duplicates) == False] mice.head() mice["Mouse ID"].nunique() # + Regimen_group = mice.groupby(["Drug Regimen"]) mean = Regimen_group["Tumor Volume (mm3)"].mean() median = Regimen_group["Tumor Volume (mm3)"].median() variance = Regimen_group["Tumor Volume (mm3)"].var() standard_dev = Regimen_group["Tumor Volume (mm3)"].std() sem = Regimen_group["Tumor Volume (mm3)"].sem() Regimens_data = pd.DataFrame({"Tumor Vol Mean" : mean, "Tumor Vol Median" : median, "Variance" : variance, "Std" : standard_dev, "SEM" : sem}) Regimens_data # - Regimen_group = mice.groupby(["Drug Regimen"]) Regimen_group.head() # ## Bar plots # + # Generate a bar plot showing number of data points for each treatment regimen using pandas counts = mice["Drug Regimen"].value_counts() counts.plot(kind = "bar") plt.xlabel("Drug Reg") plt.xticks(rotation = 90) plt.ylabel("Number of Data Points") plt.show() #plt.bar(x_axis, count, color='r', alpha=0.5, align="center") # + # Generate a bar plot showing number 
of data points for each treatment regimen using pyplot counts = mice["Drug Regimen"].value_counts() plt.bar(counts.index.values, counts.values) plt.xlabel("Drug Reg") plt.xticks(rotation = 90) plt.ylabel("Number of Data Points") plt.show() # - # ## Pie plots # + # Generate a pie plot showing the distribution of female versus male mice using pandas # + # Generate a pie plot showing the distribution of female versus male mice using pyplot # - # ## Quartiles, outliers and boxplots # + # Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers. # + # Generate a box plot of the final tumor volume of each mouse across four regimens of interest # - # ## Line and scatter plots # + # Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin # + # Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen # + # Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen # -
Pymaceuticals/.ipynb_checkpoints/pymaceuticals_starter-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/"} id="xycs6CRXlQNP" outputId="457d355b-a979-4132-f5c3-98ee97207df1" # ! pip install --upgrade pip -q # ! pip install git+https://github.com/huggingface/transformers.git -q # ! pip install git+https://github.com/huggingface/datasets.git -q # + colab={"base_uri": "https://localhost:8080/"} id="NE3c5fsS_Eqf" outputId="c8ea24ff-6625-4bd8-93ca-7d190c26ef87" from google.colab import drive drive.mount('/gdrive') # + id="VXOnvGlrt3U8" # ! head -n 700000 /gdrive/My\ Drive/SDU_2/external_data/wiki_article.txt > train.txt # ! tail -n 700000 /gdrive/My\ Drive/SDU_2/external_data/wiki_article.txt > val.txt # + colab={"base_uri": "https://localhost:8080/"} id="Jyq4Bu3LAZBO" outputId="5a3d6f75-3e91-4952-b427-7310019ae56e" # ! rm -r /gdrive/My\ Drive/SDU_2/scibert_scivocab_uncased_article && mkdir /gdrive/My\ Drive/SDU_2/scibert_scivocab_uncased_article # ! python /gdrive/My\ Drive/SDU_2/run_mlm.py \ # --model_name_or_path /gdrive/My\ Drive/SDU_2/scibert_scivocab_uncased \ # --train_file train.txt \ # --validation_file val.txt \ # --do_train \ # --do_eval\ # --output_dir /gdrive/My\ Drive/SDU_2/scibert_scivocab_uncased_article \ # --line_by_line \ # --max_seq_length 256\ # --num_train_epochs 1\ # --per_device_train_batch_size 16\ # --per_device_eval_batch_size 16\ # + id="tGYyDc-AzMOQ"
notebooks/gpu-scibert-lm-fine-tuning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 反向传播(Backpropagation)笔记 # # 反向传播是深度学习的基石。 # # ## 导数 # # 先回顾下导数: # # $$\frac{df(x)}{dx}=\lim_{h->0}\frac{f(x+h)-f(x)}{h}$$ # # 函数在每个变量的导数就是偏导数。 # # 对于函数$f(x,y)=x+y$,$\frac{\partial f}{\partial x}=1$,同时,$\frac{\partial f}{\partial y}=1$ # # 梯度就是偏导数组成的矢量。上述例子中,$\Delta f=[\frac{\partial f}{\partial x},\frac{\partial f}{\partial y}]$。 # # # ## 链式法则 # # 对于简单函数,我们可以根据公式直接计算出其导数。但是对于复杂的函数,我们就没那么容易直接写出导数。但是我们有**链式法则(chain rule)**。 # # 定义不多说,咱们举个例子,感受一下链式法则的魅力。 # # 我们熟悉的sigmoid函数$\sigma(x)=\frac{1}{1+e^{-x}}$,如果你记不住它的导数,我们怎么求解呢? # # 求解步骤如下: # # * 将函数模块化,分成多个基本的部分,对于每一个部分都可以使用简单的求导法则进行求导 # * 使用链式法则,将这些导数链接起来,计算出最终的导数 # # 具体如下: # # # * 令$a=x$,则 $\frac{\partial a}{\partial x}=1$ # * 令$b=-a$,则 $\frac{\partial b}{\partial a}=-1$ # * 令$c=e^{b}$,则 $\frac{\partial c}{\partial b}=e^{b}$ # * 令$d=1+c$,则 $\frac{\partial d}{\partial c}=1$ # * 令$e=\frac{1}{d}$,则 $\frac{\partial e}{\partial d}=\frac{-1}{d^2}$ # # 上面的e实际上就是我们的$\sigma(x)$,那么根据链式法则,有: # # $$ # \begin{align} # \frac{\partial \sigma(x)}{\partial x} &=\frac{\partial e}{\partial x} \\ # &=\frac{\partial e}{\partial d}\cdot\frac{\partial d}{\partial c}\cdot\frac{\partial c}{\partial b}\cdot\frac{\partial b}{\partial a}\cdot\frac{\partial a}{\partial x} \\ # &=\frac{-1}{d^2}\cdot1\cdot e^{b}\cdot-1\cdot1 \\ # &=\frac{e^b}{d^{2}} \\ # &=\frac{e^{-x}}{(1+e^{-x})^2} \\ # &=(1-\sigma(x))\cdot\sigma(x) # \end{align} # $$ # # sigmoid函数的导数可以直接用自身表示,这也是很奇妙的性质了。这样的求导过程是不是很简单? # # ## 反向传播代码实现 # # 求导和链式法则我都会了,那么具体的前向传播和反向传播的代码是怎么样的呢? 
# # 这次我们使用一个更复杂一点点的例子: # # $$f(x,y)=\frac{x+\sigma(x)}{\sigma(x)+(x+y)^2}$$ # # 我们先看下它地forward pass代码: # + import math x = 3 y = -4 sigy = 1.0 / (1 + math.exp(-y)) # sigmoid function num = x + sigy # 分子 sigx = 1.0 / (1 + math.exp(-x)) xpy = x + y xpy_sqr = xpy**2 den = sigx + xpy_sqr # 分母 invden = 1.0 / den f = num * invden # 函数 # - # 上述过程很简单对不对,就是把复杂的函数拆解成一个一个简单函数。 # # 我们看看接下来的反向传播过程: dnum = invden # 因为$f = num * invden$,所以有$\frac{\partial f}{\partial num} = invden$,也就是$dnum = invden$ # + dinvden = num # 同理 dden = (-1.0 / (den**2)) * dinvden # 链式法则 # - # 展开来说:$\frac{\partial invden}{\partial den}=\frac{-1}{den^2}$,又$\frac{\partial f}{\partial invden}=num$,所以$dden=\frac{\partial f}{\partial den}=\frac{partial f}{\partial invden}\cdot \frac{\partial invden}{\partial den} = \frac{-1.0}{den^2}\cdot dinvden$ # # 所以,同理,我们可以写出所有的导数: # + dsigx = (1) * dden dxpy_sqr = (1) * dden dxpy = (2 * xpy) * dxpy_sqr # backprob xpy = x + y dx = (1) * dxpy dy = (1) * dxpy # 这里开始,请注意使用的是"+=",而不是"=” dx += ((1 - sigx) * sigx) * dsigx # dsigma(x) = (1 - sigma(x))*sigma(x) dx += (1) * dnum # backprob num = x + sigy dsigy = (1) * dnum # 注意“+=” dy += ((1 - sigy) * sigy) * dsigy # - # 问题: # # * 上面计算过程中,为什么要用“+=”替代“=”呢? # # 如果变量x,y在前向传播的表达式中出现多次,那么进行反向传播的时候就要非常小心,使用+=而不是=来累计这些变量的梯度(不然就会造成覆写)。这是遵循了在微积分中的多元链式法则,该法则指出如果变量在线路中分支走向不同的部分,那么梯度在回传的时候,就应该进行累加。 #
backpropagation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np


def svd_report(A):
    """Compute the full SVD of *A*, print its factors, and return (U, s, V).

    The original notebook repeated this stanza verbatim for every example;
    factoring it out removes the triplication while printing exactly the
    same output.  V is returned untransposed (columns are the right
    singular vectors), matching the original ``v = v_t.T`` convention.
    """
    u, s, v_t = np.linalg.svd(A, full_matrices=True)
    v = v_t.T
    print("U:")
    print(u)
    print("Singular Values:")
    print(s)
    print("V:")
    print(v)
    return u, s, v


# Example of a Full-rank system.
svd_report(np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]]))

# Example of an over-constrained system.
svd_report(np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2], [2, 0, 0]]))

# Example of under-constrained system.
svd_report(np.array([[2, 0, 0], [0, 2, 0]]))
notebooks/svd.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# A first notebook walkthrough: variables, types, f-strings, lists and loops.

print("hello world 2")

name = "satish"
name
type(name)

age = 21
age
type(age)

rate = 4.3
type(rate)

# Average speed: 15 km covered in 20 minutes (20/60 of an hour).
time = 20 / 60
distance = 15
speed = distance / time
print(f"The Speed is {speed}km/hr")

from datetime import datetime

print(f"The Time is {datetime.now()}")

a = ['1', '2', 'hello', 'two']
print(a[0])
type(a)
len(a[2])

# +
# More information about the range function:
# https://www.w3schools.com/python/ref_func_range.asp
#
# range(start, stop, step) returns a sequence of numbers, starting from 0 by
# default, incrementing by 1 (by default), and stopping before `stop`.
# Here we simply print every element of the list, which is equivalent to
# indexing with `for i in range(0, len(a))`.
for element in a:
    print(element)
# -
PythonJupyterNotebooks/FirstJupyter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import os os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" import PIL import PIL.Image import tensorflow as tf import tensorflow_datasets as tfds import pathlib import matplotlib.pylab as plt import matplotlib IMG_HEIGHT = 224 IMG_WIDTH = 224 IMG_CHANNELS = 3 reshape_dims = [IMG_HEIGHT,IMG_WIDTH] CLASS_NAMES = ['roses', 'sunflowers', 'daisy', 'dandelion', 'tulips'] # + def read_and_decode(filename, reshape_dims): # 1.Read the file img = tf.io.read_file(filename) # 2.Convert the compressed string to a 3D uint8 tensor. img = tf.image.decode_jpeg(img, channels=IMG_CHANNELS) # 3.Use `convert_image_dtype` to convert to floats in the [0,1] range. img = tf.image.convert_image_dtype(img, tf.float32) # 4.Resize the image to the desired size. return tf.image.resize(img, reshape_dims) # decode_csv changed so that the label is the index into CLASS_NAMES array def decode_csv(csv_row): record_defaults = ["path", "flower"] filename, label_string = tf.io.decode_csv(csv_row, record_defaults) img = read_and_decode(filename, [IMG_HEIGHT, IMG_WIDTH]) label = tf.argmax(tf.math.equal(CLASS_NAMES, label_string)) #to find the only True label return img, label #plot Training metrics def training_plot(metrics, history): f, ax = plt.subplots(1, len(metrics), figsize=(5*len(metrics), 5)) for idx, metric in enumerate(metrics): ax[idx].plot(history.history[metric], ls='dashed') ax[idx].set_xlabel("Epochs") ax[idx].set_ylabel(metric) ax[idx].plot(history.history['val_' + metric]); ax[idx].legend([metric, 'val_' + metric]) # - #define a function of initializing model def train_and_evaluate( batch_size = 32, learning_rate = 0.0001, l1 = 0, l2 = 0, num_hidden = [64,16]): regularizer = tf.keras.regularizers.l1_l2(l1, l2) train_dataset = 
(tf.data.TextLineDataset('/Users/zhuzhirui/.keras/datasets/flower_photos/train_set.csv') .map(decode_csv)).batch(batch_size) eval_dataset = (tf.data.TextLineDataset("/Users/zhuzhirui/.keras/datasets/flower_photos/eval_set.csv") .map(decode_csv)).batch(batch_size) layers = [tf.keras.layers.Flatten( input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), name='Input_Layer')] layers = layers + [ tf.keras.layers.Dense(nodes, kernel_regularizer=regularizer, activation=tf.keras.activations.relu, name='Hidden_Dense_Layer_{}'.format(hno)) for hno, nodes in enumerate(num_hidden, start=1) ] layers = layers + [ tf.keras.layers.Dense(len(CLASS_NAMES), kernel_regularizer=regularizer, activation='softmax', name='Output_Layer') ] model = tf.keras.Sequential(layers, name='Flower_Classification') model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy']) print(model.summary()) history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10) training_plot(['loss', 'accuracy'], history) return model model = train_and_evaluate(learning_rate=0.0001, l2=0.001, num_hidden = [64, 16]) #define a function of initializing model, add dropout & batch norm def train_and_evaluate( batch_size = 32, learning_rate = 0.0001, l1 = 0, l2 = 0.001, dropout = 0.4, num_hidden = [64,16]): regularizer = tf.keras.regularizers.l1_l2(l1, l2) train_dataset = (tf.data.TextLineDataset('/Users/zhuzhirui/.keras/datasets/flower_photos/train_set.csv') .map(decode_csv)).batch(batch_size) eval_dataset = (tf.data.TextLineDataset("/Users/zhuzhirui/.keras/datasets/flower_photos/eval_set.csv") .map(decode_csv)).batch(batch_size) layers = [tf.keras.layers.Flatten( input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), name='Input_Layer')] for hno, nodes in enumerate(num_hidden,start=1): layers.extend([ tf.keras.layers.Dense(nodes, kernel_regularizer=regularizer, name='Hidden_Dense_Layer_{}'.format(hno)), 
tf.keras.layers.BatchNormalization(scale=False, # ReLU center=False, # have bias in Dense name='BatchNorm_Dense_Layer_{}'.format(hno)), # move activation to come after batchnorm tf.keras.layers.Activation('relu', name='ReLU_Dense_Layer_{}'.format(hno)), tf.keras.layers.Dropout(rate=dropout, name='Dropout_Dense_Layer_{}'.format(hno)), ]) layers.append( tf.keras.layers.Dense(len(CLASS_NAMES), kernel_regularizer=regularizer, activation='softmax', name='Output_Layer') ) model = tf.keras.Sequential(layers, name='Flower_Classification') model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy']) print(model.summary()) history = model.fit(train_dataset, validation_data=eval_dataset, epochs=10) training_plot(['loss', 'accuracy'], history) return model model = train_and_evaluate(learning_rate=0.0001, l2=0.001, dropout=0.4, num_hidden = [64, 16])
1.From Linear Model to Deep Neural Networks/ipynbCode/Deep Neural Networks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/pachterlab/MBGBLHGP_2019/blob/master/notebooks/runtime_mem.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ykMon0sjedM-" outputId="05c8a648-8df3-43e3-8eb2-346ba251a0f3" colab={"base_uri": "https://localhost:8080/"} # !git clone https://github.com/pachterlab/MBGBLHGP_2019.git # + id="11S5q493ei3N" # !gunzip MBGBLHGP_2019/data/runtime_mem/* # + id="C83zsU2QecWH" import pandas as pd import matplotlib.pyplot as plt import glob import numpy as np import matplotlib # %matplotlib inline # %config InlineBackend.figure_format = 'retina' # + id="tLxsldVlecWM" matplotlib.rcParams.update({'font.size': 20}) # + id="9DO-UA_5ecWM" base = "/content/MBGBLHGP_2019/data/runtime_mem/" c = pd.read_csv(base + "time_cellranger.csv") k1 = pd.read_csv(base + "time_kallisto_step1_bus.csv") k2 = pd.read_csv(base + "time_kallisto_step2_correct_count.csv") star = pd.read_csv(base + "time_star.csv") salmon = pd.read_csv(base + "time_salmon.csv") salmon_nwl = pd.read_csv(base + "time_salmon_nowhitelist.csv") seq_depth=pd.read_csv(base + "seq_depth_stat.csv") seq_depth.index = seq_depth["ID"] # + id="N4FUmQoMecWM" outputId="ebde36df-a8a6-4388-de8b-ae5f4c033c7d" colab={"base_uri": "https://localhost:8080/", "height": 678} seq_depth # + id="dh4L-bzrecWN" k1.index=k1.ID k2.index=k2.ID k = k1 + k2 del k["ID"] # + id="VMbL76HFecWO" k = k.reset_index() # + id="oB0wbEc9ecWO" k["max_rss"] = pd.concat([k1["max_rss"], k2["max_rss"]], axis=1).max(axis=1).values # + id="v6DTex6decWO" outputId="ec75bb56-ab21-45b3-e335-2dc2cefce005" colab={"base_uri": "https://localhost:8080/"} k.s[0] == k1.s[0] + k2.s[0] # + 
id="0SobGYLFecWO" c["reads"] = c.ID.map(seq_depth["reads"]) k["reads"] = k.ID.map(seq_depth["reads"]) star["reads"] = star.ID.map(seq_depth["reads"]) salmon["reads"] = salmon.ID.map(seq_depth["reads"]) salmon_nwl["reads"] = salmon_nwl.ID.map(seq_depth["reads"]) # + id="aDUyYRx2ecWP" outputId="ee212dfd-a576-49c5-cbc0-c6dfc16b3997" colab={"base_uri": "https://localhost:8080/", "height": 198} k.head() # + id="MCXV8c0BecWP" kallisto = k cellranger = c salmon_whitelist = salmon star = star salmon_nowhitelist = salmon_nwl # + [markdown] id="XBVXia0KecWP" # # Mapping index sizes # + id="u3nQIWhTecWP" outputId="013fef0b-5d80-483c-a017-3d5b3f043888" colab={"base_uri": "https://localhost:8080/"} kallisto.ID.values # + id="dM3pXBN8ecWQ" dstoidx = {'EMTAB7320_v2':2.1, 'heart1k_v2':2.1, 'heart1k_v3':2.1, 'hgmm10k_v3':4.9, 'hgmm1k_v2':4.9, 'hgmm1k_v3':4.9, 'neuron10k_v3':2.1, 'pbmc10k_v3':2.4, 'pbmc1k_v3':2.4, 'SRR6956073_v2':1.4, 'SRR6998058_v2':2.1, 'SRR7299563_v2':1.1, 'SRR8206317_v2':2.1, 'SRR8257100_v2':0.983, 'SRR8327928_v2':2.4, 'SRR8513910_v2':0.713, 'SRR8524760_v2':2.4, 'SRR8599150_v2':2.1, 'SRR8611943_v2':0.582, 'SRR8639063_v2':2.1 } # + id="HlV6sZSMecWQ" kallisto["idx"] = kallisto.ID.map(dstoidx) # + id="n4kKR_IWecWQ" cellranger["idx"] = cellranger.ID.map(dstoidx) salmon_whitelist["idx"] = salmon_whitelist.ID.map(dstoidx) star["idx"] = star.ID.map(dstoidx) salmon_nowhitelist["idx"] = salmon_nowhitelist.ID.map(dstoidx) # + [markdown] id="Fgg-ZBUZecWQ" # # MEMORY # + id="_zryqKSdecWQ" outputId="254d4f4a-bd21-4746-cbf1-24da7ada1e44" colab={"base_uri": "https://localhost:8080/", "height": 725} fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(15,10)) fsize= 30 kallisto_color = '#FF7F0E' tenx_color = '#1F77B4' star_color = '#a1d99b' dotsize = 10 xmax = 1e5 gridalpha = 0.2 for nreads in kallisto['reads']: ax.axvline(x=nreads,linewidth=1, color='lightgrey', linestyle='--', zorder=1) mem = "max_rss" size = 100 ax.set_xscale('log') ax.set_xlim(7e6,1e9) ax.set_yscale("linear", 
nonposy='clip') ax.set_xlabel('Number of reads in dataset',fontsize=fsize) ax.set_ylabel('Maximum memory used (GB)',fontsize=fsize) ax.set_title('',loc='center') # ax.set_title('E', fontweight='bold', fontsize = fsize, loc = 'left' ) ax.grid(color='dimgrey', linestyle='-', linewidth=0.5, which="both", alpha = gridalpha) ax.scatter( kallisto['reads'], kallisto[mem]/1000, label = 'kallisto', color = kallisto_color, zorder=2, s=size) ax.scatter( cellranger['reads'], cellranger[mem]/1000, label = 'Cell Ranger', color= tenx_color, zorder=2, s=size) # ax.scatter( salmon_whitelist['reads'], salmon_whitelist[mem]/1000, # label = 'Alevin (Salmon)', color= 'black', zorder=2) ax.scatter( salmon_whitelist['reads'], salmon_whitelist[mem]/1000, label = 'Alevin', color= 'black', zorder=2, s=size) ax.scatter( star['reads'], star[mem]/1000, label = 'STARsolo', color= star_color, zorder=2, s=size) ax2 = ax.twiny() #ax2.set_xlim(ax.get_xlim()) #ax2.set_xscale('log') ax2.set_xticks(ticks=np.linspace(kallisto['reads'].min(), kallisto['reads'].max(), kallisto.shape[0])) ax2.set_xticklabels( labels=kallisto.sort_values("reads")['ID']) # ax2.set_xlabel(r"Modified x-axis: $1/(1+X)$") for label in ax2.get_xticklabels(): label.set_rotation(45) label.set_horizontalalignment("left") ax2.tick_params( axis='x', # changes apply to the x-axis which='minor', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off ax.legend() plt.tight_layout() plt.savefig('./max_memory_alevin_no_whitelist.pdf', dpi=300) plt.show() # + [markdown] id="6qk5yd-_ecWR" # # Plot MEMORY by idx size # + id="TJEjAYxoecWR" outputId="db85bb3b-feec-4987-a629-df8f9bd225b7" colab={"base_uri": "https://localhost:8080/", "height": 727} fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(15,10)) fsize= 20 kallisto_color = '#FF7F0E' tenx_color = '#1F77B4' star_color = '#a1d99b' dotsize = 10 xmax = 
1e5 gridalpha = 0.2 for nreads in kallisto['idx']: ax.axvline(x=nreads,linewidth=1, color='lightgrey', linestyle='--', zorder=1) mem = "max_rss" size = 100 #ax.set_xscale('log') #ax.set_xlim(7e6,1e9) ax.set_yscale("linear", nonposy='clip') ax.set_xlabel('Number of reads in dataset',fontsize=fsize) ax.set_ylabel('Maximum memory used (GB)',fontsize=fsize) ax.set_title('',loc='center') # ax.set_title('E', fontweight='bold', fontsize = fsize, loc = 'left' ) ax.grid(color='dimgrey', linestyle='-', linewidth=0.5, which="both", alpha = gridalpha) ax.scatter( kallisto['idx'], kallisto[mem]/1000, label = 'kallisto', color = kallisto_color, zorder=2, s=size) ax.scatter( cellranger['idx'], cellranger[mem]/1000, label = 'Cell Ranger', color= tenx_color, zorder=2, s=size) # ax.scatter( salmon_whitelist['reads'], salmon_whitelist[mem]/1000, # label = 'Alevin (Salmon)', color= 'black', zorder=2) ax.scatter( salmon_nowhitelist['idx'], salmon_nowhitelist[mem]/1000, label = 'Alevin', color= 'black', zorder=2, s=size) ax.scatter( star['idx'], star[mem]/1000, label = 'STARsolo', color= star_color, zorder=2, s=size) ax2 = ax.twiny() ax2.set_xlim(ax.get_xlim()) #ax2.set_xscale('log') ax2.set_xticks(ticks=kallisto['idx']) ax2.set_xticklabels( labels=kallisto['ID']) # ax2.set_xlabel(r"Modified x-axis: $1/(1+X)$") for label in ax2.get_xticklabels(): label.set_rotation(45) label.set_horizontalalignment("left") ax2.tick_params( axis='x', # changes apply to the x-axis which='minor', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off ax.legend() plt.tight_layout() #plt.savefig('./max_memory_alevin_no_whitelist.pdf', dpi=300) plt.show() # + [markdown] id="vQB8XWOMecWS" # # RUNTIME # + id="EAV0z0ymecWS" outputId="8730b07a-b145-4385-c656-65c0972765db" colab={"base_uri": "https://localhost:8080/", "height": 707} fig, ax = plt.subplots(nrows=1, 
ncols=1,figsize=(15,10)) fsize= 30 kallisto_color = '#FF7F0E' tenx_color = '#1F77B4' star_color = '#a1d99b' dotsize = 10 xmax = 1e5 gridalpha = 0.2 for nreads in kallisto['reads']: ax.axvline(x=nreads,linewidth=1, color='lightgrey', linestyle='--', zorder=1) size=100 ax.set_xscale('log') ax.set_xlim(7e6,1e9) ax.set_ylim(10, 1e5) ax.set_yscale("log", nonposy='clip') ax.set_xlabel('Number of reads in dataset',fontsize=fsize) ax.set_ylabel('Processing time (s)',fontsize=fsize) ax.set_title('',loc='center') # ax.set_title('E', fontweight='bold', fontsize = fsize, loc = 'left' ) ax.grid(color='dimgrey', linestyle='-', linewidth=0.5, which="both", alpha = gridalpha) ax.scatter( kallisto['reads'], kallisto['s'], label = 'kallisto', color = kallisto_color, zorder=2, s=size) ax.scatter( cellranger['reads'], cellranger['s'], label = 'Cell Ranger', color= tenx_color, zorder=2, s=size) # ax.scatter( salmon_whitelist['reads'], salmon_whitelist['s'], # label = 'Alevin (Salmon)', color= 'black', zorder=2) ax.scatter( salmon_whitelist['reads'], salmon_whitelist['s'], label = 'Alevin', color= 'black', zorder=2, s=size) ax.scatter( star['reads'], star['s'], label = 'STARsolo', color= star_color, zorder=2, s=size) ax2 = ax.twiny() #ax2.set_xlim(ax.get_xlim()) #ax2.set_xscale('log') ax2.set_xticks(ticks=np.linspace(kallisto['reads'].min(), kallisto['reads'].max(), kallisto.shape[0])) ax2.set_xticklabels( labels=kallisto.sort_values("reads")['ID']) # ax2.set_xlabel(r"Modified x-axis: $1/(1+X)$") for label in ax2.get_xticklabels(): label.set_rotation(45) label.set_horizontalalignment("left") ax2.tick_params( axis='x', # changes apply to the x-axis which='minor', # both major and minor ticks are affected bottom=False, # ticks along the bottom edge are off top=False, # ticks along the top edge are off labelbottom=False) # labels along the bottom edge are off ax.legend() plt.tight_layout() plt.savefig('./runtimes_alevin_whitelist.pdf', dpi=300) plt.show() # + id="VOp19zE_ecWT" 
outputId="61b1dacd-3b65-4770-a330-150a4398e3d8" colab={"base_uri": "https://localhost:8080/"} np.arange(kallisto['reads'].min(), kallisto['reads'].max(), 1e6).shape # + id="lRCl5-GfecWT" outputId="1f126206-3d55-4c0f-bb3c-1be7b8d20cab" colab={"base_uri": "https://localhost:8080/"} np.logspace(int(np.format_float_scientific(kallisto['reads'].min())[-2:]), int(np.format_float_scientific(kallisto['reads'].max())[-2:]), kallisto.shape[0]) # + id="C27OExKQecWU" outputId="6030ba03-97ad-4388-d5fa-31c6144e43eb" colab={"base_uri": "https://localhost:8080/"} int(np.format_float_scientific(kallisto['reads'].min())[-2:]) # + id="jh5zs9azecWV" import math # + id="Rxe4bgIQecWV" outputId="b8546d72-1801-4f8f-e04a-27107b608eaf" colab={"base_uri": "https://localhost:8080/"} math.frexp(10*2)[0]**math.frexp(10*2)[1] # + id="bRL8kqb4ecWV" outputId="9e38b052-def5-49cb-bc17-bb2ff2224662" colab={"base_uri": "https://localhost:8080/", "height": 648} kallisto.sort_values("reads") # + id="1wp-I13HecWV"
notebooks/runtime_mem.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import random


class Pokemon:
    """A battle participant: tracks health, level, type, speed and KO state."""

    def __init__(self, name, lvl, typ, knocked_out, spd):
        self.name = name
        self.lvl = lvl
        self.typ = typ                  # "Fire" / "Water" / "Grass" (keys of power_dict)
        self.m_health = self.lvl * 10   # max health scales with level
        self.c_health = self.m_health   # current health starts at max
        self.knocked_out = knocked_out
        self.atck = self.lvl * 2        # base attack scales with level
        self.spd = spd                  # decides who strikes first in a round

    def __repr__(self):
        return self.name

    def check_stats(self):
        """Print a stat card; the display strips the 'Your ' name prefix."""
        tmp_name = self.name.replace("Your ", "")
        print(f""" =================== \n{tmp_name} \nHealth {self.c_health}/{self.m_health}\nLevel {self.lvl}\nSpeed {self.spd}\nDefault attack {self.atck} """)

    def gain_exp(self):
        """Level up after a KO: +1 level, +20 max health, heal 20 (capped at max)."""
        self.lvl += 1
        self.m_health += 20
        self.c_health = min(self.c_health + 20, self.m_health)
        print(f"Level of {self.name} increased to level {self.lvl}. Max health got a boost as well and now it's {self.m_health}")

    def lose_health(self, damage):
        """Apply *damage*; clamp health at 0 and flag/announce a knock-out."""
        if self.c_health > 0:
            self.c_health -= damage
            if self.c_health <= 0:
                self.c_health = 0
            else:
                print(f"{self.name} health {self.c_health}/{self.m_health}")
            if self.c_health <= 0:
                self.knocked_out = True
                self.knock_out()  # fix: was Pokemon.knock_out(self); same behavior, idiomatic call

    def gain_health(self):
        """Heal up to 20 points, never exceeding max health."""
        heal = 20
        if self.c_health < self.m_health:
            # min() caps at m_health, so the original's overshoot branch was dead code.
            self.c_health = min(self.c_health + heal, self.m_health)
            print(f"Health of {self.name} after healing is {self.c_health}/{self.m_health}")
        else:
            print(f"You used potion but health of {self.name} is {self.c_health} and didn't need healing!")

    def knock_out(self):
        """Announce the KO (only if the flag is actually set)."""
        if self.knocked_out == True:
            print(f"\n{self.name} has been knocked out!\n")

    def revival(self):
        """Bring a knocked-out Pokemon back at full health (not used by battle())."""
        if self.knocked_out == True:
            self.knocked_out = False
            self.c_health = self.m_health
            print(f"{self.name} is back to life with {self.c_health} health")

    def attack(self, enemy):
        """Attack *enemy*: base attack + random 1..5, scaled by type match-up."""
        damage = self.atck + random.randint(1, 5)
        if self.typ == enemy.typ:
            print(self.name, "attacks", enemy.name, "with normal damage of", damage)
            enemy.lose_health(damage)
        else:
            multiplier = power_dict[self.typ][enemy.typ]  # 2 (strong) or 0.5 (weak)
            damage = damage * multiplier
            if multiplier == 2:
                print(f"{self.name} attacks {enemy.name} == > {damage}. {self.typ} against {enemy.typ} is very effective",)
            elif multiplier == 0.5:
                print(f"{self.name} attacks {enemy.name} == > {damage}. {self.typ} against {enemy.typ} is not very effective",)
            enemy.lose_health(damage)


def _read_int(prompt="\n"):
    """Read an integer from stdin; return None on non-numeric input.

    Fix: the original called int(input()) directly and crashed with an
    uncaught ValueError whenever the player typed anything non-numeric.
    """
    try:
        return int(input(prompt))
    except ValueError:
        print("Please enter a number")
        return None


class Trainer:
    """Owns a party of Pokemon, a potion stock and the active-Pokemon index."""

    def __init__(self, name, pokemons_list, potions, c_pokemon):
        self.name = name
        self.pokemons_list = pokemons_list
        self.potions = potions
        self.c_pokemon = c_pokemon  # index of the currently active Pokemon

    def __repr__(self):
        return self.name

    def check_pl_stats(self):
        """Print the active Pokemon's stats plus remaining potions."""
        if len(self.pokemons_list) == 0:
            print(f"{self.name} has no pokemons left ")  # fix: typo "pokemnos"
        else:
            self.pokemons_list[self.c_pokemon].check_stats()
            print(f"""potions {self.potions} =================== """)

    def use_potion(self):
        """Spend one potion (if any remain) to heal the active Pokemon."""
        if self.potions > 0:
            self.potions -= 1
            print(f"{self.name} uses potion on {self.pokemons_list[self.c_pokemon].name.replace('Your ','')}")
            self.pokemons_list[self.c_pokemon].gain_health()
        else:
            print("You've run out of potions")

    def attack_pl(self, enemy):
        """Run the fight menu loop against *enemy*.

        Returns False when *enemy* has no Pokemon left (caller ends the game);
        returns None when the player retreats or must switch Pokemon.
        """
        if len(enemy.pokemons_list) == 0:
            print(f"{enemy.name} has no pokemons left! You won!")
            return False
        while True:
            enem_cur_pok = enemy.pokemons_list[enemy.c_pokemon]
            cur_pok = self.pokemons_list[self.c_pokemon]
            print(f"""\n===========================================
{enem_cur_pok.typ} {enem_cur_pok.name} {enem_cur_pok.c_health}/{enem_cur_pok.m_health} is in front of you!
{cur_pok} has {cur_pok.c_health}/{cur_pok.m_health}
What do you want to do?
===========================================\n""")
            print("\n1) Fight! \n2) Use healing potion \n3) Retreat")
            answ = _read_int()
            if answ is None:
                continue
            if answ == 1:
                if cur_pok.knocked_out == True:
                    print("This Pokemon has been knocked out and can't attack, pick another one")
                    break
                # Main battle round: the faster Pokemon strikes first.
                if cur_pok.spd > enem_cur_pok.spd:
                    print("\nYour pokemon is faster, it attacks first\n===================FIGHT===================\n")
                    cur_pok.attack(enem_cur_pok)
                    enem_cur_pok.attack(cur_pok)
                else:
                    print("\nEnemy's pokemon is faster, it attacks first\n===================FIGHT===================\n")
                    enem_cur_pok.attack(cur_pok)
                    cur_pok.attack(enem_cur_pok)
                if enem_cur_pok.knocked_out == True:
                    cur_pok.gain_exp()
                    enemy.pokemons_list.remove(enem_cur_pok)
                    enemy.c_pokemon = 0
                    break
            elif answ == 2:
                self.use_potion()  # fix: was Trainer.use_potion(self)
            elif answ == 3:
                break

    def switch_active_pokemon(self):
        """List the party and let the player pick a new active Pokemon.

        Returns False when every Pokemon is knocked out (the battle is lost).
        """
        old_pos = self.c_pokemon
        print(f"Your current Pokemon is {self.pokemons_list[self.c_pokemon]}")
        count_check = 0
        for pokemon in range(len(self.pokemons_list)):
            if self.pokemons_list[pokemon].knocked_out == True:
                print(pokemon + 1, self.pokemons_list[pokemon].name.replace("Your ", ""), "is knocked out")
                count_check += 1
            else:
                print(pokemon + 1, self.pokemons_list[pokemon].name.replace("Your ", ""),
                      f"{self.pokemons_list[pokemon].c_health}/{self.pokemons_list[pokemon].m_health} lvl {self.pokemons_list[pokemon].lvl}")
        # Fix: was hard-coded to 3, which broke for any other party size.
        if count_check == len(self.pokemons_list):
            print("You lost the battle! As expected ;) ")
            return False
        while True:
            choice = _read_int()
            if choice is None:
                continue
            position = choice - 1
            # Fix: reject negative positions explicitly (input "0" used to wrap
            # to the last Pokemon via Python's negative indexing).
            if position < 0 or position >= len(self.pokemons_list):
                print("Number is out of range")
            elif self.pokemons_list[position].knocked_out == True:
                print("This Pokemon has been knocked out, you can't pick it")
            elif position == old_pos:
                print("No changes have been made")
                break
            else:
                self.c_pokemon = position
                print(self.name, "changed", self.pokemons_list[old_pos], "to", self.pokemons_list[self.c_pokemon])
                break


# Relationships between pokemon powers:
# power_dict[attacker_type][defender_type] -> damage multiplier.
power_dict = {
    "Fire": {"Water": 0.5, "Grass": 2},
    "Water": {"Fire": 2, "Grass": 0.5},
    "Grass": {"Water": 2, "Fire": 0.5}
}

# Hardcoded Pokemons and parameters for the sake of interesting gameplay.
ash_charmander = Pokemon("Charmander", 5, "Fire", False, 65)
ash_bulbasaur = Pokemon("Bulbasaur", 5, "Grass", False, 45)
ash_squirtle = Pokemon("Squirtle", 5, "Water", False, 43)
ash = Trainer("Ash", [ash_charmander, ash_bulbasaur, ash_squirtle], 0, 0)

charmander = Pokemon("Your Charmander", 3, "Fire", False, 65)
bulbasaur = Pokemon("Your Bulbasaur", 3, "Grass", False, 45)
squirtle = Pokemon("Your Squirtle", 3, "Water", False, 43)


def battle():
    """Top-level game loop: fight Ash until one side runs out of Pokemon."""
    print("""\n\nWelcome to the training camp where you can battle Pokemons!
You have 3 pokemons to fight with 3 opponents.
Trainer Ash will fight you so be careful, it's not an easy fight!
I would highly recommend paying attention to stats.
So what is your name again?\n""")
    player_name = input()
    # The player starts with 2 potions and Squirtle (index 2) active.
    player = Trainer(player_name, [charmander, bulbasaur, squirtle], 2, 2)
    print(f"\nWelcome {player_name}! What would you like to do?")
    while True:
        print("""
1) Fight Ash
2) Compare stats
3) Switch your current Pokemon
4) Quit
""")
        answ = _read_int()
        if answ is None:
            continue
        if answ == 1:
            if player.attack_pl(ash) == False:
                break
        elif answ == 2:
            print("\n\n\nYour current Pokemon")
            player.check_pl_stats()
            print("Ash's current Pokemon")
            ash.check_pl_stats()
        elif answ == 3:
            if player.switch_active_pokemon() == False:
                break
        elif answ == 4:
            break
    print("Bye!")


battle()
pokemon_game.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Post-process indoor-location predictions: snap each predicted (x, y) to the
# nearest known training waypoint on the same site/floor, within a threshold.

import pandas as pd
import numpy as np

# Local working folder with the raw submission file to post-process.
folder = 'C:\\Users\\user\\Documents\\LightHouseLabs\\W09'
submission_file = pd.read_csv(folder + "\\submission.csv")
# site_path_timestamp is "<site>_<path>_<timestamp>"; take the site prefix.
submission_file['site']=submission_file['site_path_timestamp'].str.split("_").str[0]
submission_file.head()

submission_file.shape

base_path = 'E:\\indoor-location-navigation\\'

# +
#Generating waypoints dataframe
#Iterating all training files

# +
import os

train_files = 'C:\\Users\\user\\Documents\\LightHouseLabs\\W09\\Rev 1 train files'

# Collect the unique (x, y, floor) waypoints per site from every train CSV.
waypoints_total = pd.DataFrame(columns=['x', 'y', 'f', 'site'])
for filename in os.listdir(train_files):
    if filename.endswith(".csv") and 'train' in filename:
        waypoints_df= pd.read_csv(os.path.join(train_files, filename))
        waypoints_df=waypoints_df[['x','y', 'f']]
        # Filenames are "<site>_..."; recover the site id from the name.
        waypoints_df['site']=filename.split('_')[0]
        waypoints_df.drop_duplicates(subset=['x', 'y', 'f'], inplace=True)
        waypoints_total = pd.concat([waypoints_total, waypoints_df], ignore_index=True)
    else:
        continue
waypoints_total.head(20)
# -

waypoints_total.shape

from scipy.spatial.distance import cdist


def add_xy(df):
    """Add an 'xy' column of (x, y) tuples so rows can be matched as points."""
    df['xy'] = [(x, y) for x,y in zip(df['x'], df['y'])]
    return df


def closest_point(point, points):
    """ Find closest point from a list of points. """
    return points[cdist([point], points).argmin()]


def snap_to_grid(sub, threshold):
    """ Snap to grid if within a threshold.
    x, y are the predicted points.
    x_, y_ are the closest grid points.
    _x_, _y_ are the new predictions after post processing.
    """
    sub['_x_'] = sub['x']
    sub['_y_'] = sub['y']
    # Only rows closer than `threshold` to a grid point get snapped.
    sub.loc[sub['dist'] < threshold, '_x_'] = sub.loc[sub['dist'] < threshold]['x_']
    sub.loc[sub['dist'] < threshold, '_y_'] = sub.loc[sub['dist'] < threshold]['y_']
    return sub.copy()


submission_file = add_xy(submission_file)
waypoints_total = add_xy(waypoints_total)

# +
# For each (site, floor) group, find the closest training waypoint per row.
ds=[]
for (site, myfloor), d in submission_file.groupby(['site','floor']):
    true_floor_locs = waypoints_total.loc[(waypoints_total['f'] == myfloor) & (waypoints_total['site'] == site)] \
        .reset_index(drop=True)
    if len(true_floor_locs) == 0:
        # No training waypoints for this site/floor; leave predictions as-is.
        print(f'Skipping {site} {myfloor}')
        continue
    d['matched_point'] = [closest_point(x, list(true_floor_locs['xy'])) for x in d['xy']]
    d['x_'] = d['matched_point'].apply(lambda x: x[0])
    d['y_'] = d['matched_point'].apply(lambda x: x[1])
    ds.append(d)
sub = pd.concat(ds)
# -

sub.head()

sub.shape

# +
# Calculate the distances (Euclidean, prediction -> nearest waypoint).
sub['dist'] = np.sqrt( (sub.x-sub.x_)**2 + (sub.y-sub.y_)**2 )
sub_pp = snap_to_grid(sub, threshold=12)
# -

sub_pp.head()

# Keep only the submission columns, renaming the snapped coords back to x/y.
sub = sub_pp[['site_path_timestamp','floor','_x_','_y_']].rename(columns={'_x_':'x', '_y_':'y'})
sub.head()

sub.to_csv('submission_snap_to_grid.csv', index=False)
post-processing Indoor location.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Column, Integer, String, Float engine = create_engine('sqlite:///:memory:', echo=True) Base = declarative_base() class Flight(Base): __tablename__ = 'flights' id = Column(Integer, primary_key=True) origin_city = Column(String) dest_city = Column(String) origin_state = Column(String) dest_state = Column(String) passengers = Column(Float) mail = Column(Float) airline = Column(String) def __init__(self, origin_city, origin_state, dest_city, dest_state, passengers, mail, airline): self.origin_city = origin_city self.origin_state = origin_state self.dest_city = dest_city self.dest_state = dest_state self.passengers = passengers self.mail = mail self.airline = airline Base.metadata.create_all(engine) fl_01 = Flight("OC_1", "OS_1", "DC_1", "DC_1", 1, 10, "A_1") fl_02 = Flight("OC_1", "OS_1", "DC_1", "DC_1", 2, 9, "A_1") fl_03 = Flight("OC_2", "OS_2", "DC_1", "DC_1", 3, 8, "A_1") fl_04 = Flight("OC_3", "OS_2", "DC_1", "DC_1", 4, 7, "A_2") fl_05 = Flight("OC_3", "OS_2", "DC_1", "DC_1", 5, 6, "A_2") fl_06 = Flight("OC_2", "OS_2", "DC_1", "DC_1", 6, 5, "A_2") fl_07 = Flight("OC_1", "OS_3", "DC_1", "DC_2", 7, 4, "A_2") fl_08 = Flight("OC_2", "OS_3", "DC_1", "DC_2", 8, 3, "A_2") fl_09 = Flight("OC_1", "OS_3", "DC_1", "DC_2", 9, 2, "A_2") fl_10 = Flight("OC_3", "OS_3", "DC_1", "DC_2", 10, 1, "A_2")
notebooks/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# RBF-kernel SVM on the Social Network Ads dataset: scale, split, fit,
# evaluate, and plot decision regions for train and test sets.

import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv("Social_Network_Ads.csv")

df.info()

# Drop identifier and the categorical column; keep numeric features + label.
df = df.drop(["User ID", "Gender"], axis = 1)

X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values

from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# NOTE(review): the scaler is fit on ALL rows before the split, so test-set
# statistics leak into scaling; fitting on X_train only would be cleaner.
sc_X = StandardScaler()
X = sc_X.fit_transform(X)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)

from sklearn.svm import SVC

# Gaussian (RBF) kernel SVM with a fixed seed for reproducibility.
classifier = SVC(kernel = "rbf", random_state = 0)
classifier.fit(X_train, y_train)

classifier.score(X_test, y_test)

y_pred = classifier.predict(X_test)

from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True)

# Decision-region plot over the test data: evaluate the classifier on a
# 0.01-spaced grid covering the feature ranges, then overlay the points.
from matplotlib.colors import ListedColormap
X_set, Y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(X_set[:, 0].min(), X_set[:, 0].max(), 0.01),
                     np.arange(X_set[:, 1].min(), X_set[:, 1].max(), 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             cmap = ListedColormap(("red", "green")), alpha = 0.75)
# NOTE(review): positional x/y args to seaborn.scatterplot are deprecated in
# newer seaborn; keyword x=/y= would be future-proof.
sns.scatterplot(X_test[:, 0], X_test[:, 1], palette = {0: "red", 1: "green"}, hue = y_test)
plt.title("SVM Classifier Kernel = \"Gaussian\" (Testing Data)")

# Same decision-region plot over the training data.
from matplotlib.colors import ListedColormap
X_set, Y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(X_set[:, 0].min(), X_set[:, 0].max(), 0.01),
                     np.arange(X_set[:, 1].min(), X_set[:, 1].max(), 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             cmap = ListedColormap(("red", "green")), alpha = 0.75)
sns.scatterplot(X_set[:, 0], X_set[:, 1], palette = {0: "red", 1: "green"}, hue = y_train)
plt.title("SVM Classifier Kernel = \"Gaussian\" (Training Data)")
Classification/KernelSVM/KernelSVM.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Luis-Martinez-Bautista/daa_2021_1/blob/master/13_enero.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="2V5paj0PSpl8"
class NodoArbol:
    """A single tree node holding a value and optional left/right children."""

    def __init__(self, dato, hijo_izq = None, hijo_der = None):
        self.dato = dato        # stored value
        self.left = hijo_izq    # subtree with smaller values
        self.right = hijo_der   # subtree with larger values


# + id="BZLIAe6uSyQl"
class BinarySearchTree:
    """A binary search tree supporting insertion; duplicates are ignored."""

    def __init__(self):
        self.__root = None  # empty tree until the first insert

    def insert(self, value):
        """Insert *value*, creating the root if the tree is still empty."""
        root = self.__root
        if root is None:
            self.__root = NodoArbol(value)
        else:
            self.__insert_nodo__(root, value)

    def __insert_nodo__(self, nodo, value):
        """Walk down from *nodo* and attach *value* at the first free slot."""
        while True:
            if value == nodo.dato:
                # Value already present: BST keeps one copy, do nothing.
                return
            if value < nodo.dato:
                if nodo.left is None:
                    nodo.left = NodoArbol(value)
                    return
                nodo = nodo.left
            else:
                if nodo.right is None:
                    nodo.right = NodoArbol(value)
                    return
                nodo = nodo.right


# + id="_WdWauBgbvx0"
bst = BinarySearchTree()
bst.insert(50)
bst.insert(30)
bst.insert(20)
#bst.search(30)
13_enero.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # NumPy Array Element Selection from matplotlib import pyplot as plt import numpy as np nums = np.arange(10) print(nums) # + nums[nums > 4] gt4 = nums > 4 print(gt4) print(nums[gt4]) # - nums[ [False, True, False, True, False, True, False, True, False, True] ] # + nums[nums > 4] = 100 nums # - nums[ [1,2] ] # + matrix_nums = np.arange(36).reshape(6,6) display(matrix_nums) # - matrix_nums[0,0] matrix_nums[5,5] # + row = 2 col = 3 matrix_nums[row,col] # - matrix_nums[1,:] matrix_nums[:,1] matrix_nums[:,1:4:2] # + colors = { 1: 'red', 2: 'green', 3: 'blue' } nums = np.zeros(64, dtype="int8").reshape(8,8) # nums = np.random.randint(0, 4, 64, dtype="int8").reshape(8,8) # print(nums) # print(nums.shape) points = np.array([ [x,y] for x in range(nums.shape[0]) for y in range(nums.shape[1]) ]) # print(points) # experiment with selecting different elements # nums[:,0:5:2] = 3 # nums[[5,6], 4:6] = 3 # nums[nums==2] = 3 for x, y in points: plt.scatter(x, y, color=colors.get(nums[y,x], 'black')) # -
jupyter_labs_conda/04_NumPy_ElementSelection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import cv2 import imutils import os from os import listdir from matplotlib import pyplot as plt csv_path='../dataset/train.csv' image_dir='../dataset/Train Images/' flip_dir='../dataset/train_flipped/' labels=pd.read_csv(csv_path) labels.head() image_paths=[] for file in listdir(image_dir): image_paths.append(image_dir+file) # + flipped_labels=[] for i in range(len(labels)): img=cv2.imread(image_dir+labels['Image'][i]) img=cv2.flip(img,1) name=labels['Image'][i][:-4] flipped_labels.append([name+'_flip.jpg',labels['Class'][i]]) cv2.imwrite(flip_dir+name+'_flip.jpg',img) # for i in image_paths: # img=cv2.imread(i) # img=cv2.flip(img,1) # name=i.split('/')[-1][:-4] # cv2.imwrite(flip_dir+name+'_flip.jpg',img) # - img=cv2.imread(image_paths[2]) plt.imshow(img) flipped_df=pd.DataFrame(flipped_labels, columns=labels.columns) merged_df=labels.append(flipped_df) merged_df.shape merged_df.head() merged_df.to_csv('../dataset/original_and_flipped.csv', index=False) data=pd.read_csv('../dataset/original_and_flipped.csv') data.shape import random from sklearn.utils import shuffle random.seed(9) data=shuffle(data) train=data.iloc[:10000,] test=data.iloc[10000:,] train.groupby(by=['Class']).count() test.groupby(by=['Class']).count()
Notebooks_m/data flip 102.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Google stock-price prediction with a stacked LSTM: scale the 'Open' price,
# build 60-step windows, train, then predict the test period and plot.

# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# Kaggle convention: list all available input files.
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# -

# Importing the training set (column 1 = 'Open' price, kept 2-D via 1:2).
dataset_train = pd.read_csv('../input/google-stock/Google_Stock_Price_Train.csv')
training_set = dataset_train.iloc[:,1:2].values

dataset_train.head()

training_set

# Feature Scaling to [0, 1]; LSTMs train better on normalized inputs.
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range = (0, 1))
training_set_scaled = sc.fit_transform(training_set)

training_set_scaled

# Creating a data structure with 60 timesteps and t+1 output:
# each sample is the previous 60 scaled prices, the target is the next one.
X_train = []
y_train = []
for i in range(60, 1258):
    X_train.append(training_set_scaled[i-60:i, 0])
    y_train.append(training_set_scaled[i, 0])
X_train, y_train = np.array(X_train), np.array(y_train)

X_train

y_train

# Reshaping to (samples, timesteps, features) as Keras LSTMs expect.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))

# Importing the Keras libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM

# +
#Architecture of RNN

# Initialising the RNN
regressor = Sequential()

# Adding the input layer and the LSTM layer
regressor.add(LSTM(units = 3, return_sequences = True, input_shape = (None, 1)))

# Adding a second LSTM layer
regressor.add(LSTM(units = 3, return_sequences = True))

# Adding a third LSTM layer
regressor.add(LSTM(units = 3, return_sequences = True))

# Adding a fourth LSTM layer (last one: no sequence output)
regressor.add(LSTM(units = 3))

# Adding the output layer (single next-price value)
regressor.add(Dense(units = 1))
# -

# Compiling the RNN
regressor.compile(optimizer = 'rmsprop', loss = 'mean_squared_error')

# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs = 100, batch_size = 35)

# Getting the real stock price: append the test prices after the train prices
# so windows crossing the boundary can be built.
dataset_test = pd.read_csv('../input/google-stock/Google_Stock_Price_Test.csv')
test_set = dataset_test.iloc[:,1:2].values
real_stock_price = np.concatenate((training_set[0:1258], test_set), axis = 0)

# Getting the predicted stock price.
# NOTE(review): fit_transform here RE-FITS the scaler on data that includes
# the test prices (leakage); sc.transform(...) with the train-fitted scaler
# would be the orthodox choice.
scaled_real_stock_price = sc.fit_transform(real_stock_price)
inputs = []
for i in range(1258, 1278):
    inputs.append(scaled_real_stock_price[i-60:i, 0])
inputs = np.array(inputs)
inputs = np.reshape(inputs, (inputs.shape[0], inputs.shape[1], 1))
predicted_stock_price = regressor.predict(inputs)
predicted_stock_price = sc.inverse_transform(predicted_stock_price)

# +
# Visualising the results
plt.plot(real_stock_price[1258:], color = 'red', label = 'Real Google Stock Price')
plt.plot(predicted_stock_price, color = 'blue', label = 'Predicted Google Stock Price')
plt.title('Google Stock Price Prediction')
plt.xlabel('Time')
plt.ylabel('Google Stock Price')
plt.legend()
plt.show()
Deep Learning using Tensorflow Keras/Stock-Prediction-using-RNN/Stock Prediction by RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="2dOaoFoIv-F2" # # **ArtLine** # **Create** **amazing** **lineart** **portraits** # # + id="JN8RJVoKv8Lt" import torch import torch.nn as nn import fastai from fastai.vision import * from fastai.callbacks import * from fastai.vision.gan import * from torchvision.models import vgg16_bn from fastai.utils.mem import * from PIL import Image import matplotlib.pyplot as plt import numpy as np from torch.autograd import Variable import torchvision.transforms as transforms # + [markdown] id="MYpkQrvncqB1" # ##**Edge Detection** # + id="CqG1XipRciT5" def _gradient_img(img): img = img.squeeze(0) ten=torch.unbind(img) x=ten[0].unsqueeze(0).unsqueeze(0) a=np.array([[1, 0, -1],[2,0,-2],[1,0,-1]]) conv1=nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False) conv1.weight=nn.Parameter(torch.from_numpy(a).float().unsqueeze(0).unsqueeze(0)) G_x=conv1(Variable(x)).data.view(1,x.shape[2],x.shape[3]) b=np.array([[1, 2, 1],[0,0,0],[-1,-2,-1]]) conv2=nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False) conv2.weight=nn.Parameter(torch.from_numpy(b).float().unsqueeze(0).unsqueeze(0)) G_y=conv2(Variable(x)).data.view(1,x.shape[2],x.shape[3]) G=torch.sqrt(torch.pow(G_x,2)+ torch.pow(G_y,2)) return G gradient = TfmPixel(_gradient_img) # + [markdown] id="OlGPepVInaQo" # **PATH** # + id="PwBuA4R0m2zf" path = Path('/content/gdrive/My Drive/Apdrawing') # Blended Facial Features path_hr = Path('/content/gdrive/My Drive/Apdrawing/draw tiny') path_lr = Path('/content/gdrive/My Drive/Apdrawing/Tiny Real') # Portrait Pair path_hr3 = Path('/content/gdrive/My Drive/Apdrawing/drawing') path_lr3= Path('/content/gdrive/My Drive/Apdrawing/Real') # + [markdown] id="q0wVDG4onf7G" # **Architecture** # + id="0zieELiLnSbr" arch = models.resnet34 # + [markdown] 
# + [markdown] id="LvQrKkYkshF7"
# ### **Facial Features**

# + id="5S0vl06RnTMv"
# 70/30 train/valid split over the facial-feature input images.
src = ImageImageList.from_folder(path_lr).split_by_rand_pct(0.3, seed=42)

# + id="8tJphvDRnkO8"
def get_data(bs,size):
    """Build an image-to-image DataBunch: input -> same-named target in path_hr,
    with the Sobel `gradient` transform applied on top of the defaults."""
    data = (src.label_from_func(lambda x: path_hr/x.name)
            .transform(get_transforms(xtra_tfms=[gradient()]), size=size, tfm_y=True)
            .databunch(bs=bs,num_workers = 0).normalize(imagenet_stats, do_y=True))
    data.c = 3
    return data

# + [markdown] id="Yetcm3CHn0Dq"
# **64px**

# + id="k-Xoit4Mnvb-"
bs,size=20,64

# + id="-fqJZQfanmmV"
data = get_data(bs,size)

# + id="4a-wREZlnpD9"
data.show_batch(ds_type=DatasetType.Valid, rows=2, figsize=(9,9))

# + id="JLOmfrvWn4Uy"
t = data.valid_ds[0][1].data
t = torch.stack([t,t])

# + id="it5H7CoPn4q_"
def gram_matrix(x):
    """Per-sample Gram matrix of feature maps, normalized by c*h*w
    (style component of the perceptual loss)."""
    n,c,h,w = x.size()
    x = x.view(n, c, -1)
    return (x @ x.transpose(1,2))/(c*h*w)

# + id="fzkaAicvn7Gi"
gram_matrix(t)

# + id="XkORIHa4n-Fh"
base_loss = F.l1_loss

# + id="QYQnhpgxoAEF"
# Frozen VGG16-bn feature extractor for the perceptual loss.
vgg_m = vgg16_bn(True).features.cuda().eval()
requires_grad(vgg_m, False)

# + id="9CnEahFKoCPB"
# Layers just before each max-pool: the feature levels used by FeatureLoss.
blocks = [i-1 for i,o in enumerate(children(vgg_m)) if isinstance(o,nn.MaxPool2d)]
blocks, [vgg_m[i] for i in blocks]

# + id="NwtR5uSAoFgR"
class FeatureLoss(nn.Module):
    """Perceptual loss: pixel L1 + per-layer feature L1 + weighted Gram (style)
    terms computed from hooked activations of a frozen feature network."""

    def __init__(self, m_feat, layer_ids, layer_wgts):
        super().__init__()
        self.m_feat = m_feat
        self.loss_features = [self.m_feat[i] for i in layer_ids]
        # Hooks capture activations during forward passes of m_feat.
        self.hooks = hook_outputs(self.loss_features, detach=False)
        self.wgts = layer_wgts
        self.metric_names = ['pixel',] + [f'feat_{i}' for i in range(len(layer_ids))
              ] + [f'gram_{i}' for i in range(len(layer_ids))]

    def make_features(self, x, clone=False):
        """Run x through the feature net and collect hooked activations."""
        self.m_feat(x)
        return [(o.clone() if clone else o) for o in self.hooks.stored]

    def forward(self, input, target):
        out_feat = self.make_features(target, clone=True)
        in_feat = self.make_features(input)
        self.feat_losses = [base_loss(input,target)]
        self.feat_losses += [base_loss(f_in, f_out)*w
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        self.feat_losses += [base_loss(gram_matrix(f_in), gram_matrix(f_out))*w**2 * 5e3
                             for f_in, f_out, w in zip(in_feat, out_feat, self.wgts)]
        # Expose per-term values so LossMetrics can report them.
        self.metrics = dict(zip(self.metric_names, self.feat_losses))
        return sum(self.feat_losses)

    def __del__(self): self.hooks.remove()

# + id="Rwt3AAMDoGKv"
feat_loss = FeatureLoss(vgg_m, blocks[2:5], [5,15,2])

# + id="2YtjntlCoIgo"
wd = 1e-3
y_range = (-3.,3.)

# + id="faBs7PvAoQcY"
def create_gen_learner():
    """U-Net generator learner with spectral norm, self-attention and the
    perceptual feature loss."""
    return unet_learner(data, arch, wd=wd, blur=True,norm_type=NormType.Spectral,self_attention=True,
                        y_range=(-3.0, 3.0), loss_func=feat_loss, callback_fns=LossMetrics)

gc.collect();

# + id="7UIo4hEwoS4o"
learn_gen = create_gen_learner()

# + id="Z_WqNg6NoU5I"
learn_gen.lr_find()

# + id="l4OK0nMYoYRI"
# FIX: was `lr = 1-01`, which is a SyntaxError in Python 3 (leading-zero
# integer literal); the intended value is clearly 1e-01.
lr = 1e-01
epoch = 5

def do_fit(save_name, lrs=None, pct_start=0.9):
    """Run `epoch` one-cycle epochs at `lrs`, save the model, show results.

    FIX: the original default was `lrs=slice(lr)`, which Python evaluates
    ONCE at definition time — later cells that rebind the global `lr`
    (e.g. `lr = 1E-06; do_fit('db4')`) silently kept training at the old
    rate. Using a None sentinel reads the current global `lr` at call time
    while staying call-compatible with explicit `lrs=` arguments.
    """
    if lrs is None:
        lrs = slice(lr)
    learn_gen.fit_one_cycle(epoch, lrs, pct_start=pct_start,)
    learn_gen.save(save_name)
    learn_gen.show_results(rows=1, imgsize=5)

# + id="D4EcXwfXoc2Q"
do_fit('da', slice(lr)) #lr*10

# + id="gzoUTyG7pm-d"
learn_gen.unfreeze()

# + id="4kIQ5qGYppbS"
learn_gen.lr_find()

# + id="BB2tZvVzpt-s"
epoch = 5
do_fit('db', slice(1E-2))

# + [markdown] id="FtEI1s2BqFz2"
# **128px**

# + id="XqKWCsBgp236"
data = get_data(8,128)
learn_gen.data = data
learn_gen.freeze()
gc.collect()
learn_gen.load('db');

# + id="RlFdd8pOp9N1"
epoch =5
lr = 1E-03
do_fit('db2',slice(lr))

# + id="bDFMDymEqDDm"
learn_gen.unfreeze()

# + id="55vcLnbrqK2p"
epoch = 5
do_fit('db3', slice(1e-02,1e-5), pct_start=0.3)

# + [markdown] id="GMSbEIC8qYv1"
# **192px**
#

# + id="BLmzmoJ7qTJy"
data = get_data(5,192)
learn_gen.data = data
learn_gen.freeze()
gc.collect()
learn_gen.load('db3');

# + id="VdxT7G9Wqdud"
epoch =5
lr = 1E-06
do_fit('db4')

# + id="gABZH1FwqjeG"
learn_gen.unfreeze()

# + id="WqWh1dLTql8z"
epoch = 5
do_fit('db5', slice(1e-06,1e-4), pct_start=0.3)

# + [markdown] id="I_YqXNzjq8m3"
# # **Portraits**

# + id="pD6ognJgqqd6"
# Second stage: full portrait pairs, 80/20 split.
src = ImageImageList.from_folder(path_lr3).split_by_rand_pct(0.2, seed=42)

# + id="uFws14t6rA6y"
def get_data(bs,size):
    """Rebind get_data for the portrait stage: targets live in path_hr3 and
    the transforms use extra zoom instead of the gradient transform."""
    data = (src.label_from_func(lambda x: path_hr3/x.name)
            .transform(get_transforms(max_zoom=2.), size=size, tfm_y=True)
            .databunch(bs=bs,num_workers = 0).normalize(imagenet_stats, do_y=True))
    data.c = 3
    return data

# + [markdown] id="45kYzAwvrL9T"
# **128px**

# + id="Ish7mEzdrEDa"
data = get_data(8,128)
learn_gen.data = data
learn_gen.freeze()
gc.collect()
learn_gen.load('db5');

# + id="HdUSomYArTNH"
data.show_batch(ds_type=DatasetType.Valid, rows=2, figsize=(9,9))

# + id="zAlhXL46re94"
learn_gen.lr_find()

# + id="ADT1-uoyrhbA"
epoch = 5
lr = 1e-03
do_fit('db6')

# + id="whapmkF0rmHu"
learn_gen.unfreeze()

# + id="tdO4W55TrqQE"
epoch = 5
do_fit('db7', slice(6.31E-07,1e-5), pct_start=0.3)

# + [markdown] id="WMHRoP1Vr5zA"
# **192px**

# + id="B_YCV5tYryyV"
data = get_data(4,192)
learn_gen.data = data
learn_gen.freeze()
gc.collect()
learn_gen.load('db7');

# + id="e0buABN-r82o"
learn_gen.lr_find()

# + id="crNFdzojr_vS"
epoch = 5
lr = 4.37E-05
do_fit('db8')

# + id="mRJ_954VsDW7"
learn_gen.unfreeze()

# + id="Mr8mvuhVsGk2"
epoch = 5
do_fit('db9', slice(1.00E-05,1e-3), pct_start=0.3)
Model/Train.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from math import ceil, log
from functools import reduce
import re

from scipy.stats import linregress as linear_regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

pd.set_option("display.precision", 16)
# -


class DBMeta(object):
    """Analytic cost model for an LSM-tree configuration.

    Parameters: T size ratio, K/Z merge policy knobs (K == T-1 means tiering),
    B buffer bytes, E entry bytes, bpe bits-per-entry of Bloom filter,
    L number of levels, phi read/write cost asymmetry.
    """

    def __init__(self, T, K, Z, B, E, bpe, L, phi=1):
        self.T, self.K, self.Z, self.B, self.E, self.L = int(T), int(K), int(Z), int(B), int(E), int(L)
        self.bpe, self.phi = bpe, phi
        self.is_tiering = True if (K == T - 1) else False
        self.N = self.estimate_N()
        # Total Bloom-filter memory in bytes (bpe bits per entry / 8).
        self.mfilt = int(self.bpe * self.N) >> 3

    def estimate_N(self):
        """Total entries a tree of L full levels can hold (geometric levels)."""
        num_entries = 0
        for level in range(1, int(self.L) + 1):
            num_entries += (self.B / self.E) * (self.T - 1) * (self.T ** (level - 1))
        return num_entries

    def level(self):
        """Number of levels required to hold N entries of size E."""
        return ceil(log((self.N * self.E) / self.B + 1, self.T))

    def write_cost(self):
        """Amortized write I/O cost per entry (halved for leveling merges)."""
        cost = self.T * (1 + self.phi) * (self.L / self.B)
        cost /= self.T if self.is_tiering else 2
        return cost

    def fp(self, curr_level):
        """Bloom-filter false-positive rate at a given level (deeper levels
        get exponentially smaller filters under this allocation)."""
        alpha = (self.T ** (self.T / (self.T - 1))) / (self.T ** (self.L + 1 - curr_level))
        return alpha * (np.e ** (-1 * self.mfilt * (np.log(2) ** 2) / self.N))

    def non_empty_read_cost(self):
        """Expected I/Os for a point lookup on an existing key: one hit plus
        false positives in the runs probed before the target run."""
        cost = 0
        # Probability the target key resides at each level (level capacity / N).
        run_probs = [self.B * (self.T ** (i)) / (self.E * self.N) for i in range(0, self.L)]
        fp_levels = [self.fp(i + 1) for i in range(0, self.L)]
        fp_levels_sum = [0] + list(np.cumsum(fp_levels))
        if self.is_tiering:
            for i in range(0, self.L):
                for r in range(1, self.T):
                    cost += run_probs[i] * (1 + (self.T - 1) * fp_levels_sum[i] + (r - 1) * fp_levels[i])
        else:
            for i in range(0, self.L):
                cost += (self.T - 1) * run_probs[i] * (1 + fp_levels_sum[i])
        return cost

    def old_non_empty_read_cost(self):
        """Earlier closed-form approximation of non_empty_read_cost (kept for
        comparison plots)."""
        cost = 1 + self.empty_read_cost()
        sub = np.e ** ((-1 * self.mfilt * (np.log(2) ** 2) / (self.N * self.L))
                       + (1 / (self.L * (self.T - 1))))
        if self.is_tiering:
            sub *= (self.T / (2 * (self.T - 1)))
        cost -= sub
        return cost

    def empty_read_cost(self):
        """Expected I/Os for a lookup on a missing key (all false positives)."""
        if self.is_tiering:
            alpha = self.T ** (self.T / (self.T - 1))
        else:
            alpha = (self.T ** (self.T / (self.T - 1))) / (self.T - 1)
        beta = np.e ** (-1 * self.mfilt * (np.log(2) ** 2) / self.N)
        return alpha * beta

    def dost_write_cost(self):  # (sic) name kept for callers
        """Dostoevsky-style write cost split across K (upper) and Z (last)."""
        return (1 / self.B) * (((self.T - 1) * (self.level() - 1) / (self.K + 1))
                               + (self.T - 1) / (self.Z + 1))


# +
def set_style():
    """Configure seaborn/matplotlib for paper-style serif figures."""
    # This sets reasonable defaults for font size for a paper
    sns.set_context("paper")
    # Set the font to be serif
    sns.set(font='serif')#, rc={'text.usetex' : True})
    # Make the background white, and specify the specific font family
    sns.set_style("white", {
        "font.family": "serif",
        "font.serif": ["Times", "Palatino", "serif"]
    })
    # Set tick size for axes
    sns.set_style("ticks", {"xtick.major.size": 6, "ytick.major.size": 6})
    fsz = 14
    plt.rc('font', size=fsz)
    plt.rc('axes', titlesize=fsz)
    plt.rc('axes', labelsize=fsz)
    plt.rc('xtick', labelsize=fsz)
    plt.rc('ytick', labelsize=fsz)
    plt.rc('legend', fontsize=fsz)
    plt.rc('figure', titlesize=fsz)
    # plt.style.use('fivethirtyeight')


def read_data(file_path, operation='read'):
    """Load a benchmark CSV and attach model + measured cost columns.

    operation='read' adds non-empty/empty read costs; anything else adds
    write costs. Measured costs are total time divided by operation count.
    """
    df = pd.read_csv(file_path)
    if operation == 'read':
        df['model_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'],
            row['B'], row['E'], row['bpe'], row['L']).non_empty_read_cost(), axis=1)
        df['old_non_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'],
            row['B'], row['E'], row['bpe'], row['L']).old_non_empty_read_cost(), axis=1)
        df['model_empty_read_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'],
            row['B'], row['E'], row['bpe'], row['L']).empty_read_cost(), axis=1)
        df['measured_non_empty_read_cost'] = df['valid_read_time'] / df['num_reads']
        df['measured_empty_read_cost'] = df['empty_read_time'] / df['num_reads']
    else:
        df['model_write_cost'] = df.apply(lambda row: DBMeta(row['T'], row['K'], row['Z'],
            row['B'], row['E'], row['bpe'], row['L']).write_cost(), axis=1)
        df['measured_write_cost'] = df['write_time'] / df['num_writes']
    return df
# -


def plot_cost_ratio(df, operation='non_empty_read', num_runs=1):
    """Plot measured (left axis) vs model (right axis) cost against size ratio T.

    FIX: three references to the notebook global `df1` inside this function
    (tick positions/labels and the buffer-size annotation) now correctly use
    the `df` parameter — previously any call with a different frame silently
    annotated it with df1's values.
    """
    is_tiering = True if df.iloc[-1]['K'] == df.iloc[-1]['T'] - 1 else False
    fig, ax1 = plt.subplots(figsize=(15,5))
    num_op = 'num_reads' if operation == 'non_empty_read' or operation == 'empty_read' else 'num_writes'
    if operation == 'non_empty_read':
        op_str = 'Non Empty Read'
    elif operation == 'empty_read':
        op_str = 'Empty Read'
    else:
        op_str = 'Writes'
    ax1.set_xlabel('Size Ratio (T)'), ax1.set_ylabel(f'Avg ms per {op_str}', color='black')
    l1 = ax1.plot(df['T'], df[f'measured_{operation}_cost'], 'o--', dashes=(5,1), color='black')
    if num_runs > 1:
        # Error bars from the per-run time columns, normalized per operation.
        if operation == 'non_empty_read':
            cols = ['valid_read_time_' + str(i) for i in range(num_runs)]
        else:
            cols = [f'{operation}_time_' + str(i) for i in range(num_runs)]
        df[f'{operation}_std'] = df[cols].div(df[num_op], axis=0).std(axis=1)
        ax1.errorbar(df['T'], df[f'measured_{operation}_cost'], yerr=df[f'{operation}_std'], markersize=10, ls='')
    ax1.tick_params(axis='y', labelcolor='black')
    ax1.set_ylim(bottom=0)
    ax2 = ax1.twinx()
    ax2.set_ylabel('Model Cost', color='red')
    l2 = ax2.plot(df['T'], df[f'model_{operation}_cost'], color='red')
    ax2.tick_params(axis='y', labelcolor='red')
    ax2.set_ylim(bottom=0)
    E, num_non_empty_reads = df.iloc[-1][['E', num_op]]
    params = ['Entry Size: {} KB'.format(E / (1 << 10))]
    if operation == 'non_empty_read' or operation == 'empty_read':
        ax3 = ax1.twiny()
        ax3.tick_params(axis='x'), ax3.set_xlabel('Buffer Size, Levels'), ax3.set_xlim(ax1.get_xlim()), ax3.set_xticks(df['T'])
        ax3.set_xticklabels(list(zip(df['B'].apply(lambda x : f'{x >> 20} MB'), df['L'])))
        params += ['{}:\n{} million (~ {:.2f} GB)'.format(op_str, int(num_non_empty_reads / 1e6),
                                                          int(num_non_empty_reads) * E / (1 << 30))]
    else:
        ax3 = ax1.twiny()
        ax3.tick_params(axis='x'), ax3.set_xlabel('Levels'), ax3.set_xlim(ax1.get_xlim()), ax3.set_xticks(df['T'])
        ax3.set_xticklabels(df['L'])
        params += [f'Buffer size : {int(df.iloc[-1]["B"]) >> 20} MB']
    ax1.text(1.075, 0.35, '\n'.join(params), transform=ax1.transAxes)
    ax1.set_xticks(df['T']), ax1.set_xticklabels(df['T'])
    ax1.legend(l1 + l2, ['Measured', 'Model'], bbox_to_anchor=(1.05, 1), loc=2, ncol=1)
    ax1.set_title(f'{op_str} Tiering\n' if is_tiering else f'{op_str} Leveling\n', fontsize=20)
    fig.tight_layout()
    return fig


def plot_diff(df1, df2, operation='non_empty_read'):
    """Overlay measured and model costs of two configurations (e.g. tiering
    vs leveling) on one figure with a two-column legend."""
    fig, ax1 = plt.subplots(figsize=(15,5))
    num_op = 'num_reads' if operation == 'non_empty_read' or operation == 'empty_read' else 'num_writes'
    is_tiering_1 = True if df1.iloc[-1]['K'] == df1.iloc[-1]['T'] - 1 else False
    is_tiering_2 = True if df2.iloc[-1]['K'] == df2.iloc[-1]['T'] - 1 else False
    label1, label2 = 'Tier' if is_tiering_1 else 'Level', 'Tier' if is_tiering_2 else 'Level'
    if operation == 'non_empty_read':
        op_str = 'Non Empty Read'
    elif operation == 'empty_read':
        op_str = 'Empty Read'
    else:
        op_str = 'Writes'
    ax1.set_xlabel('Size Ratio (T)'), ax1.set_ylabel(f'Avg ms per {op_str}')
    l1 = ax1.plot(df1['T'], df1[f'measured_{operation}_cost'], 'o--', label=label1)
    l2 = ax1.plot(df2['T'], df2[f'measured_{operation}_cost'], 'o--', label=label2)
    ax1.set_ylim(bottom=0)
    E, num_operations = df1.iloc[-1][['E', num_op]]
    params = ['Entry Size: {} KB'.format(E / (1 << 10))]
    if operation == 'non_empty_read' or operation == 'empty_read':
        ax3 = ax1.twiny()
        ax3.tick_params(axis='x'), ax3.set_xlabel('Buffer Size, Levels'), ax3.set_xlim(ax1.get_xlim()), ax3.set_xticks(df1['T'])
        ax3.set_xticklabels(list(zip(df1['B'].apply(lambda x : f'{x >> 20} MB'), df1['L'])))
        params += ['{}:\n{} million (~ {:.2f} GB)'.format(op_str, int(num_operations / 1e6),
                                                          int(num_operations) * E / (1 << 30))]
    else:
        ax3 = ax1.twiny()
        ax3.tick_params(axis='x'), ax3.set_xlabel('Levels'), ax3.set_xlim(ax1.get_xlim()), ax3.set_xticks(df1['T'])
        ax3.set_xticklabels(df1['L'])
        params += [f'Buffer size : {int(df1.iloc[-1]["B"]) >> 20} MB']
    ax2 = ax1.twinx()
    ax2.set_ylabel('Model Cost')
    l3 = ax2.plot(df1['T'], df1[f'model_{operation}_cost'], label=label1)
    l4 = ax2.plot(df2['T'], df2[f'model_{operation}_cost'], label=label2)
    ax2.set_ylim(bottom=0)
    # Build a two-column legend: "Measured" column and "Model" column headers
    # via invisible phony handles, interleaving the four real lines.
    lines = l1 + l3 + l2 + l4
    phony = [plt.plot([], marker='', ls='')[0]]*2
    handles = phony[:1] + lines[::2] + phony[1:] + lines[1::2]
    line_labels = [l.get_label() for l in lines]
    labels = ['Measured'] + line_labels[::2] + ['Model'] + line_labels[1::2]
    legend = ax1.legend(handles, labels, bbox_to_anchor=(1.05, 1), loc=2, ncol=2)
    for vpack in legend._legend_handle_box.get_children():
        for hpack in vpack.get_children()[:1]:
            hpack.get_children()[0].set_width(0)
    ax1.set_xticks(df1['T'])
    ax1.set_xticklabels(df1['T'])
    ax1.text(1.1, 0.35, '\n'.join(params), transform=ax1.transAxes)
    ax1.set_title(f'{op_str} Difference\n', fontsize=20)
    fig.tight_layout()
    return fig


# # Non Empty Reads

# +
set_style()
df1 = read_data('../../data/read_cost/4.csv', 'read')
_ = plot_cost_ratio(df1, 'non_empty_read', num_runs=5)

df2 = read_data('../../data/read_cost/5.csv', 'read')
_ = plot_cost_ratio(df2, 'non_empty_read', num_runs=5)

_ = plot_diff(df1, df2, 'non_empty_read')
# -

# # Empty Reads

_ = plot_cost_ratio(df1, 'empty_read', num_runs=5)

_ = plot_cost_ratio(df2, 'empty_read', num_runs=5)

_ = plot_diff(df1, df2, 'empty_read')

# # Writes

# +
df1 = read_data('../../data/write_cost/3.csv', 'writes')
_ = plot_cost_ratio(df1, operation='write', num_runs=3)

df2 = read_data('../../data/write_cost/4.csv', 'writes')
_ = plot_cost_ratio(df2, operation='write', num_runs=3)

_ = plot_diff(df1, df2, 'write')
# -

df1
script/visualization/adjusted_exp.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Generate classification data
#
# Use this function to generate sample data sets, wraps scikit-learn's **[make_classification](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html#sklearn-datasets-make-classification)**. See the link for a description of all parameters.

# nuclio: ignore
import nuclio

# +
import os
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

from typing import Optional, List, Any
from sklearn.datasets import make_classification

from mlrun.execution import MLClientCtx


def gen_class_data(
    context: MLClientCtx,
    n_samples: int,
    m_features: int,
    k_classes: int,
    header: Optional[List[str]],
    label_column: Optional[str] = "labels",
    weight: float = 0.5,
    random_state: int = 1,
    key: str = "classifier-data",
    file_ext: str = "parquet",
    sk_params: Optional[dict] = None
):
    """Create a binary classification sample dataset and save.
    If no filename is given it will default to:
    "simdata-{n_samples}X{m_features}.parquet".

    Additional scikit-learn parameters can be set using **sk_params, please see
    https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html
    for more details.

    :param context:       function context
    :param n_samples:     number of rows/samples
    :param m_features:    number of cols/features
    :param k_classes:     number of classes
    :param header:        header for features array
    :param label_column:  column name of ground-truth series
    :param weight:        fraction of sample negative value (ground-truth=0);
                          a list of per-class proportions is also accepted
    :param random_state:  rng seed (see https://scikit-learn.org/stable/glossary.html#term-random-state)
    :param key:           key of data in artifact store
    :param file_ext:      (pqt) extension for parquet file
    :param sk_params:     additional parameters for `sklearn.datasets.make_classification`
    """
    # BUG FIX: the previous default `sk_params={}` was a shared mutable
    # default argument; normalize a None default instead.
    sk_params = sk_params or {}

    # `make_classification` expects `weights` to be array-like (one entry per
    # class, or n_classes - 1 entries); wrap a bare float so the documented
    # scalar default keeps working while list callers are passed through.
    weights = weight if isinstance(weight, (list, tuple)) else [weight]

    features, labels = make_classification(
        n_samples=n_samples,
        n_features=m_features,
        weights=weights,
        n_classes=k_classes,
        random_state=random_state,
        **sk_params)

    # make dataframes, add column names, concatenate (X, y)
    X = pd.DataFrame(features)
    if not header:
        X.columns = ["feat_" + str(x) for x in range(m_features)]
    else:
        X.columns = header

    y = pd.DataFrame(labels, columns=[label_column])

    data = pd.concat([X, y], axis=1)

    context.log_dataset(key, df=data, format=file_ext, index=False)

# +
# nuclio: end-code
# -

# ### save

# +
from mlrun import code_to_function
from mlrun.platforms.other import auto_mount

gpus = False

fn_params = {
    "name":        "gen_class_data",
    "handler":     "gen_class_data",
    "kind":        "job",
    "image":       "mlrun/ml-models" if not gpus else "mlrun/ml-models-gpu",
    "description": "simulate classification data using scikit-learn",
    "categories":  ["simulators", "ml"],
    "labels":      {"author": "yjb", 'framework': 'sklearn'},
}

fn = code_to_function(**fn_params)
fn.export("function.yaml")
fn.apply(auto_mount())
# -

# ### test function

# +
from mlrun import NewTask, mlconf

task_params = {
    "name": "tasks generate classification data",
    "params": {
        "n_samples": 10_000,
        "m_features": 5,
        "k_classes": 2,
        "weight": [0.5, 0.5],
        "sk_params": {"n_informative": 2},
        "file_ext": "csv"}}
# -

# ### local

from mlrun import run_local
run_local(NewTask(**task_params), handler=gen_class_data)

# ### remote

run = fn.run(NewTask(**task_params), artifact_path=mlconf.artifact_path)
gen_class_data/gen_class_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Prefix Generator # Give initial sentence, then the models will continue to generate the text. # <div class="alert alert-info"> # # This tutorial is available as an IPython notebook at [Malaya/example/prefix-generator](https://github.com/huseinzol05/Malaya/tree/master/example/prefix-generator). # # </div> # %%time import malaya from pprint import pprint # ### Load GPT2 # # Malaya provided Pretrained GPT2 model, specific to Malay, we called it GPT2-Bahasa. This interface not able us to use it to do custom training. # # GPT2-Bahasa was pretrained on ~1.2 billion words. # If you want to download pretrained model for GPT2-Bahasa and use it for custom transfer-learning, you can download it here, https://github.com/huseinzol05/Malaya/tree/master/pretrained-model/gpt2, some notebooks to help you get started. # #### List available GPT2 malaya.generator.available_gpt2() # #### load model # # ```python # def gpt2(model: str = '345M', quantized: bool = False, **kwargs): # """ # Load GPT2 model to generate a string given a prefix string. # # Parameters # ---------- # model : str, optional (default='345M') # Model architecture supported. Allowed values: # # * ``'117M'`` - GPT2 117M parameters. # * ``'345M'`` - GPT2 345M parameters. # # quantized : bool, optional (default=False) # if True, will load 8-bit quantized model. # Quantized model not necessary faster, totally depends on the machine. 
# # Returns # ------- # result: malaya.model.tf.GPT2 class # """ # ``` model = malaya.generator.gpt2(model = '117M') model_quantized = malaya.generator.gpt2(model = '117M', quantized = True) string = 'ceritanya sebegini, aku bangun pagi baca surat khabar berita harian, tetiba aku nampak cerita seram, ' # #### generate # # ```python # """ # generate a text given an initial string. # # Parameters # ---------- # string : str # maxlen : int, optional (default=256) # length of sentence to generate. # n_samples : int, optional (default=1) # size of output. # temperature : float, optional (default=1.0) # temperature value, value should between 0 and 1. # top_k : int, optional (default=0) # top-k in nucleus sampling selection. # top_p : float, optional (default=0.0) # top-p in nucleus sampling selection, value should between 0 and 1. # if top_p == 0, will use top_k. # if top_p == 0 and top_k == 0, use greedy decoder. # # Returns # ------- # result: List[str] # """ # ``` print(model.generate(string, temperature = 0.1)) print(model.generate(string, temperature = 0.1, top_p = 0.8)) # ### Using Babble method # # We also can generate a text like GPT2 using Transformer-Bahasa. Right now only supported BERT, ALBERT and ELECTRA. # # ```python # def babble( # string: str, # model, # generate_length: int = 30, # leed_out_len: int = 1, # temperature: float = 1.0, # top_k: int = 100, # burnin: int = 15, # batch_size: int = 5, # ): # """ # Use pretrained transformer models to generate a string given a prefix string. # https://github.com/nyu-dl/bert-gen, https://arxiv.org/abs/1902.04094 # # Parameters # ---------- # string: str # model: object # transformer interface object. Right now only supported BERT, ALBERT. # generate_length : int, optional (default=256) # length of sentence to generate. # leed_out_len : int, optional (default=1) # length of extra masks for each iteration. # temperature: float, optional (default=1.0) # logits * temperature. 
# top_k: int, optional (default=100) # k for top-k sampling. # burnin: int, optional (default=15) # for the first burnin steps, sample from the entire next word distribution, instead of top_k. # batch_size: int, optional (default=5) # generate sentences size of batch_size. # # Returns # ------- # result: List[str] # """ # ``` # Make sure you already installed `tensorflow-probability`, # # ```bash # pip3 install tensorflow-probability==0.7.0 # ``` # + # # !pip3 install tensorflow-probability==0.7.0 # - electra = malaya.transformer.load(model = 'electra') malaya.generator.babble(string, electra) # ### ngrams # # You can generate ngrams pretty easy using this interface, # # ```python # def ngrams( # sequence, # n: int, # pad_left = False, # pad_right = False, # left_pad_symbol = None, # right_pad_symbol = None, # ): # """ # generate ngrams. # # Parameters # ---------- # sequence : List[str] # list of tokenize words. # n : int # ngram size # # Returns # ------- # ngram: list # """ # ``` # + string = 'saya suka makan ayam' list(malaya.generator.ngrams(string.split(), n = 2)) # - list(malaya.generator.ngrams(string.split(), n = 2, pad_left = True, pad_right = True)) list(malaya.generator.ngrams(string.split(), n = 2, pad_left = True, pad_right = True, left_pad_symbol = 'START')) list(malaya.generator.ngrams(string.split(), n = 2, pad_left = True, pad_right = True, left_pad_symbol = 'START', right_pad_symbol = 'END'))
example/prefix-generator/load-prefix-generator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# <H1> Session 1 - Analysing Transaction Data </H1>
#
# In this session we will be analysing transaction data.
# <br>
# The data consist of transaction details that were executed within a specific time window.

# Declare the libraries that will be used
import pandas as pd
# Used to plot the results
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')

# +
url = 'http://samplecsvs.s3.amazonaws.com/SalesJan2009.csv'
# Use pandas to import data
orig_df = pd.read_csv(url)
# To keep original dataframe for referencing
df = orig_df.copy()
print('List of attributes:', df.columns.values.tolist())
# -

print(df.head())
print(df.dtypes)

# +
# Convert object types
df['Transaction_date'] = pd.to_datetime(df['Transaction_date'])
df['Account_Created'] = pd.to_datetime(df['Account_Created'])
df['Last_Login'] = pd.to_datetime(df['Last_Login'])
print(df.dtypes)
# -

# Convert prices to numeric in order to access math functions
df['Price'] = df['Price'].str.replace(',', '')
df['Price'] = pd.to_numeric(df['Price'])
print(df.dtypes)

# +
# Use general apply function
caps = lambda x: x.upper()
# def caps(x):
#     return x.upper()
df['Country'] = df['Country'].apply(caps)


def test(x):
    """Rename "Product1" to the short label "P1"; leave other values as-is."""
    if x == "Product1":
        return "P1"
    else:
        return x


df['Product'] = df['Product'].apply(test)

# +
# New column to calculate logins within a week
df['Login_week'] = ""


def lastweek(x):
    """Label a login timestamp relative to the reference date 2009-02-02.

    Returns "Last week" when the login is less than 7 days before the
    reference date, otherwise "Not last week".
    """
    date = pd.Timestamp(2009, 2, 2)
    date_diff = pd.Timedelta(date - x).days
    if date_diff < 7:
        return "Last week"
    else:
        return "Not last week"


df['Login_week'] = df['Last_Login'].apply(lastweek)
# -

print(df[df['Login_week'] == 'Last week'])

# +
# Select all rows with payment types
amex = df.loc[:, ['Price', 'Payment_Type']]
amex = amex[amex['Payment_Type'] == 'Amex']

diners = df.loc[:, ['Price', 'Payment_Type']]
diners = diners[diners['Payment_Type'] == 'Diners']

mc = df.loc[:, ['Price', 'Payment_Type']]
mc = mc[mc['Payment_Type'] == 'Mastercard']

visa = df.loc[:, ['Price', 'Payment_Type']]
visa = visa[visa['Payment_Type'] == 'Visa']

result = [amex, diners, mc, visa]
# BUG FIX: `keys` expects a sequence of labels; the previous code passed a
# dict of DataFrames and only worked by accident of dict key iteration.
result_concat = pd.concat([amex, diners, mc, visa],
                          keys=['amex', 'diners', 'mc', 'visa'])

# Use the vectorized Series.sum() instead of Python's built-in sum()
sum_amex = amex['Price'].sum()
sum_diners = diners['Price'].sum()
sum_mc = mc['Price'].sum()
sum_visa = visa['Price'].sum()

# Printing all purchases with each payment type
print("Sum of purchases with Amex is {}".format(sum_amex))
print("Sum of purchases with Diners is {}".format(sum_diners))
print("Sum of purchases with Mastercard is {}".format(sum_mc))
print("Sum of purchases with Visa is {}".format(sum_visa))
# -

# Saving new dataframe to csv
result_concat.to_csv('result_dataframe.csv', encoding='utf8')

# Get the total number of transactions for each payment type
payment_values = df['Payment_Type'].value_counts().to_frame()
print(payment_values)

# +
# Plotting results
# Creating new dataframe to be plotted
df_plot = pd.DataFrame([[sum_amex, sum_diners, sum_mc, sum_visa]])
df_plot.columns = ['Amex', 'Diners', 'Mastercard', 'Visa']

df_plot.plot.bar(figsize=(13, 8))
plt.legend(loc=2)
plt.ylabel('Sum $')
plt.show()
# -
koodiesimerkit/Session 1 - Analysis of Transactions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # # Creating a discrete-event simulator # # Let's imagine a factory where a certain resource arrives, a certain server processes it and sends it to another station and leaves the factory. # # ![discrete-event-model.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAboAAAAqCAYAAADF5p5yAAAAAXNSR0IArs4c6QAABEF0RVh0bXhmaWxlACUzQ214ZmlsZSUyMGhvc3QlM0QlMjJhcHAuZGlhZ3JhbXMubmV0JTIyJTIwbW9kaWZpZWQlM0QlMjIyMDIxLTA5LTI0VDE0JTNBNTElM0EzOC40MzVaJTIyJTIwYWdlbnQlM0QlMjI1LjAlMjAoWDExJTNCJTIwTGludXglMjB4ODZfNjQpJTIwQXBwbGVXZWJLaXQlMkY1MzcuMzYlMjAoS0hUTUwlMkMlMjBsaWtlJTIwR2Vja28pJTIwQ2hyb21lJTJGOTQuMC40NjA2LjU0JTIwU2FmYXJpJTJGNTM3LjM2JTIyJTIwdmVyc2lvbiUzRCUyMjE1LjMuMCUyMiUyMGV0YWclM0QlMjJmN2pkSlA1S2M4WUo0Yk1aekNyRCUyMiUyMHR5cGUlM0QlMjJnb29nbGUlMjIlM0UlM0NkaWFncmFtJTIwaWQlM0QlMjJxdGRSTnJEbnl1TVBJNGh1eU1nVSUyMiUzRTFWYkxqcHRBRVB3YVh5T2VhJTJCZVllTDNKSWNraGpwVE5jV1I2WWFLQlJ1UEdRTDQlMkJqV2tNYU5iUktpJTJGYkZ6UlYwejJQNm1wZ0VhN3o1cDFWWmZZUkV6Q0x3RXVhUlhpJTJGQ0lJNGZzM1BqbWg3SXZTRG5raXRUbnJLSDRtdCUyRmdGQ2VzSldPb0g5TEpBUURlbHlUdTZ3S0dCSE0wNVppJTJGVTg3QW5OZk5kU3BlQVEyNTB5THZ0Vko1VDE3Q3BZanZ4NzBHazI3T3pmeVlWek5RVExUZmFaU3JDZVVPRm1FYTR0SXZXanZGbUQ2YlFiZE9uekhzN01uZzVtb2FDWEpJanVlMnFIdTBIQ1Z4V0lsakpNc1ZCbU03SnZMVlpGQXQwQ0hxTXg1Z05peWFUUDVIY2dhcVZ1cWlKa0txUGN5Q3cwbWg0bjQyJTJGZFVxOWlRZmVOckh3RTdRQUtzdTNqRkV5eU9qaW1IZEdROTRRRlBhaGNtNDc0b25PMlRlQjlncHFmbnpGWGhZVElXZjJJY1M5SnA4Tk0xRDFXZGlkVUtLWlROZ1hST1hhbDkwOEY1VVlBeklFUHhpRVdqQ0o5bUslMkJ1eEpMcEtXNnNHZyUyQmtjTThYVVU1elVLYVNSZDlZcXhrNnhhMHpUYkF0MWZFZU5mZm12REMlMkZMZFlCTEVGejFvTm5oSkNFbGZTQ3ZBd2lUM0E5YVMyaHNrbFhSZDZmS3hmZGl2MyUyRmpZMWoxOGJMRjFmdnI5czRkbXk4SmQ0QkN5WjlwMUJYYWVYQXU1eVg3MjdGeTlmMUtsJTJCNlBiQzZYQThzZjlFRDdyZjZLbnNnREM3WEF5dEh2ZzA3N3paMGk2TCUyRnBodkQ4UmZ6T0RmNVR3ODNQd0UlM0QlM0MlMkZkaWFncmFtJTNFJTNDJTJGbXhmaWxlJTNF0uqfbAAAC+JJREFUeF7tXQdoVdsSnXwr
dr9i74q9KyoqKnZEEHsXK/aCvWP/9t479q5YUOwNBXvDgl2xV+w9/68hN9x/X2KSc+Ymk5vZ8HiYnD13staevfbMnsMNIqJgsuEWgSC3Bvw43/h1D67x6x5DzRaMX83suPctCAQHBwfbXugUyyCGkFQHivHrlF0i49c5drFhpvEbG1hy7qOHXxM65xjyTAsUlwAqn278KifIpXvGr0sAlU83oRMiyAJFCEilZoxfpcQIuWX8CgGp1IwJnRAxFihCQCo1Y/wqJUbILeNXCEilZkzohIixQBECUqkZ41cpMUJuGb9CQCo1Y0InRIwFihCQSs0Yv0qJEXLL+BUCUqkZEzohYixQhIBUasb4VUqMkFvGrxCQSs2Y0AkRY4EiBKRSM8avUmKE3DJ+hYBUasaETogYCxQhIJWaMX6VEiPklvErBKRSMyZ0QsRYoAgBqdSM8auUGCG3jF8hIJWaMaETIsYCRQhIpWaMX6XECLll/AoBqdSMCZ0QMRYoQkAqNWP8KiVGyC3jVwhIpWZM6ISIsUARAlKpGeNXKTFCbhm/QkAqNWNCJ0SMBYoQkErNGL9KiRFyy/gVAlKpmWgRuocPH1KOHDlo+PDhNHbsWEdQTJ48mS5cuEAbNmxwNP/GjRs0btw4gi8nT550ZONvkwI9UO7evUtDhw6lZMmS0bdv3wh4jhgxgurXr+8Yy8+fP1OhQoVo27ZtVLJkScd2wpp4+/Zt6t69O/Xt25dq167t2rbxG3UI/cHvx48fmc9s2bJRwoQJ6c2bNzR9+nTKmzdv1B30mhHo/EYVnDt37tDUqVNp0aJFVKdOHUqePDn9/PmTnj17RpUqVaKJEydGaLJu3bo8t1u3bhE+6+8HokXopk2bRvgvadKkhA3IyTh79iw9fvyYGjRo4GQ6z1m5ciUtXbo0UIWuHxEtJqKP4QDk6muYSpQoQZMmTaKaNWuyeRw6jh8/Tn369OF/HzhwgGrUqBEhN97P4WuDsC46dOhAqVOnjnBuZB/AYejq1au0cOFCWrt2baAInfFLRK9fv6bZs2fTmDFjeDlgTW7ZsoWwP7gZCoTOr/w6webatWtUpEgRPtTmz5+fTbx//55FLjJChzgsUKAAFStWLEp7hBNfI5oTLUJXpUoV6tmzJzVq1IgXZOnSpSPyyy+/D3Ch+0pE8YhoGhFNCEPwHAsdTnKJEiVicStevDhz8/v3b0KWh5M0TtXVqlWjS5cu/ZW3yD4nRX6WLFn4YBMgGZ3xS0S/fv2it2/fUrp06XiZHDx4kGrVqkXfv3+n+PHjO146CoTOb/w6BSUsoYOtFStWULt27aJkNrpj39c5vwsdsrDevXvT5s2bKVOmTNS6dWtOiT1j69at1KZNG0JpEiWswoUL0/3797k8VqZMGVq2bBlnYCNHjgwtO/bq1YvmzJnDcwYMGED37t3jzAIb2/z586lTp074Fln68OEDZ5HLly/n74sLcKHrTUSoJ/wr5NviZ/gInmOhA1c40SVIkIBmzpxJVatW9Xz/Hn39+pVLE+CucePG1LVrV8Z+yJAhVLBgQTpz5gzzjdKk73NYG+3bt6dNmzZR9erVCaWufv1wsCX68uULiys+D2uhWbNm9OTJEypatCidP3+eEidOTPv376eMGTOGG3ABJnTGbxhMr1q1isvTyPTcDAVC51d+nWATntB5bB0+fJj3WsTZ+vXrOR4XL15Mc+fO5Uc6d+7Mh5Bhw4b9I/ZLlSrlxCXHc/wudDNmzKD06dNTixYt+I/dvXs3C1bIB7PjGTJkoFGjRlGrVq0YMGQLCxYs4GcPHTpETZo04Y0UpSiI3o8fPxhcbIKwi4GS5urVq1nYUFcGyMhEUqZMSefOneNNN8CFDjC8IaJ/h6yGHz6C98HNN4xDsJCRQ5xy587Ni9dzqsOpukuXLoS6Psbp06eZB4jS+PHj6fLlyyxmvs/h2Tx58jCvELpBgwYR7mBwWMHAegDPKJPgGRyWsB7+/PlD5cuX53WB
TS68EWBCZ/yGQXSPHj0oXrx4NGvWLMebICYqEDq/8usEHF+hQ+xhD0GseoZnf7558yaXkZHUZM2alX/dsWNH3tvRGxFW7DvxyekcvwsdSlo7duzgy8xjx44RypgnTpygihUrhvoMMFBn9/wMmxr+DXA8Y82aNaFCh58NHDiQs4WjR4/So0ePGOR58+b9AwfYxueXK1cuOoTOKQ/+nPeTiHYSUUM3QgcHUR5CBo4TG8QMBw0s7L8tYnAJ/Pft2xeh0OXLl4+blSBgGOvWreO7GASR75po27YtiyCCKBqFzp88ObUdZ/m9desWZ/rYV1KkSOEUP57nffB2ZUh+shi/UXXNI3SeZhQkKIhPb6GDTRxIcY+HPblp06ahHxNnhA6lJghMhQoV+I/HSXzPnj2cCXjSW/zcidB57ocAMC490eGD8hhq+FOmTKGXL19yNoeyGTbiaBK6oKguJuHnUb9JE2JTNKPz9ROLGPetyNZ8hQ6lRdyNpU2bli5evMicREboUqVKxVx67tT27t3LGfu7d++0CJ3xS8Sl45jm9/nz53zlsWTJEsqePbvrMFKS0UVb/EYGMN+M7siRI5zR4erCe1y5coUbTvB7JDKeEWeEDuUEbHYtW7YM/eORAWzcuJHvW1BycCp0mIf6L7qCkNGhNObJArD4AbrHdjRmdDG5EaLG/5+QhpRgIhK7o8M93Pbt20PLxMAVZeX+/ftztuUrdGhQAQeVK1dmgYpsRod7wNGjR4eeCpHR4QSJw4ySjM74JeIGpJjkFyKHUjkOy8jqJYYCofNb/DrFJ7w7OrxikCZNGn69AwNXUrgrxyEVDWm4y8eIM0IHIcLdGu5rPAMlL9yvoM3ckwI7yehgD5svShdoN8adHAba1WEbGQQyCTTA7Ny5My5kdH7r2vr06ROXE3ft2hV6OEETEO5AUb5E6ahhw4b06tUrLgGhTA0OkEXjsIPMDHz4Pge+vO/oBg8ezB11uNDGQONS5syZQ+/ovMvZMVS6jEmhM36J6OnTp7wuJkyYwHf/noGNFs1LTocCofMbv04xCU/oEPt4JxoVMzQL5syZkw+1qKghqUEJ01fowop9p345meeXOzp0zGEDRHkDTQt4TwoDXXVoMkFHXtmyZbmRAN1zqOuinAkAkyRJwp17aGXHvRs20OvXr/PJ/tSpU7zpNW/enO2haQVNJmg2weaK8eDBA870kEkCfAgtSqf4LNznQGhRzkRjheRQECh+ew8HzT8oE7148YLxRsMI8EbXKw4x6G5FhyxOeVjk4A4BgAMNLqYhULhjRXnD+zl0yoFzdGuiEQUbF7pnkelDRPF/NDPhwILTIeyiyw4vraNbE+/e4d84zHgPfB6exQEI7/1BcHEQQmA6HcavDn6xp6DTz3f43vtHledA5jeqWOD5sF4Yx89xT499GFk1GgexzyMGc+XKxbzgDh8cYS/APo+7U9yzozHNO/br1avnxC3Hc/widI69icUTFQRKROi5er0gIuOB/nvjN7AZNn7jBr8oydhG6IJrCxQX4MWCqcZvLCDJhYvGrwvwYsFUy+iESLJAEQJSqRnjVykxQm4Zv0JAKjVjQidEjAWKEJBKzRi/SokRcsv4FQJSqRkTOiFiLFCEgFRqxvhVSoyQW8avEJBKzZjQCRFjgSIEpFIzxq9SYoTcMn6FgFRqxoROiBgLFCEglZoxfpUSI+SW8SsEpFIzJnRCxFigCAGp1Izxq5QYIbeMXyEglZoxoRMixgJFCEilZoxfpcQIuWX8CgGp1IwJnRAxFihCQCo1Y/wqJUbILeNXCEilZkzohIixQBECUqkZ41cpMUJuGb9CQCo1Y0InRIwFihCQSs0Yv0qJEXLL+BUCUqkZEzohYixQhIBUasb4VUqMkFvGrxCQSs2Y0AkRY4EiBKRSM8avUmKE3DJ+hYBUasaETogYCxQhIJWaMX6VEiPklvErBKRSM/8ndEp9jE1uxeQXc0aEE7513IY7BIxfd/hpn238
amfInX9Bmgl296fZbEPAEDAEDAFD4H8I/Bf96zKyF3RyRAAAAABJRU5ErkJggg==) # # # ## Rules # # - Arrivals have a quantity of components and a interarraival time. # # - Stations process the components, one at time. # # - When an input enters to the station, updates the processing time, and adds the element to process. # # - When processing time is completed, extracts the part from the elements to process and start to process another if it is possible. # # - Exit counts the procesed entities. # ## Creating stations # # Stations are `DiscreteEventModel`s, so you can extend the class and implement the abstract methods to work with them. # + pycharm={"name": "#%%\n"} import sys if sys.version_info >= (3, 8): from typing import TypedDict else: from typing_extensions import TypedDict from typing import Dict from gsf.core.expressions import Expression from gsf.core.types import Time from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem from gsf.models.models import DiscreteEventModel class StationState(TypedDict): """State definition of the station""" parts: int remaining_time: Time class Station(DiscreteEventModel): """Station of the simulator It process the inputs that receives. Its state has the number of parts that currently are inside the station and the remaining time to finish to process one of that parts. Attributes: _processing_time(Expression): time to process one part. """ _processing_time: Expression def __init__(self, dynamic_system: DiscreteEventDynamicSystem, processing_time: Expression): """ Args: dynamic_system (DiscreteEventDynamicSystem): factory where stations belongs. processing_time (Expression): time to process one part. """ super().__init__(dynamic_system, state={ 'parts': 0, 'remaining_time': -1 }) self._processing_time = processing_time def _internal_state_transition_function(self, state: StationState) -> StationState: """Removes one part from processing, and schedules and event to process a new one. 
""" state["parts"] = max(state["parts"] - 1, 0) self.schedule(self.get_time()) return state def _external_state_transition_function(self, state: StationState, inputs: Dict[str, int], event_time: Time) -> StationState: """Adds parts to process """ values = inputs.values() state["remaining_time"] = state["remaining_time"] - event_time for number_of_parts in values: if state["parts"] > 0: state["parts"] = state["parts"] + number_of_parts elif state["parts"] == 0: state["parts"] = number_of_parts self.schedule(self.get_time()) return state def _time_advance_function(self, state: StationState) -> Time: """Obtains the time of the next processed entity. """ if state["parts"] < 1: state["remaining_time"] = Time(-1) else: state["remaining_time"] = Time(self._processing_time.evaluate()) return state["remaining_time"] def _output_function(self, state: StationState) -> int: """Returns a part. """ if state["parts"] > 0: return 1 return 0 def __str__(self): return self.get_id() # - # Stations process the parts for a `_processing_time`. # # When time ends, an autonomous event is emitted, so the framework runs the `_internal_state_transition_function`, where the station removes one part from processing, and schedules an event to process a new one. When a part comes from another station, the framework runs the `_external_state_transition_function` where the station adds new parts to process. # # The time is `Time(-1)` if there is no parts to process, and evaluates an expression if there are parts. An expression is a datatype of the framework that allows include almost any value. # # # ## Creating the generator # # The generator creates parts given an interarrival time, and the number of pieces to create at that arrival. 
# + pycharm={"name": "#%%\n"}
from typing import List, Union

from gsf.core.expressions import Expression
from gsf.core.types import Time
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem
from gsf.models.core.base_model import ModelState
from gsf.models.models import DiscreteEventModel

# A generator's state is either a fixed schedule of arrival batch sizes
# (List[int]) or an Expression evaluated at every arrival.
GeneratorState = Union[Expression, List[int]]


class Generator(DiscreteEventModel):
    """Generator of parts

    Creates parts given an interarrival time, and the number of pieces to
    create at that arrival.

    Attributes:
        _interarrival_time (Expression): interarrival time of pieces.
    """

    _interarrival_time: Expression

    def __init__(self, dynamic_system: DiscreteEventDynamicSystem,
                 pieces: GeneratorState, interarrival_time: Expression):
        """Args:
            dynamic_system (DiscreteEventDynamicSystem): factory where stations belong.
            pieces (GeneratorState): number of pieces to create when there is an arrival.
            interarrival_time (Expression): interarrival time of pieces.
        """
        super().__init__(dynamic_system, state=pieces)
        # Schedule the first arrival immediately at t = 0.
        self.schedule(Time(0))
        self._interarrival_time = interarrival_time

    def _internal_state_transition_function(
            self, state: GeneratorState) -> ModelState:
        """Generates a part and schedules the next arrival."""
        if isinstance(state, list):
            # Fixed schedule: consume this arrival's batch size.
            state.pop(0)
        self.schedule(self.get_time())
        return state

    def _time_advance_function(self, state: GeneratorState) -> Time:
        """Calculates the time of the creation of the next part."""
        if isinstance(state, list):
            # Time(-1) stops autonomous scheduling once the schedule is empty.
            return self._interarrival_time.evaluate() if len(state) > 0 else Time(-1)
        else:
            # Expression state: arrivals continue indefinitely.
            return self._interarrival_time.evaluate()

    def _output_function(self, state: GeneratorState):
        """Get the created part (batch size of the current arrival)."""
        if isinstance(state, list):
            return state[0]
        else:
            return state.evaluate()

    def __str__(self):
        return "Generator"
# -

# The generator state could be a list of integers or an expression. If it is a list, the state represents the number of parts per arrival for each arrival, so `parts[0]` equals the number of parts at the first arrival, `parts[1]` at the second arrival, and so on. On the other hand, if it is an expression, it generates the number of parts that the expression gives during infinite arrivals.
#
# If the state is a list, the output of an event should be the first element in the list; in turn, if it is an expression, it will evaluate the expression.
#
# If the parts list is empty, the generator stops scheduling autonomous events.
#
#
# ## Creating the exit

# + pycharm={"name": "#%%\n"}
from typing import Dict

from gsf.core.types import Time
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem
from gsf.models.core.base_model import ModelState
from gsf.models.models import DiscreteEventModel


class Exit(DiscreteEventModel):
    """Exit

    Sink of a factory. Here come all the processed parts of the factory.
    """

    def __init__(self, dynamic_system: DiscreteEventDynamicSystem):
        """Args:
            dynamic_system (DiscreteEventDynamicSystem): factory where stations belong.
        """
        # State is the running count of parts received so far.
        super().__init__(dynamic_system, state=0)

    def _external_state_transition_function(self, state: int,
                                            inputs: Dict[str, int],
                                            event_time: Time) -> int:
        """Receives the parts and accumulates the count."""
        return state + sum(inputs.values())

    def _time_advance_function(self, state: ModelState) -> Time:
        """Prevents executing an autonomous event (the sink is passive)."""
        return Time(-1)

    def _output_function(self, state: ModelState) -> int:
        """Returns the number of parts processed."""
        return state

    def __str__(self):
        return "Exit"
# -

# ## Creating the factory
#
# Is the dynamic system where all the stations process parts.

# + pycharm={"name": "#%%\n"}
from gsf.core.expressions import Expression
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem


class FactorySystem(DiscreteEventDynamicSystem):
    """Factory system

    Is the dynamic system where all the stations process parts.

    Attributes:
        generator (Generator): Generator of entities of the factory.
        exit (Exit): Sink of the factory.
    """

    generator: Generator
    exit: Exit

    def __init__(self, pieces: GeneratorState, interarrival_time: Expression):
        """Args:
            pieces (GeneratorState): number of pieces to create when there is an arrival.
            interarrival_time (Expression): interarrival time of pieces.
        """
        super().__init__()
        self.generator = Generator(self, pieces, interarrival_time)
        self.exit = Exit(self)
# -

# It creates the generator and exit. The stations will have to be defined externally, so it is possible to define multiple types of factories.

# ## Running the simulation
#
# Let's make a simple simulation where one part arrives at time `t=0` and two parts at `t=1`, with processing times of one and two seconds.
#

# + pycharm={"name": "#%%\n"}
from gsf.experiments.experiment_builders import DiscreteEventExperiment
from gsf.core.mathematics.values import Value

factory = FactorySystem([1, 2], Value(1))  # 1 and 2 parts every second
station_1 = Station(factory, Value(1))  # 1 second processing time
station_2 = Station(factory, Value(2))  # 2 seconds processing time

# build the network
factory.generator.add(station_1)  # from generator to station_1
station_1.add(station_2)  # from station_1 to station_2
station_2.add(factory.exit)  # from station_2 to exit

experiment = DiscreteEventExperiment(factory)
experiment.simulation_control.start(stop_time=10)
experiment.simulation_control.wait()
print(factory.exit.get_output())
print(experiment.simulation_report.generate_report())
# -

# You can see the network using `factory.show()`

# + pycharm={"name": "#%%\n"}
factory.show()
# -

# ### Add expressions
#
# You can use framework's predefined expressions

# + pycharm={"name": "#%%\n"}
from gsf.core.mathematics.distributions import PoissonDistribution, ExponentialDistribution, TriangularDistribution
from gsf.experiments.experiment_builders import DiscreteEventExperiment

factory = FactorySystem(PoissonDistribution(5), ExponentialDistribution(0.5))
station_1 = Station(factory, TriangularDistribution(1, 2, 5))
station_2 = Station(factory, TriangularDistribution(1, 4, 5))

factory.generator.add(station_1)
station_1.add(station_2)
station_2.add(factory.exit)

experiment = DiscreteEventExperiment(factory)
experiment.simulation_control.start(stop_time=15)
experiment.simulation_control.wait()
print(factory.exit.get_output())
print(experiment.simulation_report.generate_report())
docs/examples/example3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Face Recognition Example
#
# In this example, we show how the MAX-Facial-Recognizer model can be used to recognize faces.
#
# ## Content of this demo
#
# We first use an example image to check that the REST API works, and then visualize the three test images, two of which are for <NAME> and one is for <NAME>. These images are passed to the MAX-Facial-Recognizer model to get their embedding vectors. Based on these vectors, we can identify whether two images belong to a same person or not.
#
#
#

# + language="bash"
# # Install the required packages
# pip install -q numpy scipy matplotlib Pillow requests
# -

import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import requests

# +
url = 'http://localhost:5000/model/predict'


def facenet(input_img):
    """
    Takes in input image file path and generates face embeddings/bboxes.

    :param input_img: path to an image file on disk.
    :returns: decoded JSON response from the model server.
    """
    # BUG FIX: open the image with a context manager so the file handle is
    # always closed (the previous version leaked it on every call).
    with open(input_img, 'rb') as image_file:
        files = {'image': ('image.jpg', image_file, 'images/jpeg')}
        r = requests.post(url, files=files).json()
    return r
# -


def same_person(img1, img2, threshold=0.95):
    """
    Determines if two images belong to the same face/person.

    This function is a simple example to show how to use the model
    to detect faces in images and generate the embedding vectors to identify
    people. The hard-coded `threshold` may not work for all images. With more
    label data, users could train a classifier (e.g., SVM) to perform the
    classification with higher accuracy.

    :param img1: path to the first image file.
    :param img2: path to the second image file.
    :param threshold: maximum L2 distance between embeddings to count as the
        same person.
    :returns: True if the embedding distance is within the threshold.
    """
    emb1 = facenet(img1)['predictions'][0]['embedding']
    emb2 = facenet(img2)['predictions'][0]['embedding']
    return np.linalg.norm(np.asarray(emb1) - np.asarray(emb2)) <= threshold


## Sanity check that it works
preds = facenet('assets/Lenna.jpg')
print(preds['status'])

paul1 = 'assets/Paul.jpeg'
paul2 = 'assets/Paul2.jpeg'
chris = 'assets/Chris.jpeg'

# +
## Visualize the test images
fig = plt.figure()

plt.subplot(1, 3, 1)
plt.imshow(plt.imread(paul1))
plt.title("Paul1")

plt.subplot(1, 3, 2)
plt.imshow(plt.imread(paul2))
plt.title("Paul2")

plt.subplot(1, 3, 3)
plt.imshow(plt.imread(chris))
plt.title("Chris")

plt.show()
# -

# Check if these two images belong to a same person
print(same_person(paul1, paul2))
print(same_person(paul1, chris))
print(same_person(paul2, chris))
demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %autosave 0 # # MCPC rehearsal problem Oct 25 2017 at UCSY # # ## Problem D: Ceiling Function # # ### Input format # # - 1st Line: 1 integer, Number of Test Case, each Test Case has following data # + 1 Line: 2 integer, **n**(Number of data line), and **k**(Number of integer in each data line) # + **n** Lines: data line that contains **k** integers # # # ### Output format # # Case: (test case number): Number of Shape of Binary Tree # # ### Sample Input # # ``` # 2 # 5 3 # 2 7 1 # 3 1 4 # 1 5 9 # 2 6 5 # 9 7 3 # 3 4 # 3 1 2 40000 # 3 4 2 1 # 33 42 17 23 # ``` # # ### Sample Output # # ``` # Case 1:4 # Case 2:2 # ``` # ### Binary Tree # # Binary Tree Node has 2 child nodes (may be no child), which are also Binary Tree Nodes. # Left node has a value smaller than Parent, Right node has a value equal or greater than Parent. # Every nodes that has smaller value is attached Left side, and vice verse. # # 1st Node is called *root node*, which does not have parent. # #### Sample of Binary tree creation # # - 30 (Root) # - 20 (Attached to Left of 30) # - 25 (Attached to Right of 20. 25 is smaller than 30 but left is 30 is occupied by 20, so attached to 20) # - 50 (Attached to Right of 30) # - 23 (Attached to Left of 25) # - 18 (Attached to Left of 20) # ### Class # # Every object of Python is a member of Class (Instance). Integer and String are one of the Class. # Programmer can create new class with **class** keyword. **class** can be used as a data container (similar to *struct* in C). # Variables of class menber is called *attributes*, *attributes* are not declared before use. # # **class** can have *method* (function) in it. # # Some ploblems can become easier if **class** is used. 
# +
# Version 0: simplest class definition.  Attributes are not declared in the
# class body; they are created by plain assignment on each instance.
class BinaryNode():
    pass

node30 = BinaryNode()
node30.left = None
node30.right = None

node20 = BinaryNode()
node20.left = None
node20.right = None
node30.left = node20

node25 = BinaryNode()
node25.left = None
node25.right = None
node20.right = node25

nodes = [node30, node20, node25]
for node in nodes:
    print(node.left, node.right)

# +
# Version 0.1: simple and common class definition -- __init__ sets the
# attributes, so callers no longer initialize left/right by hand.
class BinaryNode():
    def __init__(self, val, parent=None):
        self.val = val
        self.parent = parent
        self.left = None
        self.right = None

node30 = BinaryNode(30)
node20 = BinaryNode(20, node30)
node30.left = node20
node25 = BinaryNode(25, node20)
node20.right = node25
nodes = [node30, node20, node25]
for node in nodes:
    print(node.val, node.left, node.right)

# +
# Version 0.2: class with methods -- attaching a child also sets its parent.
class BinaryNode():
    def __init__(self, val):
        self.val = val
        self.parent = None
        self.left = None
        self.right = None

    def attach_left(self, child):
        self.left = child
        child.parent = self

    def attach_right(self, child):
        self.right = child
        child.parent = self

node30 = BinaryNode(30)
node20 = BinaryNode(20)
node30.attach_left(node20)  # self: node30, child: node20 -> request node30 to attach node20
node25 = BinaryNode(25)
node20.attach_right(node25)
nodes = [node30, node20, node25]
for node in nodes:
    print(node.val, node.left, node.right)

# +
# Version 0.3: attach() method picks attach_left or attach_right by comparing
# the child's value with this node's value (the binary-search-tree rule).
class BinaryNode():
    def __init__(self, val):
        self.val = val
        self.parent = None
        self.left = None
        self.right = None

    def attach_left(self, child):
        self.left = child
        child.parent = self

    def attach_right(self, child):
        self.right = child
        child.parent = self

    def attach(self, child):
        if child.val < self.val:
            self.attach_left(child)
        else:
            self.attach_right(child)

node30 = BinaryNode(30)
node20 = BinaryNode(20)
node30.attach(node20)
node25 = BinaryNode(25)
node20.attach(node25)
nodes = [node30, node20, node25]
for node in nodes:
    print(node.val, node.left, node.right)
# -

# ### Recursion (Recursive Function)
#
# The function that calls itself is called *recursive*; this technique and idea is very often used in programming contests.
# Recursion is very important in object oriented programming as well.
#
# One typical usage of recursion is the Fibonacci function. $$f(x) = f(x-1)+f(x-2)$$

# ### Message
#
# In SmallTalk (a famous object oriented Language), every function call is treated as a message to the object. 1+1 means sending message +,1 to integer object **1**.
#
# Calling the method of another object can be considered as a *message*.
# `self.left.attach(child)` means sending a message (method: attach, parameter: child) to the object **self.left**

# +
# Version 1.0: recursive attach -- if the slot is occupied, forward the
# request (as a "message") to the child that occupies it.
class BinaryNode():
    def __init__(self, val):
        self.val = val
        self.parent = None
        self.left = None
        self.right = None

    def attach_left(self, child):
        if self.left == None:
            self.left = child
            child.parent = self
        else:
            # left slot taken: delegate the whole attach decision to the child
            self.left.attach(child)

    def attach_right(self, child):
        if self.right == None:
            self.right = child
            child.parent = self
        else:
            self.right.attach(child)

    def attach(self, child):
        if child.val < self.val:
            self.attach_left(child)
        else:
            self.attach_right(child)

node30 = BinaryNode(30)
node20 = BinaryNode(20)
node30.attach(node20)
node25 = BinaryNode(25)
node20.attach(node25)
nodes = [node30, node20, node25]
for node in nodes:
    print(node.val, node.left, node.right)
# -

# Because the above function handles recursive attachment, every node can be attached from the **root** node.
# + node30 = BinaryNode(30) node20 = BinaryNode(20) node30.attach(node20) node25 = BinaryNode(25) node30.attach(node25) # <--- can try to attach every node to root nodes = [node30, node20, node25] for node in nodes: print(node.val, node.left, node.right) # + # Versino 1.1 print_val class BinaryNode(): def __init__(self, val): self.val = val self.parent = None self.left = None self.right = None def attach_left(self, child): if self.left == None: self.left = child child.parent = self else: self.left.attach(child) def attach_right(self, child): if self.right == None: self.right = child child.parent = self else: self.right.attach(child) def attach(self, child): if child.val < self.val: self.attach_left(child) else: self.attach_right(child) def print_val(self): print(self.val, self.left != None, self.right != None) if self.left: self.left.print_val() if self.right: self.right.print_val() node30 = BinaryNode(30) node20 = BinaryNode(20) node30.attach(node20) # self: node30, child: node20 -> request node30 to attach node20 node25 = BinaryNode(25) node30.attach(node25) node50 = BinaryNode(50) node30.attach(node50) node23 = BinaryNode(23) node30.attach(node23) node18 = BinaryNode(18) node30.attach(node18) node30.print_val() print() node20.print_val() # - # ### Counting number of shapes # # Above print_val() function print left side first until bottom of the tree, it is call **Depth First Search**. # # The shape of each node can be represented by (self.left != None, self.right != None), if it's (True, True) the node has children in both Left and Right. # If checking order is same and value of every nodes are same, the shape of whole tree is considered to be same. # # It's easy in Python to compare List, so add the sequence of node value if it's not already exists and count number of element will produce the result. 
# ### Sample Program to solve this problem

# +
# #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
2017 MCPC at UCSY
Problem-D: Ceiling Function

Reads test cases from stdin; for each test case, counts how many distinct
binary-search-tree shapes are produced by its data lines and prints that
count after the "Case N:" prefix emitted by the main loop.
'''
import sys


class TestCase():
    '''Bare data container; attributes (t, n, k, num2d) are set at runtime.'''
    pass


def parse_tc(tc):
    '''
    Read one test case from stdin into tc.

    Update: tc.n     - number of data lines
            tc.k     - number of integers per data line
            tc.num2d - list of integer tuples, one per data line
    Return: None
    '''
    tc.n, tc.k = tuple(map(int, sys.stdin.readline().split()))
    tc.num2d = list()
    for _ in range(tc.n):  # index was unused; '_' makes that explicit
        nums = tuple(map(int, sys.stdin.readline().split()))
        tc.num2d.append(nums)
    return


class BinTreeNode():
    '''Binary-search-tree node: smaller values go left, equal/greater right.'''

    def __init__(self, val):
        self.val = val
        self.parent = None
        self.left = None
        self.right = None

    def add_left(self, left):
        '''Attach *left* directly as this node's left child.'''
        left.parent = self
        self.left = left

    def add_right(self, right):
        '''Attach *right* directly as this node's right child.'''
        right.parent = self
        self.right = right

    def add_child(self, child):
        '''Insert *child* into the subtree rooted at this node (BST rule).'''
        if child.val < self.val:
            if self.left is None:  # 'is None', not '== None' (PEP 8)
                self.add_left(child)
            else:
                self.left.add_child(child)
        else:
            if self.right is None:
                self.add_right(child)
            else:
                self.right.add_child(child)

    def add_shape(self, shape):
        '''
        Append this node's shape tuple (has_parent, has_left, has_right)
        to *shape*, then recurse left and right (pre-order DFS).  Two trees
        with identical structure therefore yield identical lists.
        '''
        shape.append((self.parent is not None,
                      self.left is not None,
                      self.right is not None))
        if self.left is not None:
            self.left.add_shape(shape)
        if self.right is not None:
            self.right.add_shape(shape)


def shape_of_nums(nums):
    '''
    nums:   non-empty tuple of integers; nums[0] becomes the BST root
    return: list of (has_parent, has_left, has_right) tuples in pre-order,
            e.g. [(False, True, True), (True, False, False), ...]
    '''
    root = BinTreeNode(nums[0])
    for val in nums[1:]:  # slice iteration instead of range(1, len(nums))
        root.add_child(BinTreeNode(val))
    shape = list()
    root.add_shape(shape)
    return shape


def solve(tc):
    '''
    Parse one test case and print its number of distinct tree shapes.

    Input:  Test Case
    Return: None
    '''
    parse_tc(tc)
    distinct_shapes = list()
    for nums in tc.num2d:
        shape = shape_of_nums(nums)
        if shape not in distinct_shapes:
            distinct_shapes.append(shape)
    print(len(distinct_shapes))
    return


##
## Main routine
##
if __name__ == '__main__':
    tc = TestCase()
    tc.t = int(sys.stdin.readline())
    for i in range(tc.t):
        # "Case N:" prefix; solve() prints the count and the newline.
        print('Case ', i + 1, ':', sep='', end='')
        solve(tc)
MCPC-Rehearsal-D.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="Bp8t2AI8i7uP" # ##### Copyright 2020 The TensorFlow Authors. # + cellView="form" colab={} colab_type="code" id="rxPj2Lsni9O4" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="6xS-9i5DrRvO" # # Customizing a Transformer Encoder # + [markdown] colab_type="text" id="Mwb9uw1cDXsa" # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/official_models/nlp/customize_encoder"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/models/blob/master/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/models/official/colab/nlp/customize_encoder.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # 
</table> # + [markdown] colab_type="text" id="iLrcV4IyrcGX" # ## Learning objectives # # The [TensorFlow Models NLP library](https://github.com/tensorflow/models/tree/master/official/nlp/modeling) is a collection of tools for building and training modern high performance natural language models. # # The [TransformEncoder](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py) is the core of this library, and lots of new network architectures are proposed to improve the encoder. In this Colab notebook, we will learn how to customize the encoder to employ new network architectures. # + [markdown] colab_type="text" id="YYxdyoWgsl8t" # ## Install and import # + [markdown] colab_type="text" id="fEJSFutUsn_h" # ### Install the TensorFlow Model Garden pip package # # * `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`, # which is the nightly Model Garden package created daily automatically. # * `pip` will install all models and dependencies automatically. # + colab={} colab_type="code" id="thsKZDjhswhR" # !pip install -q tf-models-official==2.3.0 # + [markdown] colab_type="text" id="hpf7JPCVsqtv" # ### Import Tensorflow and other libraries # + colab={} colab_type="code" id="my4dp-RMssQe" import numpy as np import tensorflow as tf from official.modeling import activations from official.nlp import modeling from official.nlp.modeling import layers, losses, models, networks # + [markdown] colab_type="text" id="vjDmVsFfs85n" # ## Canonical BERT encoder # # Before learning how to customize the encoder, let's firstly create a canonical BERT enoder and use it to instantiate a `BertClassifier` for classification task. 
# + colab={} colab_type="code" id="Oav8sbgstWc-" cfg = { "vocab_size": 100, "hidden_size": 32, "num_layers": 3, "num_attention_heads": 4, "intermediate_size": 64, "activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "sequence_length": 16, "type_vocab_size": 2, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } bert_encoder = modeling.networks.TransformerEncoder(**cfg) def build_classifier(bert_encoder): return modeling.models.BertClassifier(bert_encoder, num_classes=2) canonical_classifier_model = build_classifier(bert_encoder) # + [markdown] colab_type="text" id="Qe2UWI6_tsHo" # `canonical_classifier_model` can be trained using the training data. For details about how to train the model, please see the colab [fine_tuning_bert.ipynb](https://github.com/tensorflow/models/blob/master/official/colab/fine_tuning_bert.ipynb). We skip the code that trains the model here. # # After training, we can apply the model to do prediction. # # + colab={} colab_type="code" id="csED2d-Yt5h6" def predict(model): batch_size = 3 np.random.seed(0) word_ids = np.random.randint( cfg["vocab_size"], size=(batch_size, cfg["sequence_length"])) mask = np.random.randint(2, size=(batch_size, cfg["sequence_length"])) type_ids = np.random.randint( cfg["type_vocab_size"], size=(batch_size, cfg["sequence_length"])) print(model([word_ids, mask, type_ids], training=False)) predict(canonical_classifier_model) # + [markdown] colab_type="text" id="PzKStEK9t_Pb" # ## Customize BERT encoder # # One BERT encoder consists of an embedding network and multiple transformer blocks, and each transformer block contains an attention layer and a feedforward layer. 
# + [markdown] colab_type="text" id="rmwQfhj6fmKz" # We provide easy ways to customize each of those components via (1) # [EncoderScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py) and (2) [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py). # + [markdown] colab_type="text" id="xsMgEVHAui11" # ### Use EncoderScaffold # # `EncoderScaffold` allows users to provide a custom embedding subnetwork # (which will replace the standard embedding logic) and/or a custom hidden layer class (which will replace the `Transformer` instantiation in the encoder). # + [markdown] colab_type="text" id="-JBabpa2AOz8" # #### Without Customization # # Without any customization, `EncoderScaffold` behaves the same the canonical `TransformerEncoder`. # # As shown in the following example, `EncoderScaffold` can load `TransformerEncoder`'s weights and output the same values: # + colab={} colab_type="code" id="ktNzKuVByZQf" default_hidden_cfg = dict( num_attention_heads=cfg["num_attention_heads"], intermediate_size=cfg["intermediate_size"], intermediate_activation=activations.gelu, dropout_rate=cfg["dropout_rate"], attention_dropout_rate=cfg["attention_dropout_rate"], kernel_initializer=tf.keras.initializers.TruncatedNormal(0.02), ) default_embedding_cfg = dict( vocab_size=cfg["vocab_size"], type_vocab_size=cfg["type_vocab_size"], hidden_size=cfg["hidden_size"], seq_length=cfg["sequence_length"], initializer=tf.keras.initializers.TruncatedNormal(0.02), dropout_rate=cfg["dropout_rate"], max_seq_length=cfg["sequence_length"], ) default_kwargs = dict( hidden_cfg=default_hidden_cfg, embedding_cfg=default_embedding_cfg, num_hidden_instances=cfg["num_layers"], pooled_output_dim=cfg["hidden_size"], return_all_layer_outputs=True, pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(0.02), ) encoder_scaffold = modeling.networks.EncoderScaffold(**default_kwargs) 
classifier_model_from_encoder_scaffold = build_classifier(encoder_scaffold) classifier_model_from_encoder_scaffold.set_weights( canonical_classifier_model.get_weights()) predict(classifier_model_from_encoder_scaffold) # + [markdown] colab_type="text" id="sMaUmLyIuwcs" # #### Customize Embedding # # Next, we show how to use a customized embedding network. # # We firstly build an embedding network that will replace the default network. This one will have 2 inputs (`mask` and `word_ids`) instead of 3, and won't use positional embeddings. # + colab={} colab_type="code" id="LTinnaG6vcsw" word_ids = tf.keras.layers.Input( shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(cfg['sequence_length'],), dtype=tf.int32, name="input_mask") embedding_layer = modeling.layers.OnDeviceEmbedding( vocab_size=cfg['vocab_size'], embedding_width=cfg['hidden_size'], initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) attention_mask = layers.SelfAttentionMask()([word_embeddings, mask]) new_embedding_network = tf.keras.Model([word_ids, mask], [word_embeddings, attention_mask]) # + [markdown] colab_type="text" id="HN7_yu-6O3qI" # Inspecting `new_embedding_network`, we can see it takes two inputs: # `input_word_ids` and `input_mask`. # + colab={} colab_type="code" id="fO9zKFE4OpHp" tf.keras.utils.plot_model(new_embedding_network, show_shapes=True, dpi=48) # + [markdown] colab_type="text" id="9cOaGQHLv12W" # We then can build a new encoder using the above `new_embedding_network`. # + colab={} colab_type="code" id="mtFDMNf2vIl9" kwargs = dict(default_kwargs) # Use new embedding network. kwargs['embedding_cls'] = new_embedding_network kwargs['embedding_data'] = embedding_layer.embeddings encoder_with_customized_embedding = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_customized_embedding) # ... 
Train the model ... print(classifier_model.inputs) # Assert that there are only two inputs. assert len(classifier_model.inputs) == 2 # + [markdown] colab_type="text" id="Z73ZQDtmwg9K" # #### Customized Transformer # # User can also override the [hidden_cls](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/encoder_scaffold.py#L103) argument in `EncoderScaffold`'s constructor to employ a customized Transformer layer. # # See [ReZeroTransformer](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/rezero_transformer.py) for how to implement a customized Transformer layer. # # Following is an example of using `ReZeroTransformer`: # # + colab={} colab_type="code" id="uAIarLZgw6pA" kwargs = dict(default_kwargs) # Use ReZeroTransformer. kwargs['hidden_cls'] = modeling.layers.ReZeroTransformer encoder_with_rezero_transformer = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_rezero_transformer) # ... Train the model ... predict(classifier_model) # Assert that the variable `rezero_alpha` from ReZeroTransformer exists. assert 'rezero_alpha' in ''.join([x.name for x in classifier_model.trainable_weights]) # + [markdown] colab_type="text" id="6PMHFdvnxvR0" # ### Use [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py) # # The above method of customizing `Transformer` requires rewriting the whole `Transformer` layer, while sometimes you may only want to customize either attention layer or feedforward block. In this case, [TransformerScaffold](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py) can be used. 
# # # + [markdown] colab_type="text" id="D6FejlgwyAy_" # #### Customize Attention Layer # # User can also override the [attention_cls](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/transformer_scaffold.py#L45) argument in `TransformerScaffold`'s constructor to employ a customized Attention layer. # # See [TalkingHeadsAttention](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/talking_heads_attention.py) for how to implement a customized `Attention` layer. # # Following is an example of using [TalkingHeadsAttention](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/talking_heads_attention.py): # + colab={} colab_type="code" id="nFrSMrZuyNeQ" # Use TalkingHeadsAttention hidden_cfg = dict(default_hidden_cfg) hidden_cfg['attention_cls'] = modeling.layers.TalkingHeadsAttention kwargs = dict(default_kwargs) kwargs['hidden_cls'] = modeling.layers.TransformerScaffold kwargs['hidden_cfg'] = hidden_cfg encoder = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder) # ... Train the model ... predict(classifier_model) # Assert that the variable `pre_softmax_weight` from TalkingHeadsAttention exists. assert 'pre_softmax_weight' in ''.join([x.name for x in classifier_model.trainable_weights]) # + [markdown] colab_type="text" id="kuEJcTyByVvI" # #### Customize Feedforward Layer # # Similiarly, one could also customize the feedforward layer. # # See [GatedFeedforward](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/gated_feedforward.py) for how to implement a customized feedforward layer. # # Following is an example of using [GatedFeedforward](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/gated_feedforward.py). 
# + colab={} colab_type="code" id="XAbKy_l4y_-i" # Use TalkingHeadsAttention hidden_cfg = dict(default_hidden_cfg) hidden_cfg['feedforward_cls'] = modeling.layers.GatedFeedforward kwargs = dict(default_kwargs) kwargs['hidden_cls'] = modeling.layers.TransformerScaffold kwargs['hidden_cfg'] = hidden_cfg encoder_with_gated_feedforward = modeling.networks.EncoderScaffold(**kwargs) classifier_model = build_classifier(encoder_with_gated_feedforward) # ... Train the model ... predict(classifier_model) # Assert that the variable `gate` from GatedFeedforward exists. assert 'gate' in ''.join([x.name for x in classifier_model.trainable_weights]) # + [markdown] colab_type="text" id="a_8NWUhkzeAq" # ### Build a new Encoder using building blocks from KerasBERT. # # Finally, you could also build a new encoder using building blocks in the modeling library. # # See [AlbertTransformerEncoder](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/albert_transformer_encoder.py) as an example: # # + colab={} colab_type="code" id="xsiA3RzUzmUM" albert_encoder = modeling.networks.AlbertTransformerEncoder(**cfg) classifier_model = build_classifier(albert_encoder) # ... Train the model ... predict(classifier_model) # + [markdown] colab_type="text" id="MeidDfhlHKSO" # Inspecting the `albert_encoder`, we see it stacks the same `Transformer` layer multiple times. # + colab={} colab_type="code" id="Uv_juT22HERW" tf.keras.utils.plot_model(albert_encoder, show_shapes=True, dpi=48)
official/colab/nlp/customize_encoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + language="markdown" # ## Resources # USDA data: https://fdc.nal.usda.gov/download-datasets.html # # USDA schema: https://fdc.nal.usda.gov/portal-data/external/dataDictionary # emission # GHG emissions by use: https://ourworldindata.org/food-choice-vs-eating-local # # Article on CO2 emissions per calorie: https://academic.oup.com/ajcn/article/101/1/184/4564263 # - import pandas as pd import numpy as np import requests from envs import env import json from dataclasses import dataclass GHG_EMISSIONS_DF = pd.read_csv('./data/ghg/GHG-emissions-by-life-cycle-stage-OurWorldinData-upload.csv') GHG_EMISSIONS_DF = GHG_EMISSIONS_DF.rename(columns=lambda column: column.replace(' ', '_').lower()).drop(columns='unnamed:_8') GHG_EMISSIONS_DF = GHG_EMISSIONS_DF.rename(columns={'packging': 'packaging'}) GHG_EMISSIONS_DF USDA_FOOD_DF = pd.read_csv('./data/usda/food.csv') USDA_FOOD_NUTRIENT_DF = pd.read_csv('./data/usda/food_nutrient.csv') USDA_NUTRIENT_DF = pd.read_csv('./data/usda/nutrient.csv') USDA_NUTRIENT_DF # + from dataclasses import field # create mapping of GHG food groups to FDC IDs @dataclass class GHG_FDC_Map: ghg_product_name: str fdc_id: int notes: str = "" tags: dict = field(default_factory=dict) with open('./ghg-fdc-mapping.json', 'r') as f: mappings = json.load(f) GHG_FDC_MAPPING = [] for mapping in mappings: if mapping['fdc_id']: GHG_FDC_MAPPING.append(GHG_FDC_Map(**mapping)) # + @dataclass class Food: emissions: pd.DataFrame name: str nutrients: pd.DataFrame macro_nutrient_emissions: pd.DataFrame @dataclass class Macro: name: str g_to_kcal: int usda_name: str food_dicts = [] food_df = None PROTEIN = 'Protein' FAT = 'Fat' CARB = 'Carbohydrate' ENERGY = 'Energy' KCAL = 'KCAL' MACROS = { 'Carbohydrate, by difference': Macro( name=CARB, g_to_kcal=4, 
usda_name='Carbohydrate, by difference' ), 'Total lipid (fat)': Macro( name=FAT, g_to_kcal=9, usda_name='Total lipid (fat)' ), 'Protein': Macro( name=PROTEIN, g_to_kcal=4, usda_name='Protein' ), } USDA_MACRO_NAMES = MACROS.keys() EMISSIONS = ['land_use_change','animal_feed','farm','processing','transport', 'packaging', 'retail'] def add_macro_in_kcal(df, amount_per_kg_col=None, amount_per_kcal_col=None): df_new = df.copy() for macro in USDA_MACRO_NAMES: macro_loc = df.nutrient_name == macro df_new.loc[macro_loc, amount_per_kcal_col] = df_new.loc[macro_loc, amount_per_kg_col] * MACROS[macro].g_to_kcal return df_new def get_total_energy_in_kcal(df, nutrient_name_col=None, nutrient_unit_name_col=None, nutrient_amount_per_kg_col=None): energy = df.loc[ (df[nutrient_name_col] == ENERGY) & (df[nutrient_unit_name_col] == KCAL), nutrient_amount_per_kg_col ].iloc[0] return energy def add_product_name(df, product_name_col=None, product_name=None): df_new = df.copy() df_new['product_name'] = mapping.ghg_product_name return df_new def convert_grams_in_100s_to_kg(df, nutrient_amount_per_100g_col='nutrient_amount_per_100g', nutrient_amount_per_kg_col='nutrient_amount_per_kg'): df_new = df.copy() df_new[nutrient_amount_per_kg_col] = df_new[nutrient_amount_per_100g_col] * 10 return df_new def add_nutrient_mass_fraction(df, nutrient_mass_fraction_per_kg_col='nutrient_mass_fraction_per_kg', nutrient_amount_per_kg_col='nutrient_amount_per_kg'): df_new = df.copy() df_new[nutrient_mass_fraction_per_kg_col] = df_new[nutrient_amount_per_kg_col] / 1000.0 return df_new def save_artifact(df, filepath): df.to_csv(filepath, index=False) def subset_by_nutrients(df, nutrient_name_col='nutrient_name', nutrients=[]): return df.loc[df[nutrient_name_col].isin(nutrients)] def add_fraction_of_total_energy(df, total_energy_in_kcal=None, fraction_of_total_energy_col='nutrient_fraction_of_total_energy', nutrient_amount_in_kcal_col='nutrient_amount_in_kcal' ): df_new = df.copy() 
df_new[fraction_of_total_energy_col] = df_new[nutrient_amount_in_kcal_col].divide(total_energy_in_kcal) return df_new def build_emissions_df_from_product_name(product_name=None): return GHG_EMISSIONS_DF.loc[GHG_EMISSIONS_DF['food_product'] == product_name] def join_nutrients_with_emissions(emissions_df, nutrients_df): df_new = pd.merge(nutrients_df, emissions_df, left_on='product_name', right_on='food_product').drop(columns='food_product') def rename(column): to_rename = emissions_df.columns.values if column in to_rename: return f'emissions_{column}' return column df_new = df_new.rename(columns=rename) return df_new def add_nutrient_emission_efficiency(df, nutrient_fraction_of_total_energy_col=None): df_new = df.copy() new_cols = [f'{emission}_nutrient_emission_inverse_efficiency' for emission in EMISSIONS] # land_use_change in kg CO2 / kg product # nutrient fraction of total energy = (kcal macro / kcal total energy) df_new[new_cols] = df_new[EMISSIONS].divide(df_new[nutrient_fraction_of_total_energy_col], axis=0) df_new['nutrient_emission_efficiency_units'] = '(kg CO2 / kg product) / (kcal macro / kcal total energy)' return df_new def build_nutrients_df_from_fdc_id(fdc_id): df = USDA_FOOD_DF.loc[USDA_FOOD_DF['fdc_id'] == fdc_id] df = pd.merge(df, USDA_FOOD_NUTRIENT_DF, how='left', on='fdc_id')[['fdc_id', 'description', 'nutrient_id', 'amount']] df = pd.merge(df, USDA_NUTRIENT_DF, left_on='nutrient_id', right_on='id')[['fdc_id', 'description', 'nutrient_id', 'amount', 'name', 'unit_name', 'nutrient_nbr']] df = df.rename(columns={'amount': 'nutrient_amount_per_100g', 'name': 'nutrient_name', 'unit_name': 'nutrient_unit_name'}) return df def add_nutrient_canonical_name(df, nutrient_name_col=None, nutrient_canonical_name_col=None): df_new = df.copy() for i, nutrient_name in enumerate(df_new[nutrient_name_col].values): canonical_name = None if nutrient_name in MACROS.keys(): canonical_name = MACROS[nutrient_name].name df_new.loc[i, nutrient_canonical_name_col] = 
canonical_name return df_new def add_total_emissions(df): df_new = df.copy() df_new['total'] = df_new[EMISSIONS].sum(axis=1) return df_new def add_emissions_units(df): df_new = df.copy() df_new['emissions_units'] = 'CO2 equivilant emissions (kg) / product mass (kg)' return df_new def add_nutrient_emissions(df): df_new = df.copy() df_new['nutrient_mass_emissions'] = 1.0 / df_new['nutrient_mass_fraction'] * df_new.emissions_total df_new['nutrient_mass_emissions_units'] = 'kg CO2 / kg nutrient' return df_new def add_category(df, mapping): df_new = df.copy() df_new['product_category'] = mapping.tags['category'] return df_new # + for mapping in GHG_FDC_MAPPING: nutrients_df = build_nutrients_df_from_fdc_id(mapping.fdc_id) nutrients_df = convert_grams_in_100s_to_kg(nutrients_df, nutrient_amount_per_100g_col='nutrient_amount_per_100g', nutrient_amount_per_kg_col='nutrient_amount_per_kg') nutrients_df = add_nutrient_mass_fraction( nutrients_df, nutrient_mass_fraction_per_kg_col='nutrient_mass_fraction', nutrient_amount_per_kg_col='nutrient_amount_per_kg' ) nutrients_df = add_product_name(nutrients_df, product_name_col='product_name', product_name=mapping.ghg_product_name) nutrients_df = add_nutrient_canonical_name(nutrients_df, nutrient_name_col='nutrient_name', nutrient_canonical_name_col='nutrient_canonical_name') nutrients_df = add_category(nutrients_df, mapping) save_artifact(nutrients_df, 'data/artifacts/food-nutrients-{mapping.ghg_product_name}.csv') total_energy_in_kcal = get_total_energy_in_kcal( nutrients_df, nutrient_name_col='nutrient_name', nutrient_unit_name_col='nutrient_unit_name', nutrient_amount_per_kg_col='nutrient_amount_per_kg' ) macro_nutrients_df = subset_by_nutrients(nutrients_df, nutrient_name_col='nutrient_name', nutrients=USDA_MACRO_NAMES) macro_nutrients_df = add_macro_in_kcal(macro_nutrients_df, amount_per_kg_col='nutrient_amount_per_kg', amount_per_kcal_col='nutrient_amount_in_kcal') macro_nutrients_df = add_fraction_of_total_energy( 
macro_nutrients_df, total_energy_in_kcal=total_energy_in_kcal, nutrient_amount_in_kcal_col='nutrient_amount_in_kcal', fraction_of_total_energy_col='nutrient_fraction_of_total_energy', ) emissions_df = build_emissions_df_from_product_name(product_name=mapping.ghg_product_name) emissions_df = add_total_emissions(emissions_df) macro_nutrient_emissions_df = join_nutrients_with_emissions(emissions_df=emissions_df, nutrients_df=macro_nutrients_df,) macro_nutrient_emissions_df = add_emissions_units(macro_nutrient_emissions_df) macro_nutrient_emissions_df = add_nutrient_emissions(macro_nutrient_emissions_df) # macro_nutrient_emissions_df = add_nutrient_emission_efficiency(macro_nutrient_emissions_df, nutrient_fraction_of_total_energy_col='nutrient_fraction_of_total_energy') if food_df is None: food_df = macro_nutrient_emissions_df else: food_df = pd.concat([food_df, macro_nutrient_emissions_df]) food_dict = Food( emissions=emissions_df, nutrients=macro_nutrients_df, name=mapping.ghg_product_name, macro_nutrient_emissions=macro_nutrient_emissions_df ) food_dicts.append(food_dict) food_df # + def remove_nans_and_infs(df): return df[~df.isin([np.nan, np.inf, -np.inf]).any(1)] def filter_by_macro(df, macro): return df.loc[df['nutrient_name'] == macro] # + language="markdown" # # ## Plot emission density by caloric density # - # !pip install adjustText # + from adjustText import adjust_text MACRO_LIMITS = { CARB: (-10, 300), FAT: (-10, 210), PROTEIN: (-10, 550), } def label_point(x, y, val, ax): a = pd.concat({'x': x, 'y': y, 'val': val}, axis=1) texts = [] for i, point in a.iterrows(): texts.append(ax.text(point['x'], point['y'], str(point['val']))) adjust_text(texts, arrowprops=dict(arrowstyle='->', color='black')) macros = dict() for macro in MACROS: macro_name = MACROS[macro].name to_plot = food_df[['nutrient_mass_fraction', 'emissions_total', 'emissions_units', 'nutrient_name', 'product_name', 'product_category']] to_plot = remove_nans_and_infs(to_plot) to_plot = 
to_plot.loc[to_plot['nutrient_name'] == macro] save_artifact(to_plot, f'data/artifacts/food-{macro_name}-emission-efficiency.csv') # to_plot = filter_by_macro(to_plot, macro) # to_plot = to_plot.set_index('nutrient_amount_per_kg') ax = to_plot.plot.scatter('nutrient_mass_fraction', 'emissions_total', title=macro, figsize=(12, 12)) ax.set_xlabel(f'{macro_name} Mass Fraction [kg {macro_name} / kg product]') ax.set_ylabel('Total Emissions [kg CO2 / kg product]') # label_point(to_plot.nutrient_amount_per_kg, to_plot.emissions_total, to_plot.product_name, ax) fig = ax.get_figure() fig.tight_layout() fig.savefig(f'./data/artifacts/plots/{macro_name}-nutrient-emission-by-energy-density.png') # to_plot # + language="markdown" # ## Plot Nutrient Emission Inverse Efficiency # - EMISSION_COLUMNS = { 'emissions_total': 'Total' } MACRO_LIMITS = { CARB: (-10, 300), FAT: (-10, 400), PROTEIN: (-10, 350), } macros = dict() for macro in MACROS: macros[macro] = food_df.loc[food_df['nutrient_name'] == macro] macro_name = MACROS[macro].name to_plot = macros[macro][[*EMISSION_COLUMNS] + ['product_name', 'nutrient_mass_fraction', 'nutrient_mass_emissions']] # to_plot = to_plot.rename(columns=EMISSION_COLUMNS) # total = to_plot.sum(axis=1) to_plot = remove_nans_and_infs(to_plot) to_plot = to_plot.iloc[to_plot['nutrient_mass_emissions'].argsort()] to_plot = to_plot.drop(['nutrient_mass_fraction', 'emissions_total'], axis=1) to_plot = to_plot.set_index('product_name', verify_integrity=True) ax = to_plot.plot.barh(stacked=True, title=f'{macro_name} Nutrient Emission Inverse Efficiency', figsize=(10, 6), xlim=MACRO_LIMITS[macro_name]) ax.set_xlabel(f'{macro_name} Emissions [(kg CO2 / kg {macro_name})]') ax.set_ylabel('Product') fig = ax.get_figure() fig.tight_layout() fig.savefig(f'./data/artifacts/plots/{macro_name}-emissions-per-kg.png') for food in food_dicts: to_plot = food.macro_nutrient_emissions[BARS_TO_PLOT + ['nutrient_canonical_name']] to_plot = 
to_plot.set_index('nutrient_canonical_name',) ax = to_plot.plot.bar(stacked=True, title=f'Nutrient Emission Inverse Efficiency for {food.name}', figsize=(8,8)) ax.set_ylabel(f'Nutrient Emission Inverse Efficiency') ax.set_xlabel('Macro Nutrient')
food-emissions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.6 64-bit
#     metadata:
#       interpreter:
#         hash: 122b684e0147009c315d6711cdbb4bc67c0b28d0ba10cdfcd148f2449410c391
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jafetimbre/optimus/blob/master/src/lab_8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="FZIp4lcWqvf5" outputId="81481238-16d6-4510-f197-81dcb105eb2d" colab={"base_uri": "https://localhost:8080/"}
# Sanity-check cell: confirms the kernel is alive before doing any real work.
print('dsa')

# + id="i7DHprBUqxrt" outputId="3b6da7be-1e48-454e-820e-844ac0283978" colab={"base_uri": "https://localhost:8080/"}
# !pip install pymongo

# + id="WgT_UiFCr2TP" outputId="7e3da9aa-3fed-4193-92e7-ff9df97f68a8" colab={"base_uri": "https://localhost:8080/"}
# dnspython is required by pymongo to resolve "mongodb+srv://" SRV records.
# !pip install dnspython

# + id="p8tH9bwHrtqN"
import pymongo

# + id="cjYiBeumxh0c"
import pprint

# + id="vHwtY-HzqvgG"
from urllib.parse import quote_plus  # percent-encode credentials for the URI

# Placeholder: substitute the real database password before running.
password = '<PASSWORD>'

# BUG FIX: the original URI interpolated the password as "<{password}>",
# keeping the literal angle brackets from the Atlas connection-string
# template inside the credentials, so authentication could never succeed.
# The brackets are dropped and the password is URL-encoded in case it
# contains reserved characters such as ':', '@' or '/'.
client = pymongo.MongoClient(
    f"mongodb+srv://collab-access:{quote_plus(password)}@cluster0.i4tb5.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
)

# + id="NzjRPD1wrkwr"
# MongoDB creates the database lazily on the first write.
lab_db = client['lab_database']

# + id="29uEU3xgsOAm"
# Collection handles (bracket and attribute access are equivalent in pymongo).
users = lab_db['users']
news = lab_db.news
comments = lab_db.comments
adds = lab_db.adds

# + id="Bx_I3hNtw5xa"
# Insert a sample document; the printed InsertOneResult carries the new _id.
sample_user = {"name": "Mike", "age": 23}
res = users.insert_one(sample_user)
print(res)

# + id="y20bF0Wcxk2H"
src/lab_8.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Kiri Core Example: Semantic Search # # This is a notebook that walks through some of the core functionality of Kiri's semantic search. # # ## What's semantic search? # In a 'typical' search system, search is performed by matching keywords. While sometimes useful, this often requires guesswork on behalf of the person searching. # They might not be aware of what content exists in the collection they're searching, and have to hope that their queries match words or phrases in the documents. # # Semantic search, alternatively, uses word embeddings to understand what your query *means*, and compare that to embeddings of documents. This allows the search engine # to find results that closely match the meaning of the query, regardless of exact phrasing and words used. # # Kiri Search # # Search is supported in over 50 languages. It defaults to just English for optimised performance. We'll see how to specify other languages later. # # We've got two flavors to look at: In-memory, and Elastic-based search. # # The in-memory version is meant for local testing, experimenting -- dev stuff. # # Using an Elastic backend is more suitable for production. # # We'll start with memory. # If you've got one, change it here. api_key = None # To initialise Kiri for multilingual search, initialise it with: # # ```python # kiri = Kiri(vectorisation_model="multilingual") # ``` # # This supports 50+ languages: ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt, pt-br, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw. # + from kiri import Kiri, InMemoryDocStore # If a DocStore isn't provided when Kiri is instantiated, it'll default to an InMemoryDocStore. 
# Explicitness here for clarity. m_doc_store = InMemoryDocStore() if api_key: kiri = Kiri(m_doc_store, api_key=api_key) else: kiri = Kiri(m_doc_store, local=True) # - # ### Documents # # Documents are the base object on which much of Kiri operates. # # At their simplest, they're a wrapper for a string of content. # # Upon uploading a document to a DocStore, it is processed -- the # `vectorisation_model` parameter of the Kiri Core is used to encode # the entire document as a vector, which is then used for search. # # There are variants of Documents that will be covered later. # + from kiri import Document # The only required argument for a Document is some text content content = "The Kiri Natural Language Engine is a high-level library for NLP tasks: a sweet suite of modules." # Doc attributes are abritray -- put in whatever metadata you might need for your use case. attributes = {"title": "Kiri Search Demo", "url": "https://kiri.ai"} doc = Document(content=content, attributes=attributes) kiri.upload([doc]) # - # ##### Brief Note on Uploading Content # Kiri's upload process simply passes the supplied documents along to the upload function of whichever DocStore you provided on instantiation. # # This allows you to customize the upload process with your own preprocessing steps by extending an existing DocStore class. # # In a similar vein, Kiri allows you to supply your own Document vectorization function to the DocStore upload -- however, that will be covered in a separate notebook with some other 'advanced' features. # ### Searching # # Once you've uploaded content to the DocStore, you're ready to search. It's as simple as that. # # For an in-memory search, you can provide a list of IDs to narrow search down. # However, all you *need* to provide is a query. # Docs will be retrieved from the DocStore, and sent to the result processing function along with # a query vector. 
# # When processing results, comparisons are performed between the query and document vectors to calculate # each document's relevancy score. # + results = kiri.search("What's Kiri?", max_results=10, min_score=0.01) # If you haven't modified this, then... well, you're going to get one result. # Find some text you want to search yourself, and try making some Documents! print(f"Results found: {results.total_results}") print("======") for res in results.results: # You can access your attributes here. print(res.document.attributes["title"]) print(res.preview) print(f"Score: {str(res.score)}") print("------") # - # ## What's Elastic? # # Elasticsearch is an open source search engine -- it's commonly used in enterprise. # It's quick, scalable, and documents are schema-free. # # Elastic uses the BM25 bag-of-words model which is then combined with our semantic search to achieve the best performance possible. # # ## Why use it with Kiri? # # Elastic provides a production-ready backend for search with very little overhead required. # # When used as a backend with Kiri, you get all the benefits of Elastic, enhanced with Kiri's # semantic processing. # # #### Setting up Elasticsearch # ##### With Docker # There's a simple Docker one-liner that can be used to instantly set up Elasticsearch. # # `docker run -p 9200:9200 -d -e "discovery.type=single-node" elasticsearch:7.10.1` # # The `-d` flag refers to detached mode, meaning it will run in the background. Omit `-d` to get logs in your terminal instance. # # If you'd like to host the Docker instance remotely, AWS has a small free-tier instance. It has 750 monthly hours, so you can leave it constantly running. # # ##### Local only # Refer to [this guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/getting-started-install.html) to get Elasticsearch running locally on your computer. # # Once you've got it installed and running, it's easy to use with Kiri. 
# # #### Elastic-specific Kiri classes # Kiri includes variants of its Document and DocStore classes (ElasticDocument and ElasticDocStore). # These variants handle the necessary procsesing to turn a Document object into an Elastic-ready one, # as well as back into an object when retrieved *from* Elasticsearch. # # In practice, this makes it incredibly easy to switch from a dev environment with `InMemorySearch` and standard `Documents` to an Elastic one ready for production. # + from kiri import ElasticDocStore, ElasticDocument # An Elasticsearch index is essentially a database e_doc_store = ElasticDocStore("http://localhost:9200", doc_class=ElasticDocument, index="kiri_example") if api_key: e_kiri = Kiri(e_doc_store, api_key=api_key) else: e_kiri = Kiri(e_doc_store, local=True) # Making a document just like before. # This time, we'll use the ElasticDocument class. e_content = "Kiri works with Elasticsearch as a backend. Nice." e_attrs = {"title": "Kiri Elastic Demo", "foo": "bar", "url": "https://kiri.ai"} e_doc = ElasticDocument(content=e_content, attributes=e_attrs) e_kiri.upload([e_doc]) # - # ### Searching an ElasticDocStore # # Performing a search on an `ElasticDocStore` can be just as simple as the `InMemoryDocStore`. All you need is a query. # # Kiri defaults to creating an Elastic query that checks cosine similarity between document and query vectors. # However, Elasticsearch includes capabilities to search via more complex queries. This will be covered in more detail in a separate notebook. # + # Easy... e_results = e_kiri.search("What backends does Kiri support?", max_results=10, min_score=0.01) print(f"Results found: {e_results.total_results}") print("======") for res in e_results.results: print(res.document.attributes["title"]) # Again, can print any attributes you added. print(f"Document Foo: {res.document.attributes['foo']}") print(res.preview) print(f"Score: {str(res.score)}") print("------")
examples/core_functionality/Search.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + pycharm={"is_executing": false}
import copy
import math
import itertools
import numpy as np
import matplotlib.pyplot as plt

from graph_based_slam import calc_rotational_matrix, calc_jacobian, cal_observation_sigma, \
    calc_input, observation, motion_model, Edge, pi_2_pi

# %matplotlib inline

np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
# -

# ### Introduction
#
# In contrast to the probabilistic approaches for solving SLAM, such as EKF, UKF, particle filters, and so on, the graph technique formulates the SLAM as an optimization problem. It is mostly used to solve the full SLAM problem in an offline fashion, i.e. optimize all the poses of the robot after the path has been traversed. However, some variants are available that use graph-based approaches to perform online estimation or to solve for a subset of the poses.
#
# GraphSLAM uses the motion information as well as the observations of the environment to create a least-squares problem that can be solved using standard optimization techniques.
#
# ### Minimal Example
# The following example illustrates the main idea behind graphSLAM.
# A simple case of a 1D robot is considered that can only move in 1 direction. The robot is commanded to move forward with a control input $u_t=1$; however, the motion is not perfect and the measured odometry will deviate from the true path. At each timestep the robot can observe its environment; for this simple case there is only a single landmark, at coordinates $x=3$. The measured observations are the range between the robot and the landmark. These measurements are also subject to noise. No bearing information is required since this is a 1D problem.
# # To solve this, graphSLAM creates what is called as the system information matrix $\Omega$ also referred to as $H$ and the information vector $\xi$ also known as $b$. The entries are created based on the information of the motion and the observation. # + pycharm={"is_executing": false} R = 0.2 Q = 0.2 N = 3 graphics_radius = 0.1 odom = np.empty((N,1)) obs = np.empty((N,1)) x_true = np.empty((N,1)) landmark = 3 # Simulated readings of odometry and observations x_true[0], odom[0], obs[0] = 0.0, 0.0, 2.9 x_true[1], odom[1], obs[1] = 1.0, 1.5, 2.0 x_true[2], odom[2], obs[2] = 2.0, 2.4, 1.0 hxDR = copy.deepcopy(odom) # Visualization plt.plot(landmark,0, '*k', markersize=30) for i in range(N): plt.plot(odom[i], 0, '.', markersize=50, alpha=0.8, color='steelblue') plt.plot([odom[i], odom[i] + graphics_radius], [0,0], 'r') plt.text(odom[i], 0.02, "X_{}".format(i), fontsize=12) plt.plot(obs[i]+odom[i],0,'.', markersize=25, color='brown') plt.plot(x_true[i],0,'.g', markersize=20) plt.grid() plt.show() # Defined as a function to facilitate iteration def get_H_b(odom, obs): """ Create the information matrix and information vector. This implementation is based on the concept of virtual measurement i.e. the observations of the landmarks are converted into constraints (edges) between the nodes that have observed this landmark. 
""" measure_constraints = {} omegas = {} zids = list(itertools.combinations(range(N),2)) H = np.zeros((N,N)) b = np.zeros((N,1)) for (t1, t2) in zids: x1 = odom[t1] x2 = odom[t2] z1 = obs[t1] z2 = obs[t2] # Adding virtual measurement constraint measure_constraints[(t1,t2)] = (x2-x1-z1+z2) omegas[(t1,t2)] = (1 / (2*Q)) # populate system's information matrix and vector H[t1,t1] += omegas[(t1,t2)] H[t2,t2] += omegas[(t1,t2)] H[t2,t1] -= omegas[(t1,t2)] H[t1,t2] -= omegas[(t1,t2)] b[t1] += omegas[(t1,t2)] * measure_constraints[(t1,t2)] b[t2] -= omegas[(t1,t2)] * measure_constraints[(t1,t2)] return H, b H, b = get_H_b(odom, obs) print("The determinant of H: ", np.linalg.det(H)) H[0,0] += 1 # np.inf ? print("The determinant of H after anchoring constraint: ", np.linalg.det(H)) for i in range(5): H, b = get_H_b(odom, obs) H[(0,0)] += 1 # Recover the posterior over the path dx = np.linalg.inv(H) @ b odom += dx # repeat till convergence print("Running graphSLAM ...") print("Odometry values after optimzation: \n", odom) plt.figure() plt.plot(x_true, np.zeros(x_true.shape), '.', markersize=20, label='Ground truth') plt.plot(odom, np.zeros(x_true.shape), '.', markersize=20, label='Estimation') plt.plot(hxDR, np.zeros(x_true.shape), '.', markersize=20, label='Odom') plt.legend() plt.grid() plt.show() # - # In particular, the tasks are split into 2 parts, graph construction, and graph optimization. # ### Graph Construction # # Firstly the nodes are defined $\boldsymbol{x} = \boldsymbol{x}_{1:n}$ such that each node is the pose of the robot at time $t_i$ # Secondly, the edges i.e. the constraints, are constructed according to the following conditions: # # * robot moves from $\boldsymbol{x}_i$ to $\boldsymbol{x}_j$. This edge corresponds to the odometry measurement. Relative motion constraints (Not included in the previous minimal example). 
# * Measurement constraints, this can be done in two ways:
#     * The information matrix is set in such a way that it includes the landmarks in the map as well. Then the constraints can be entered in a straightforward fashion between a node $\boldsymbol{x}_i$ and some landmark $m_k$
#     * Through creating a virtual measurement among all the nodes that have observed the same landmark. More concretely, the robot observes the same landmark from $\boldsymbol{x}_i$ and $\boldsymbol{x}_j$. This is a relative measurement constraint. The "virtual measurement" $\boldsymbol{z}_{ij}$ is the estimated pose of $\boldsymbol{x}_j$ as seen from the node $\boldsymbol{x}_i$. The virtual measurement can then be entered in the information matrix and vector in a similar fashion to the motion constraints.
#
# An edge is fully characterized by the value of the error (entry to the information vector) and the local information matrix (entry to the system's information matrix). The larger the local information matrix (i.e. the lower $Q$ or $R$), the larger the values this edge contributes.
#
# Important Notes:
#
# * The additions to the information matrix and vector are accumulated onto the earlier values.
# * In the case of a 2D robot, the constraints are non-linear; therefore, a Jacobian of the error w.r.t. the states is needed when updating $H$ and $b$.
# * The anchoring constraint is needed in order to avoid having a singular information matrix.
#
#
# ### Graph Optimization
#
# This formulation yields an overdetermined system of equations.
# The goal after constructing the graph is to find: $x^* = \underset{x}{\mathrm{argmin}} \sum_{ij} f(e_{ij})$, where $f$ is some error function that depends on the edge between two related nodes $i$ and $j$.
The derivation in the references arrive at the solution for $x^* = H^{-1}b$ # # # ### Planar Example: # # Now we will go through an example with a more realistic case of a 2D robot with 3DoF, namely, $[x, y, \theta]^T$ # + pycharm={"is_executing": false} # Simulation parameter Qsim = np.diag([0.01, np.deg2rad(0.010)])**2 # error added to range and bearing Rsim = np.diag([0.1, np.deg2rad(1.0)])**2 # error added to [v, w] DT = 2.0 # time tick [s] SIM_TIME = 100.0 # simulation time [s] MAX_RANGE = 30.0 # maximum observation range STATE_SIZE = 3 # State size [x,y,yaw] # TODO: Why not use Qsim ? # Covariance parameter of Graph Based SLAM C_SIGMA1 = 0.1 C_SIGMA2 = 0.1 C_SIGMA3 = np.deg2rad(1.0) MAX_ITR = 20 # Maximum iteration during optimization timesteps = 1 # consider only 2 landmarks for simplicity # RFID positions [x, y, yaw] RFID = np.array([[10.0, -2.0, 0.0], # [15.0, 10.0, 0.0], # [3.0, 15.0, 0.0], # [-5.0, 20.0, 0.0], # [-5.0, 5.0, 0.0] ]) # State Vector [x y yaw v]' xTrue = np.zeros((STATE_SIZE, 1)) xDR = np.zeros((STATE_SIZE, 1)) # Dead reckoning xTrue[2] = np.deg2rad(45) xDR[2] = np.deg2rad(45) # history initial values hxTrue = xTrue hxDR = xTrue _, z, _, _ = observation(xTrue, xDR, np.array([[0,0]]).T, RFID) hz = [z] for i in range(timesteps): u = calc_input() xTrue, z, xDR, ud = observation(xTrue, xDR, u, RFID) hxDR = np.hstack((hxDR, xDR)) hxTrue = np.hstack((hxTrue, xTrue)) hz.append(z) # visualize graphics_radius = 0.3 plt.plot(RFID[:, 0], RFID[:, 1], "*k", markersize=20) plt.plot(hxDR[0, :], hxDR[1, :], '.', markersize=50, alpha=0.8, label='Odom') plt.plot(hxTrue[0, :], hxTrue[1, :], '.', markersize=20, alpha=0.6, label='X_true') for i in range(hxDR.shape[1]): x = hxDR[0, i] y = hxDR[1, i] yaw = hxDR[2, i] plt.plot([x, x + graphics_radius * np.cos(yaw)], [y, y + graphics_radius * np.sin(yaw)], 'r') d = hz[i][:, 0] angle = hz[i][:, 1] plt.plot([x + d * np.cos(angle + yaw)], [y + d * np.sin(angle + yaw)], '.', markersize=20, alpha=0.7) plt.legend() 
plt.grid() plt.show() # + pycharm={"is_executing": false} # Copy the data to have a consistent naming with the .py file zlist = copy.deepcopy(hz) x_opt = copy.deepcopy(hxDR) xlist = copy.deepcopy(hxDR) number_of_nodes = x_opt.shape[1] n = number_of_nodes * STATE_SIZE # - # After the data has been saved, the graph will be constructed by looking at each pair for nodes. The virtual measurement is only created if two nodes have observed the same landmark at different points in time. The next cells are a walk through for a single iteration of graph construction -> optimization -> estimate update. # + pycharm={"is_executing": false} # get all the possible combination of the different node zids = list(itertools.combinations(range(len(zlist)), 2)) print("Node combinations: \n", zids) for i in range(xlist.shape[1]): print("Node {} observed landmark with ID {}".format(i, zlist[i][0, 3])) # - # In the following code snippet the error based on the virtual measurement between node 0 and 1 will be created. # The equations for the error is as follows: # $e_{ij}^x = x_j + d_j cos(\psi_j + \theta_j) - x_i - d_i cos(\psi_i + \theta_i) $ # # $e_{ij}^y = y_j + d_j sin(\psi_j + \theta_j) - y_i - d_i sin(\psi_i + \theta_i) $ # # $e_{ij}^x = \psi_j + \theta_j - \psi_i - \theta_i $ # # Where $[x_i, y_i, \psi_i]$ is the pose for node $i$ and similarly for node $j$, $d$ is the measured distance at nodes $i$ and $j$, and $\theta$ is the measured bearing to the landmark. The difference is visualized with the figure in the next cell. 
# # In case of perfect motion and perfect measurement the error shall be zero since $ x_j + d_j cos(\psi_j + \theta_j)$ should equal $x_i + d_i cos(\psi_i + \theta_i)$ # + pycharm={"is_executing": false} # Initialize edges list edges = [] # Go through all the different combinations for (t1, t2) in zids: x1, y1, yaw1 = xlist[0, t1], xlist[1, t1], xlist[2, t1] x2, y2, yaw2 = xlist[0, t2], xlist[1, t2], xlist[2, t2] # All nodes have valid observation with ID=0, therefore, no data association condition iz1 = 0 iz2 = 0 d1 = zlist[t1][iz1, 0] angle1, phi1 = zlist[t1][iz1, 1], zlist[t1][iz1, 2] d2 = zlist[t2][iz2, 0] angle2, phi2 = zlist[t2][iz2, 1], zlist[t2][iz2, 2] # find angle between observation and horizontal tangle1 = pi_2_pi(yaw1 + angle1) tangle2 = pi_2_pi(yaw2 + angle2) # project the observations tmp1 = d1 * math.cos(tangle1) tmp2 = d2 * math.cos(tangle2) tmp3 = d1 * math.sin(tangle1) tmp4 = d2 * math.sin(tangle2) edge = Edge() print(y1,y2, tmp3, tmp4) # calculate the error of the virtual measurement # node 1 as seen from node 2 throught the observations 1,2 edge.e[0, 0] = x2 - x1 - tmp1 + tmp2 edge.e[1, 0] = y2 - y1 - tmp3 + tmp4 edge.e[2, 0] = pi_2_pi(yaw2 - yaw1 - tangle1 + tangle2) edge.d1, edge.d2 = d1, d2 edge.yaw1, edge.yaw2 = yaw1, yaw2 edge.angle1, edge.angle2 = angle1, angle2 edge.id1, edge.id2 = t1, t2 edges.append(edge) print("For nodes",(t1,t2)) print("Added edge with errors: \n", edge.e) # Visualize measurement projections plt.plot(RFID[0, 0], RFID[0, 1], "*k", markersize=20) plt.plot([x1,x2],[y1,y2], '.', markersize=50, alpha=0.8) plt.plot([x1, x1 + graphics_radius * np.cos(yaw1)], [y1, y1 + graphics_radius * np.sin(yaw1)], 'r') plt.plot([x2, x2 + graphics_radius * np.cos(yaw2)], [y2, y2 + graphics_radius * np.sin(yaw2)], 'r') plt.plot([x1,x1+tmp1], [y1,y1], label="obs 1 x") plt.plot([x2,x2+tmp2], [y2,y2], label="obs 2 x") plt.plot([x1,x1], [y1,y1+tmp3], label="obs 1 y") plt.plot([x2,x2], [y2,y2+tmp4], label="obs 2 y") plt.plot(x1+tmp1, y1+tmp3, 
'o') plt.plot(x2+tmp2, y2+tmp4, 'o') plt.legend() plt.grid() plt.show() # - # Since the constraints equations derived before are non-linear, linearization is needed before we can insert them into the information matrix and information vector. Two jacobians # # $A = \frac{\partial e_{ij}}{\partial \boldsymbol{x}_i}$ as $\boldsymbol{x}_i$ holds the three variabls x, y, and theta. # Similarly, # $B = \frac{\partial e_{ij}}{\partial \boldsymbol{x}_j}$ # + pycharm={"is_executing": false} # Initialize the system information matrix and information vector H = np.zeros((n, n)) b = np.zeros((n, 1)) x_opt = copy.deepcopy(hxDR) for edge in edges: id1 = edge.id1 * STATE_SIZE id2 = edge.id2 * STATE_SIZE t1 = edge.yaw1 + edge.angle1 A = np.array([[-1.0, 0, edge.d1 * math.sin(t1)], [0, -1.0, -edge.d1 * math.cos(t1)], [0, 0, -1.0]]) t2 = edge.yaw2 + edge.angle2 B = np.array([[1.0, 0, -edge.d2 * math.sin(t2)], [0, 1.0, edge.d2 * math.cos(t2)], [0, 0, 1.0]]) # TODO: use Qsim instead of sigma sigma = np.diag([C_SIGMA1, C_SIGMA2, C_SIGMA3]) Rt1 = calc_rotational_matrix(tangle1) Rt2 = calc_rotational_matrix(tangle2) edge.omega = np.linalg.inv(Rt1 @ sigma @ Rt1.T + Rt2 @ sigma @ Rt2.T) # Fill in entries in H and b H[id1:id1 + STATE_SIZE, id1:id1 + STATE_SIZE] += A.T @ edge.omega @ A H[id1:id1 + STATE_SIZE, id2:id2 + STATE_SIZE] += A.T @ edge.omega @ B H[id2:id2 + STATE_SIZE, id1:id1 + STATE_SIZE] += B.T @ edge.omega @ A H[id2:id2 + STATE_SIZE, id2:id2 + STATE_SIZE] += B.T @ edge.omega @ B b[id1:id1 + STATE_SIZE] += (A.T @ edge.omega @ edge.e) b[id2:id2 + STATE_SIZE] += (B.T @ edge.omega @ edge.e) print("The determinant of H: ", np.linalg.det(H)) plt.figure() plt.subplot(1,2,1) plt.imshow(H, extent=[0, n, 0, n]) plt.subplot(1,2,2) plt.imshow(b, extent=[0, 1, 0, n]) plt.show() # Fix the origin, multiply by large number gives same results but better visualization H[0:STATE_SIZE, 0:STATE_SIZE] += np.identity(STATE_SIZE) print("The determinant of H after origin constraint: ", 
np.linalg.det(H)) plt.figure() plt.subplot(1,2,1) plt.imshow(H, extent=[0, n, 0, n]) plt.subplot(1,2,2) plt.imshow(b, extent=[0, 1, 0, n]) plt.show() # + pycharm={"is_executing": false} # Find the solution (first iteration) dx = - np.linalg.inv(H) @ b for i in range(number_of_nodes): x_opt[0:3, i] += dx[i * 3:i * 3 + 3, 0] print("dx: \n",dx) print("ground truth: \n ",hxTrue) print("Odom: \n", hxDR) print("SLAM: \n", x_opt) # performance will improve with more iterations, nodes and landmarks. print("\ngraphSLAM localization error: ", np.sum((x_opt - hxTrue) ** 2)) print("Odom localization error: ", np.sum((hxDR - hxTrue) ** 2)) # - # ### The references: # # # * http://robots.stanford.edu/papers/thrun.graphslam.pdf # # * http://ais.informatik.uni-freiburg.de/teaching/ss13/robotics/slides/16-graph-slam.pdf # # * http://www2.informatik.uni-freiburg.de/~stachnis/pdf/grisetti10titsmag.pdf # # N.B. # An additional step is required that uses the estimated path to update the belief regarding the map. # + pycharm={"is_executing": false}
SLAM/GraphBasedSLAM/graphSLAM_doc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import main main.interpret_get_macro("!query_testing " + "~test") raw = """// // Created by <NAME> on 27/9/20. // #include <cassert> #include "Primal.h" Primal::Command::Command(vector<string> tags, string command) : tags(tags), command(command) { // Do nothing } Primal::Command::Command(string type, string command) : command(command) { this->tags = vector<string>(); tags.push_back(type); } void Primal::Child(Thread *child) { this->children.push_back(child); this->Connect(child); } Primal::Primal() : results(map<int, Prototype>()), resultsMutex(std::mutex()) { } void Primal::NewThread() { auto* thread = new Thread(this); Child(thread); } void Primal::End() { for(auto* thread: children) { thread->endFlag = true; assert(thread->thread.joinable()); // Checks if the thread is still operating thread->thread.join(); } } void Primal::MergeIntoResult(Prototype *result, unsigned int index) { std::lock_guard<std::mutex> guard(resultsMutex); // Protects the results-vector from asynchronous competition if (results.find(index) != results.end()) results[index] = *result; else results[index].merge(*result->subvalues_col); // Tidy up delete result; }""" import re regex = r'\n\w*[\s::]*\w+::(\w)+\([\w\s<>,*&]*\).*:?w*{' re.findall(regex, raw) # + # regex algorithm for unfiltered function names storage = [] temp_raw = raw isFailed = False while not isFailed: try: f = re.search(r'\n\s*\w*[\s::]*\w+::(\w)+\([\w\:s<>,*&]*\).*:?w*{', temp_raw) storage.append(f.group(0)) temp_raw = temp_raw.replace(f.group(0), "") except: isFailed = True storage # regex for constructor declaration regex = r'(\w)*:?:?(\w+)::\2\([\w<>:&*,\s]*\)' # regex for function name (delimited by '::', just split by '::' and you're good to go!) 
regex = r'\w*(::)*\w+::\w+(?=(\([\w<>&*:,\s]*\)))' # once those constructor names are crossed out, you can use the following to get the remaining function names fully filtered: regex = r'\w+\([\w<:>,*&\s]*\)' # + # Regex for subclasses/substructs names (unfiltered) # NOTE: remember to store if it is a subclass along with it's name regex = r'::\w+::' # Once subclasses / substruct results are filtered out: # Regex for classes / struct names (unfiltered) # NOTE: remember to store if it is a subclass along with it's name regex = r'\s\w+::' # + # Regex for header files: # Regex for finding substructs: regex = r'(struct .+[\s.\w:<>;(),:{*&};]+?)};' # Regex for finding subclasses: regex = r'(class .+[\s.\w<:>;(),:{*&};]+?)};' # Regex for finding classes (with the filename as class name it should be checked for after) regex = r'(class .+[\s.\w<:>;(),:{*&};]+)};' # Regex for finding structs (with the filename as struct name it should be checked for after) regex = r'(struct .+[\s.\w:<>;(),:{*&};]+)};' # To find the name of the individual struct/class, simply split by spaces and retrieve the second word # + # 0. Create a function called hashmemory, that stores each value given to it into a dict, with a hash and a value as a value and the name as the key, and in a second dict with the hash as key it returns the hash # 1. If it is a subclass/substruct, replace the class/struct definition with the returned hash; Store every class/struct via hashmemory # NOTE: remember to store a reference to the parent class/struct string and whether it is a child struct or not # 2. For each class/struct in custom_data_structures: # 1. Get the string representing a particular struct or class # 3. Find all of a certain function within a particular struct or class, with: regex = r'(TypeFunctionNameHere\(.*\);)' # 4. Replace with filtered function names from .cpp files and add a semicolon at the end # 5. 
Store the resulting class/struct definition in a dictionary (called 'results') with the name as key # 3. Replace the hashmemory stored sclass/struct declaration (AKA hash) in the parent class/struct with matching (via hashmemory's two dicts) the built string from 'results' # 4. Replace all the nonsub classes/struct's definitions in the .h file string with the definitions stored by hashmemory in the two dicts # - val = "run_this_macro: std::cout << {1} << {0} << std::endl; std::cout << {2};" # + name = re.search(r'^\w+(?=:)', val) print(name.group(0)) # - value = re.search(r'(?::).+$', val).group(0)[1:].strip() print(value) # + # Regex to get comments (need to remove the first two characters then you're good to go!) regex = r'\/\/.*$' # Regex to get multiline comments (need to remove the first and last two characters an then you're good to go!) regex = r'\/\*(\s|.)*\*\/' # Regex to get generated code regex = r'\n\/\/ GENERATED CODE START \/\/\n(.|\s)*\n\/\/ GENERATED CODE END \/\/\n'
testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -*- coding: utf-8 -*-
# Author: <NAME>
# Date: 2019-02-24 16:42:55

from itertools import chain
import gc

from nltk import ngrams
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer

import SQLite_handler
from my_weapon import *
from tqdm import tqdm_notebook as tqdm
from myclf import *

from Trump_Clinton_Classifer.TwProcess import CustomTweetTokenizer
from Trump_Clinton_Classifer.TwSentiment import bag_of_words, bag_of_words_and_bigrams


# +
class Fake_Classifer(object):
    """Pipeline for training tweet classifiers over media-bias labels.

    Three stages, each persisted to disk so they can be run independently:

    1. ``get_train_data`` -- dump raw tweet texts, one file per media type.
    2. ``get_tokens``     -- tokenize the dumped texts.
    3. ``train``          -- fit and evaluate several classifiers on a
       4-way grouping of the labels (fake / right / center / left).
    """

    def __init__(self):
        # Media-type code -> human-readable bias label.
        self.MAP_LABELS = {
            "0": "fake",
            "1": "extreme bias (right)",
            "2": "right",
            "3": "right leaning",
            "4": "center",
            "5": "left leaning",
            "6": "left",
            "7": "extreme bias (left)"
        }

    def get_train_data(self):
        """Fetch the raw training texts.

        Reads tweet ids per media type from ``disk/all-tweets.csv``, looks
        the full tweets up via ``SQLite_handler`` and writes one text file
        per label under ``disk/train_data_fake/``.
        """
        print("loading all tweets_csv ...")
        all_tweets = pd.read_csv("disk/all-tweets.csv", dtype=str,
                                 usecols=["tweet_id", "media_type"])
        print("finished!")

        # Consistency fix: reuse the canonical label mapping instead of
        # redeclaring an identical local copy.
        for _type, f_label in self.MAP_LABELS.items():
            print(_type, "...")
            tweets_id = all_tweets[all_tweets["media_type"] == _type].tweet_id
            rst = SQLite_handler.find_tweets(tweets_id)
            print(len(rst))
            with open("disk/train_data_fake/{}.txt".format(_type), "w") as f:
                for d in rst:
                    # Tweets without a text field cannot contribute features.
                    if "text" not in d:
                        continue
                    f.write(d["text"] + "\n")

    def get_tokens(self):
        """Tokenize the raw texts (text -> tokens), one output file per label.

        Retweets (lines whose first token is "RT") are skipped.
        """
        tokenizer = CustomTweetTokenizer()
        for _type, f_label in self.MAP_LABELS.items():
            # Robustness fix: the input file is now also opened via ``with``
            # so both handles are closed deterministically.
            with open("disk/tokens_fake/{}.txt".format(_type), "w") as f, \
                    open("disk/train_data_fake/{}.txt".format(_type)) as fin:
                for line in fin:
                    words = tokenizer.tokenize(line.strip())
                    if len(words) > 0 and words[0] != "RT":
                        f.write(" ".join(words) + "\n")

    def train(self):
        """Train and evaluate classifiers on the 4-way label grouping.

        Grouping: fake (0), right-wing (1), center (2), left-wing (3).
        """
        # Read data: bag-of-words + bigram features per tokenized tweet.
        X = []
        y = []
        for _type, f_label in self.MAP_LABELS.items():
            if f_label == "fake":
                y_i = 0
            elif f_label in ["extreme bias (right)", "right", "right leaning"]:
                y_i = 1
            elif f_label == "center":
                y_i = 2
            elif f_label in ["extreme bias (left)", "left", "left leaning"]:
                y_i = 3
            with open("disk/tokens_fake/{}.txt".format(_type)) as fin:
                for line in fin:
                    w = line.strip().split(" ")
                    X.append(bag_of_words_and_bigrams(w))
                    y.append(y_i)
        print("Reading data finished! count:", len(y))

        # Split train and test data.
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.1, random_state=42)
        del X, y
        gc.collect()  # the raw feature dicts are large; free them eagerly
        print("Splitting data finished!")

        # Build the one-hot embedding (sparse int8 to keep memory down).
        v = DictVectorizer(dtype=np.int8, sparse=True, sort=False)
        X_train = v.fit_transform(X_train)
        X_test = v.transform(X_test)
        print("Building word embedding finished!")
        print(X_train[0].shape, X_train[1].shape)
        print(X_train.shape, X_test.shape)

        # Machine-learning models to compare; the dict maps short names to
        # helper constructors from ``myclf``.
        list_classifiers = ['NB', 'LR', 'SVMLINER', 'GBDT']
        classifiers = {
            'NB': naive_bayes_classifier,
            'KNN': knn_classifier,
            'LR': logistic_regression_classifier,
            'RF': random_forest_classifier,
            'DT': decision_tree_classifier,
            'SVM': svm_classifier,
            'SVMCV': svm_cross_validation,
            'GBDT': gradient_boosting_classifier,
            'SVMLINER': svm_linear_classifier,
        }
        for classifier in list_classifiers:
            print('******************* {} ********************'.format(classifier))
            if classifier == "GBDT":
                clf = GradientBoostingClassifier(learning_rate=0.1, max_depth=3)
                clf.fit(X_train, y_train)
            # BUG FIX: this branch was a separate ``if`` in the original, so
            # for "GBDT" control fell through to the ``else`` and the fitted
            # model was silently overwritten by the generic helper.
            elif classifier == "LR":
                clf = LogisticRegression(penalty='l2', multi_class="multinomial",
                                         solver="sag", max_iter=10e8)
                clf.fit(X_train, y_train)
            else:
                clf = classifiers[classifier](X_train, y_train)
            self.evaluate(clf, X_train, y_train, X_test, y_test)

    def evaluate(self, clf, X_train, y_train, X_test, y_test):
        """Report 5-fold CV accuracy on the training split and a full
        classification report on the held-out test split."""
        print('accuracy of CV=5:', cross_val_score(clf, X_train, y_train, cv=5).mean())
        # Model evaluation on the held-out split.
        y_pred = clf.predict(X_test)
        print(classification_report(y_test, y_pred))
# -

Lebron = Fake_Classifer()
# Lebron.get_train_data()
# Lebron.get_tokens()
Lebron.train()
fake_classifer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #export from local.torch_basics import * from local.test import * from local.layers import * from local.data.all import * from local.notebook.showdoc import show_doc from local.optimizer import * from local.learner import * from local.callback.progress import * # + #default_exp callback.fp16 # - #hide from local.utils.test import * # # Mixed precision training # # > Callback and utility functions to allow mixed precision training # ## A little bit of theory # Continuing the documentation on the fastai_v1 development here is a brief piece about mixed precision training. A very nice and clear introduction to it is [this video from NVIDIA](http://on-demand.gputechconf.com/gtc/2018/video/S81012/). # # ### What's half precision? # In neural nets, all the computations are usually done in single precision, which means all the floats in all the arrays that represent inputs, activations, weights... are 32-bit floats (FP32 in the rest of this post). An idea to reduce memory usage (and avoid those annoying cuda errors) has been to try and do the same thing in half-precision, which means using 16-bits floats (or FP16 in the rest of this post). By definition, they take half the space in RAM, and in theory could allow you to double the size of your model and double your batch size. # # Another very nice feature is that NVIDIA developed its latest GPUs (the Volta generation) to take fully advantage of half-precision tensors. Basically, if you give half-precision tensors to those, they'll stack them so that each core can do more operations at the same time, and theoretically gives an 8x speed-up (sadly, just in theory). 
# # So training at half precision is better for your memory usage, way faster if you have a Volta GPU (still a tiny bit faster if you don't since the computations are easiest). How do we do it? Super easily in pytorch, we just have to put .half() everywhere: on the inputs of our model and all the parameters. Problem is that you usually won't see the same accuracy in the end (so it happens sometimes) because half-precision is... well... not as precise ;). # # ### Problems with half-precision: # To understand the problems with half precision, let's look briefly at what an FP16 looks like (more information [here](https://en.wikipedia.org/wiki/Half-precision_floating-point_format)). # # ![half float](images/half.png) # # The sign bit gives us +1 or -1, then we have 5 bits to code an exponent between -14 and 15, while the fraction part has the remaining 10 bits. Compared to FP32, we have a smaller range of possible values (2e-14 to 2e15 roughly, compared to 2e-126 to 2e127 for FP32) but also a smaller *offset*. # # For instance, between 1 and 2, the FP16 format only represents the number 1, 1+2e-10, 1+2*2e-10... which means that 1 + 0.0001 = 1 in half precision. That's what will cause a certain numbers of problems, specifically three that can occur and mess up your training. # 1. The weight update is imprecise: inside your optimizer, you basically do w = w - lr * w.grad for each weight of your network. The problem in performing this operation in half precision is that very often, w.grad is several orders of magnitude below w, and the learning rate is also small. The situation where w=1 and lr*w.grad is 0.0001 (or lower) is therefore very common, but the update doesn't do anything in those cases. # 2. Your gradients can underflow. In FP16, your gradients can easily be replaced by 0 because they are too low. # 3. Your activations or loss can overflow. 
The opposite problem from the gradients: it's easier to hit nan (or infinity) in FP16 precision, and your training might more easily diverge. # # ### The solution: mixed precision training # # To address those three problems, we don't fully train in FP16 precision. As the name mixed training implies, some of the operations will be done in FP16, others in FP32. This is mainly to take care of the first problem listed above. For the next two there are additional tricks. # # The main idea is that we want to do the forward pass and the gradient computation in half precision (to go fast) but the update in single precision (to be more precise). It's okay if w and grad are both half floats, but when we do the operation w = w - lr * grad, we need to compute it in FP32. That way our 1 + 0.0001 is going to be 1.0001. # # This is why we keep a copy of the weights in FP32 (called master model). Then, our training loop will look like: # 1. compute the output with the FP16 model, then the loss # 2. back-propagate the gradients in half-precision. # 3. copy the gradients in FP32 precision # 4. do the update on the master model (in FP32 precision) # 5. copy the master model in the FP16 model. # # Note that we lose precision during step 5, and that the 1.0001 in one of the weights will go back to 1. But if the next update corresponds to add 0.0001 again, since the optimizer step is done on the master model, the 1.0001 will become 1.0002 and if we eventually go like this up to 1.0005, the FP16 model will be able to tell the difference. # # That takes care of problem 1. For the second problem, we use something called gradient scaling: to avoid the gradients getting zeroed by the FP16 precision, we multiply the loss by a scale factor (scale=512 for instance). That way we can push the gradients to the right in the next figure, and have them not become zero. 
#
# ![half float representation](images/half_representation.png)
#
# Of course we don't want those 512-scaled gradients to be in the weight update, so after converting them into FP32, we can divide them by this scale factor (once they have no risks of becoming 0). This changes the loop to:
# 1. compute the output with the FP16 model, then the loss.
# 2. multiply the loss by scale then back-propagate the gradients in half-precision.
# 3. copy the gradients in FP32 precision then divide them by scale.
# 4. do the update on the master model (in FP32 precision).
# 5. copy the master model in the FP16 model.
#
# For the last problem, the tricks offered by NVIDIA are to leave the batchnorm layers in single precision (they don't have many weights so it's not a big memory challenge) and compute the loss in single precision (which means converting the last output of the model in single precision before passing it to the loss).
#
# ![Mixed precision training](images/Mixed_precision.jpeg)
#
# ### Dynamic loss scaling
#
# The only annoying thing with the previous implementation of mixed precision training is that it introduces one new hyper-parameter to tune, the value of the loss scaling. Fortunately for us, there is a way around this. We want the loss scaling to be as high as possible so that our gradients can use the whole range of representation, so let's first try a really high value. In all likelihood, this will cause our gradients or our loss to overflow, and we will try again with half that big value, and again, until we get to the largest loss scale possible that doesn't make our gradients overflow.
#
# This value will be perfectly fitted to our model and can continue to be dynamically adjusted as the training goes, if it's still too high, by just halving it each time we overflow. After a while though, training will converge and gradients will start to get smaller, so we also need a mechanism to get this dynamic loss scale larger if it's safe to do so.
The strategy used in the Apex library is to multiply the loss scale by 2 each time we had a given number of iterations without overflowing. # ## Util functions # Before going in the main `Callback` we will need some helper functions. We use the ones from the [APEX library](https://github.com/NVIDIA/apex). # export from local.utils.fp16_utils import convert_network, model_grads_to_master_grads, master_params_to_model_params # ### Converting the model to FP16 # We will need a function to convert all the layers of the model to FP16 precision except the BatchNorm-like layers (since those need to be done in FP32 precision to be stable). In Apex, the function that does this for us is `convert_network`. We can use it to put the model in FP16 or back to FP32. # + model = nn.Sequential(nn.Linear(10,30), nn.BatchNorm1d(30), nn.Linear(30,2)).cuda() model = convert_network(model, torch.float16) for i,t in enumerate([torch.float16, torch.float32, torch.float16]): test_eq(model[i].weight.dtype, t) test_eq(model[i].bias.dtype, t) model = nn.Sequential(nn.Linear(10,30), BatchNorm(30, ndim=1), nn.Linear(30,2)).cuda() model = convert_network(model, torch.float16) for i,t in enumerate([torch.float16, torch.float32, torch.float16]): test_eq(model[i].weight.dtype, t) test_eq(model[i].bias.dtype, t) # - # ### Creating the master copy of the parameters # From our model parameters (mostly in FP16), we'll want to create a copy in FP32 (master parameters) that we will use for the step in the optimizer. Optionally, we concatenate all the parameters to do one flat big tensor, which can make that step a little bit faster. 
# # We can't use the FP16 util function here as it doesn't handle multiple parameter groups, which is the thing we use to # - do transfer learning and freeze some layers # - apply discriminative learning rates # - don't apply weight decay to some layers (like BatchNorm) or the bias terms # + #export from torch.nn.utils import parameters_to_vector def get_master(opt, flat_master=False): model_params = [[param for param in pg if param.requires_grad] for pg in opt.param_groups] if flat_master: master_params = [] for pg in model_params: mp = parameters_to_vector([param.data.float() for param in pg]) mp = torch.nn.Parameter(mp, requires_grad=True) if mp.grad is None: mp.grad = mp.new(*mp.size()) master_params.append([mp]) else: master_params = [[param.clone().float().detach() for param in pg] for pg in model_params] for pg in master_params: for param in pg: param.requires_grad_(True) return model_params, master_params # - #hide #cuda learn = synth_learner() learn.model = convert_network(nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)), torch.float16).cuda() learn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())] learn.opt = learn.opt_func(learn.splitter(learn.model), learn.lr) model_p,master_p = get_master(learn.opt) test_eq(len(model_p), 2) #2 pqrqm groups test_eq(len(master_p), 2) for pg1,pg2 in zip(model_p,master_p): test_eq([p.float() for p in pg1], pg2) #Same values but different types for p in pg1: assert p.dtype == torch.float16 #hide #cuda #Flattened version model_pf,master_pf = get_master(learn.opt, flat_master=True) test_eq(len(model_pf), 2) #2 pqrqm groups test_eq(len(master_pf), 2) for pg1,pg2 in zip(model_pf,master_pf): test_eq(len(pg2), 1) #One flattened tensor test_eq([p.float().squeeze() for p in pg1], [p for p in pg2[0]]) #Same values but different types for p in pg1: assert p.dtype == torch.float16 # ### Copy the gradients from model params to master params # After the backward pass, all gradients must be copied to the master params 
before the optimizer step can be done in FP32. The corresponding function in the Apex utils is `model_grads_to_master_grads` but we need to adapt it to work with param groups. # export def to_master_grads(model_pgs, master_pgs, flat_master=False): for (model_params,master_params) in zip(model_pgs,master_pgs): model_grads_to_master_grads(model_params, master_params, flat_master=flat_master) #hide #cuda xb,yb = learn.dbunch.one_batch() pred = learn.model.cuda()(xb.cuda().half()) loss = F.mse_loss(pred, yb.cuda().half()) loss.backward() to_master_grads(model_p, master_p) to_master_grads(model_pf, master_pf, flat_master=True) test_eq([[p.grad.float() for p in pg] for pg in model_p], [[p.grad for p in pg] for pg in master_p]) test_eq([[p.grad.float().squeeze() for p in pg] for pg in model_pf], [[p for p in pg[0].grad] for pg in master_pf]) xb.shape # ### Copy the master params to the model params # After the step, we need to copy back the master parameters to the model parameters for the next update. The corresponding function in Apex is `master_params_to_model_params`. # export def to_model_params(model_pgs, master_pgs, flat_master:bool=False)->None: for (model_params,master_params) in zip(model_pgs,master_pgs): master_params_to_model_params(model_params, master_params, flat_master=flat_master) #hide #cuda learn.opt.param_groups = master_p learn.opt.step() to_model_params(model_p, master_p) test_close([[p.float() for p in pg] for pg in model_p], [[p for p in pg] for pg in master_p], eps=1e-3) #hide #cuda learn.opt.param_groups = master_pf learn.opt.step() to_model_params(model_pf, master_pf, flat_master=True) test_close([[p.float().squeeze() for p in pg] for pg in model_pf], [[p for p in pg[0]] for pg in master_pf], eps=1e-3) # ### Checking for overflow # For dynamic loss caling, we need to know when the gradients have gone up to infinity. It's faster to check it on the sum than to do `torch.isinf(x).any()`. 
# export
def test_overflow(x):
    """True if `x` contains inf, -inf or nan (summing is faster than `torch.isinf(x).any()`)."""
    s = float(x.float().sum())
    # `s != s` is the classic nan check (nan compares unequal to itself).
    return (s == float('inf') or s == float('-inf') or s != s)

x = torch.randn(3,4)
assert not test_overflow(x)
x[1,2] = float('inf')
assert test_overflow(x)

# Then we can use it in the following function that checks for gradient overflow:

# export
def grad_overflow(pgs):
    """Scan every gradient in every param group; True at the first overflow found."""
    for pg in pgs:
        for p in pg:
            if p.grad is not None and test_overflow(p.grad.data): return True
    return False

#hide
#cuda
assert not grad_overflow(model_p)
assert not grad_overflow(model_pf)
model_p[1][0].grad.data[0,0] = float('inf')
model_pf[0][1].grad.data[0] = float('inf')
assert grad_overflow(model_p)
assert grad_overflow(model_pf)

# ## MixedPrecision -

# export
@docs
class MixedPrecision(Callback):
    "Run training in mixed precision"
    toward_end=True

    def __init__(self, loss_scale=512, flat_master=False, dynamic=True, max_loss_scale=2.**24,
                 div_factor=2., scale_wait=500, clip=None):
        # loss_scale: fixed scale used when dynamic=False.
        # dynamic: start at max_loss_scale, halve on overflow, grow by
        #   div_factor every scale_wait overflow-free iterations.
        # clip: optional gradient-norm clipping applied to the master params.
        assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."
        self.flat_master,self.dynamic,self.max_loss_scale = flat_master,dynamic,max_loss_scale
        self.div_factor,self.scale_wait,self.clip = div_factor,scale_wait,clip
        self.loss_scale = max_loss_scale if dynamic else loss_scale

    def begin_fit(self):
        self.learn.model = convert_network(self.model, dtype=torch.float16)
        self.model_pgs,self.master_pgs = get_master(self.opt, self.flat_master)
        #Changes the optimizer so that the optimization step is done in FP32.
        self.learn.opt.param_groups = self.master_pgs
        if self.dynamic: self.count = 0

    def begin_batch(self): self.learn.xb = to_half(self.xb)
    def after_pred(self): self.learn.pred = self.pred.float()
    def after_loss(self):
        if self.training: self.learn.loss *= self.loss_scale

    def after_backward(self):
        self.learn.loss /= self.loss_scale #To record the real loss
        #First, check for an overflow
        if self.dynamic and grad_overflow(self.model_pgs):
            self.loss_scale /= self.div_factor
            self.model.zero_grad()
            raise CancelBatchException() #skip step and zero_grad
        # Copy the FP16 grads to the FP32 master params and undo the scaling.
        to_master_grads(self.model_pgs, self.master_pgs, self.flat_master)
        for master_params in self.master_pgs:
            for param in master_params:
                if param.grad is not None: param.grad.div_(self.loss_scale)
        if self.clip is not None:
            for group in self.master_pgs: nn.utils.clip_grad_norm_(group, self.clip)
        #Check if it's been long enough without overflow to grow the loss scale
        if self.dynamic:
            self.count += 1
            if self.count == self.scale_wait:
                self.count = 0
                self.loss_scale *= self.div_factor

    def after_step(self):
        self.model.zero_grad() #Zero the gradients of the model manually (optimizer disconnected)
        to_model_params(self.model_pgs, self.master_pgs, self.flat_master)

    def after_fit(self):
        self.learn.model = convert_network(self.model, dtype=torch.float32)

    _docs = dict(begin_fit="Put the model in FP16 and prepare the two copies of the parameters",
                 begin_batch="Put the input in FP16",
                 after_pred="Put the output back to FP32 so that the loss is computed in FP32",
                 after_loss="Apply loss scaling to avoid gradient underflow",
                 after_backward="Copy the gradients to the master param and undo the loss scaling",
                 after_step="Copy the master params to the model params",
                 after_fit="Put the model back in FP32"
                )


# +
#hide
# Test harnesses: one callback runs before MixedPrecision and records the
# FP32 state, the other runs after and checks the FP16 conversions/updates.
class TestBeforeMixedPrecision(Callback):
    run_before=MixedPrecision
    def begin_fit(self): test_eq(next(iter(self.model.parameters())).dtype, torch.float32)
    def begin_batch(self): test_eq(self.x.dtype, torch.float32)
    def after_pred(self): test_eq(self.pred.dtype, torch.float16)
    def after_loss(self): self.loss = self.learn.loss.detach().clone()
    def after_backward(self):
        self.has_overflown = grad_overflow(self.mixed_precision.model_pgs)
        self.grads = [p.grad.data.clone() for p in self.model.parameters()]
        self.old_params = [p.data.clone() for p in self.model.parameters()]
    def after_step(self): assert not self.has_overflown
    def after_cancel_batch(self): assert self.has_overflown

class TestAfterMixedPrecision(Callback):
    run_after=MixedPrecision
    def begin_fit(self): test_eq(next(iter(self.model.parameters())).dtype, torch.float16)
    def after_fit(self): test_eq(next(iter(self.model.parameters())).dtype, torch.float32)
    def begin_batch(self): test_eq(self.x.dtype, torch.float16)
    def after_pred(self): test_eq(self.pred.dtype, torch.float32)
    def after_loss(self):
        loss_scale = self.mixed_precision.loss_scale if self.training else 1.
        test_eq(self.loss, self.test_before_mixed_precision.loss * loss_scale)
    def after_backward(self):
        tbmp = self.test_before_mixed_precision
        test_eq(self.loss, tbmp.loss)
        #Test gradients have been copied and scaled back
        test_close(sum([[p.grad.data for p in pg] for pg in self.mixed_precision.master_pgs], []),
                   [g.float()/self.mixed_precision.loss_scale for g in tbmp.grads])
    def after_step(self):
        tbmp,mp = self.test_before_mixed_precision,self.mixed_precision
        #Test master params have been copied to model
        test_close(sum([[p.data for p in pg] for pg in mp.master_pgs], []),
                   [p.data.float() for p in self.model.parameters()], eps=1e-3)
        #Test update has been done properly
        for p,g,op in zip(self.model.parameters(), tbmp.grads, tbmp.old_params):
            test_close(p.data.float(), op.float() - self.lr*g.float()/self.mixed_precision.loss_scale, eps=1e-3)
# -

#hide
#cuda
# End-to-end check with dynamic loss scaling.
learn = synth_learner(cbs=MixedPrecision(), cuda=True)
learn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()
learn.opt_func = partial(SGD, mom=0.)
learn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]
learn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])
#Check loss scale did change
assert 1 < learn.mixed_precision.loss_scale < 2**24
#Check the model did train
for v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1

#hide
#cuda
# Same check with a fixed loss scale.
learn = synth_learner(cbs=MixedPrecision(dynamic=False), cuda=True)
learn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()
learn.opt_func = partial(SGD, mom=0.)
learn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]
learn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])
#Check loss scale did not change
test_eq(learn.mixed_precision.loss_scale,512)
#Check the model did train
for v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1

#export
@delegates(MixedPrecision.__init__)
@patch
def to_fp16(self:Learner, **kwargs):
    """Convenience wrapper: attach a MixedPrecision callback to the Learner."""
    self.add_cb(MixedPrecision(**kwargs))

learn = synth_learner(cuda=True)
learn.model = nn.Sequential(nn.Linear(1,1), nn.Linear(1,1)).cuda()
learn.opt_func = partial(SGD, mom=0.)
learn.splitter = lambda m: [list(m[0].parameters()), list(m[1].parameters())]
learn.to_fp16()
learn.fit(3, cbs=[TestAfterMixedPrecision(), TestBeforeMixedPrecision()])
#Check the model did train
for v1,v2 in zip(learn.recorder.values[0], learn.recorder.values[-1]): assert v2<v1

# ## Export -

#hide
from local.notebook.export import *
notebook2script(all_fs=True)
dev/18_callback_fp16.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # LDA - Sentences - Iterative filtering - Run 0 # Note: # # This notebook loads the models from f1000_LDA_Sentence_Run_0 and returns "f1000_tokenized_LDA_sentence_1.tsv", which is the input for "f1000_LDA_Sentence_Run" with ITER = 1. # ## Configuration # + import csv import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") import re from collections import defaultdict # For word frequency from operator import itemgetter from tqdm import tqdm_notebook as tqdm import IPython #for HTML viz import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel import logging # Setting up the loggings to monitor gensim logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO) from peertax.LDA_Diagnostic import LDA_Conv, LDA_Scores # - # ## Model selection # + base_pth = './f1000_LDA_Sentence_Run_0/' cv_arr, um_arr = LDA_Scores(base_pth) plt.subplot(1,2,1) plt.plot(cv_arr[:,0],cv_arr[:,1],'ko-') plt.ylabel("CV Score") plt.xlabel("Num Topics") plt.subplot(1,2,2) plt.plot(um_arr[:,0],um_arr[:,1],'ko-') plt.ylabel("UMASS Score") plt.xlabel("Num Topics") plt.tight_layout() # - # Best model (highest CV Score) is Model_13. # # Load corpus from Run_0 and Model_13. 
# + num_topics = '13' model_no = 'Model_' + num_topics model_pth = base_pth + model_no # Load sentence data from tsv path_load_tsv = '../pickles/f1000_tokenized_LDA_sentence_0.tsv' df = pd.read_csv(path_load_tsv,sep='\t',quoting=csv.QUOTE_NONE) df.drop(columns=['Unnamed: 0'],inplace=True) df['token'] = df['token'].str.split(',') # Create Corpus texts = df['token'] #Load Model lda_model = gensim.models.ldamodel.LdaModel.load(model_pth) id2word = gensim.corpora.Dictionary.load(model_pth + '.id2word') # Term Document Frequency corpus = [id2word.doc2bow(text) for text in texts] print('Total number of sentences: %s' % len(corpus)) # - # Check model convergence. # + iter_no, perplexity, likelihood = LDA_Conv(base_pth,num_topics) plt.subplot(1,2,1) plt.plot(iter_no,likelihood,c="black") plt.ylabel("log liklihood") plt.xlabel("iteration") plt.subplot(1,2,2) plt.plot(iter_no,perplexity,c="black") plt.ylabel("perplexity") plt.xlabel("iteration") plt.tight_layout() # - # Show topic visualization. IPython.display.HTML(filename=model_pth+'.html') # Show topic contents (text). lda_model.show_topics(num_topics=25, num_words=10, log=False, formatted=True) # Manually select the topics to be discarded (because they are full of technical jargon). bad_topics = [4,6,7,12] # Assign topics to initial dataframe. topics = [max(lda_model.get_document_topics(row),key=itemgetter(1))[0] for row in tqdm(corpus)] df['topics'] = topics df.head() sns.countplot(x='topics',data=df); # Filter topics. df_filter = df[~df['topics'].isin(bad_topics)].copy() df_filter.info() # Save to tsv. path_save_tsv = "../pickles/f1000_tokenized_LDA_sentence_1.tsv" df_filter['token'] = df_filter['token'].str.join(',') df_filter.to_csv(path_save_tsv,sep='\t',quoting=csv.QUOTE_NONE)
LDA/f1000_LDA_Sentence_Filter_0.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="20RFH5fJlmoU" # # Regularization # + [markdown] colab_type="text" id="NhnitxlJlmoW" # ## What is Regularization? # + [markdown] colab_type="text" id="gPt3_vO2lmoX" # <img src="../r/1.png" /> # + [markdown] colab_type="text" id="L-et45HalmoY" # As we move towards the right in this image, our model tries to learn too well the details and the noise from the training data, which ultimately results in poor performance on the unseen data. # + [markdown] colab_type="text" id="Cj-EgOdTlmoZ" # In other words, while going towards the right, the complexity of the model increases such that the training error reduces but the testing error doesn’t. This is shown in the image below. # + [markdown] colab_type="text" id="XTxBAXp9lmoa" # <img src="../r/2.png" /> # + [markdown] colab_type="text" id="_fbx2bb-lmob" # Regularization is a technique which makes slight modifications to the learning algorithm such that the model generalizes better. This in turn improves the model’s performance on the unseen data as well. # + [markdown] colab_type="text" id="ZoBOI3x8lmod" # ## Underfitting and Overfitting # + [markdown] colab_type="text" id="RXHl79wolmoe" # Before diving further let’s understand the important terms: # # Bias – Assumptions made by a model to make a function easier to learn. # # Variance – If you train your data on training data and obtain a very low error, upon changing the data and then training the same previous model you experience high error, this is variance. # + [markdown] colab_type="text" id="H5FVNnzhlmof" # #### Underfitting # + [markdown] colab_type="text" id="0dJ-prYHlmog" # A statistical model or a machine learning algorithm is said to have underfitting when it cannot capture the underlying trend of the data. 
Underfitting destroys the accuracy of our machine learning model. Its occurrence simply means that our model or the algorithm does not fit the data well enough. It usually happens when we have less data to build an accurate model and also when we try to build a linear model with a non-linear data. In such cases the rules of the machine learning model are too easy and flexible to be applied on such minimal data and therefore the model will probably make a lot of wrong predictions. Underfitting can be avoided by using more data and also reducing the features by feature selection. # # Underfitting – High bias and low variance # # Techniques to reduce underfitting : # 1. Increase model complexity # 2. Increase number of features, performing feature engineering # 3. Remove noise from the data. # 4. Increase the number of epochs or increase the duration of training to get better results. # + [markdown] colab_type="text" id="D0Z1D2C3lmoj" # #### Overfitting # + [markdown] colab_type="text" id="c06krbaqlmol" # A statistical model is said to be overfitted, when we train it with a lot of data. When a model gets trained with so much of data, it starts learning from the noise and inaccurate data entries in our data set. Then the model does not categorize the data correctly, because of too many details and noise. The causes of overfitting are the non-parametric and non-linear methods because these types of machine learning algorithms have more freedom in building the model based on the dataset and therefore they can really build unrealistic models. A solution to avoid overfitting is using a linear algorithm if we have linear data or using the parameters like the maximal depth if we are using decision trees. # # Overfitting – High variance and low bias # # Techniques to reduce overfitting : # 1. Increase training data. # 2. Reduce model complexity. # 3. 
Early stopping during the training phase (have an eye over the loss over the training period as soon as loss begins to increase stop training). # 4. Ridge Regularization and Lasso Regularization # 5. Use dropout for neural networks to tackle overfitting. # + [markdown] colab_type="text" id="sZUR9dp2lmom" # <img src="../r/7.png" /> # <img src="../r/8.png" /> # + [markdown] colab_type="text" id="qKBncR2llmop" # ### Ridge Regression # + [markdown] colab_type="text" id="FYymr4AOlmoq" # Ridge regression is also called L2 regularization. It adds a constraint that is a linear function of the squared coefficients. # + [markdown] colab_type="text" id="5xo2joEXlmos" # <img src="../r/4.png" /> # <img src="../r/5.png" /> # + [markdown] colab_type="text" id="CEveA8_8lmot" # To minimize the regularized loss function, we need to choose λ to minimize the sum of the area of the circle and the area of the ellipsoid chosen by the tangency. # # Note that when λ tends to zero, the regularized loss function becomes the OLS loss function. # # When λ tends to infinity, we get an intercept-only model (because in this case, the ridge regression coefficients tend to zero). Now we have smaller variance but larger bias. # # A critique of ridge regression is that all the variables tend to end up in the model. The model only shrinks the coefficients. # + [markdown] colab_type="text" id="D1rYoxO7lmou" # ### Lasso # + [markdown] colab_type="text" id="Z50XcFW7lmov" # Lasso is also known as L1 regularization. It penalizes the model by the absolute weight coefficients. # + [markdown] colab_type="text" id="Y6T9pjK7lmox" # <img src="../r/6.png" /> # + [markdown] colab_type="text" id="RXBwjdnKlmoz" # Lasso works in the following way: it forces the sum of the absolute value of the coefficients to be less than a constant, which forces some of the coefficients to be zero and results in a simpler model. 
# This is because comparing to the L2 regularization, the ellipsoid has the tendency to touch the diamond-shaped constraint on the corner.
#
# Lasso performs better than ridge regression in the sense that it helps a lot with feature selection.

# + colab={} colab_type="code" id="Q2Uf_S23lmo6"
# Import packages and observe dataset

# + colab={} colab_type="code" id="AryxEyFAlmpA"
#Import numerical libraries
import pandas as pd
import numpy as np

#Import graphical plotting libraries
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline

#Import Linear Regression Machine Learning Libraries
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import r2_score

# + colab={} colab_type="code" id="OMmycw9glmpE" outputId="678dea27-1b77-4ed8-a15a-00099d94ec03"
data_raw = pd.read_csv('datasets/reg.csv')
data_raw.head()

# + colab={} colab_type="code" id="82mm8sMrlmpJ"
#Drop car name
#Replace origin into 1,2,3.. dont forget get_dummies
#Replace ? with nan
#Replace all nan with median
data = data_raw.drop(['name'], axis = 1)
data.head()
# -

data['origin'] = data['origin'].replace({1: 'america', 2: 'europe', 3: 'asia'})
data = pd.get_dummies(data, columns = ['origin'])
data = data.replace('?', np.nan)
# Column-wise median imputation of the missing values introduced above.
data = data.apply(lambda x: x.fillna(x.median()), axis = 0)
data.head()

# + colab={} colab_type="code" id="PEzoE7HLlmpR"
# Model building

# + colab={} colab_type="code" id="WnV4zqeQlmpW"
X = data.drop(['mpg'], axis = 1) # independent variable
y = data[['mpg']] #dependent variable

# + colab={} colab_type="code" id="JxfdOeeOlmpa" outputId="03846650-c3b3-4736-f4b9-234237103363"
#Scaling the data
# NOTE(review): the scaler is fit on the FULL dataset before the train/test
# split, which leaks test-set statistics into training — confirm intended
# (acceptable for a tutorial, not for a real evaluation).
X_s = preprocessing.scale(X)
X_s = pd.DataFrame(X_s, columns = X.columns) #converting scaled data into dataframe

y_s = preprocessing.scale(y)
y_s = pd.DataFrame(y_s, columns = y.columns) #ideally train, test data should be in columns

# + colab={} colab_type="code" id="8P5JzDBMlmpe" outputId="ed4436cb-8aa2-4d85-fc94-9ff4cf61378b"
#Split into train, test set
X_train, X_test, y_train,y_test = train_test_split(X_s, y_s, test_size = 0.30, random_state = 42)
X_train.shape

# + colab={} colab_type="code" id="QuEtPChAlmph"
# Simple Linear Model

# + colab={} colab_type="code" id="6oJNDtoIlmpl" outputId="857b5f78-64fe-4263-bd4f-fb0c1488177f"
#Fit simple linear model and find coefficients
regression_model = LinearRegression()
regression_model.fit(X_train, y_train)
print(f'Regression model coef: {regression_model.coef_}')

# + colab={} colab_type="code" id="xquEQhq9lmpo"
# Regularized Ridge Regression

# + colab={} colab_type="code" id="5tgMytDTlmpr" outputId="068228d7-f34a-4335-99b5-3f4c8edc7466"
#alpha factor here is lambda (penalty term) which helps to reduce the magnitude of coeff
ridge_model = Ridge(alpha = 0.3)
ridge_model.fit(X_train, y_train)
print(f'Ridge model coef: {ridge_model.coef_}')
#As the data has 10 columns hence 10 coefficients appear here

# + colab={} colab_type="code" id="O3y8eJ0slmpv"
# Regularized Lasso Regression

# + colab={} colab_type="code" id="5F1M2L_ylmpx" outputId="f987a2b2-a9b5-4816-d6f7-1a9a8b2a72f0"
#alpha factor here is lambda (penalty term) which helps to reduce the magnitude of coeff
lasso_model = Lasso(alpha = 0.001)
lasso_model.fit(X_train, y_train)
print(f'Lasso model coef: {lasso_model.coef_}')
#As the data has 10 columns hence 10 coefficients appear here

# + colab={} colab_type="code" id="F2KmGpNXlmp0"
# Score Comparison

# + colab={} colab_type="code" id="8YBStPwilmp3" outputId="08dd9aba-9341-4afa-c3f6-a2ab96bdc325"
#Model score - r^2 or coeff of determinant
#r^2 = 1-(RSS/TSS) = Regression error/TSS

#Simple Linear Model
print("Simple: ", regression_model.score(X_test, y_test))
print('*************************')
#Lasso
print("Lasso: ",lasso_model.score(X_test, y_test))
print('*************************')
#Ridge
print("Ridge: ",ridge_model.score(X_test, y_test))
# -
Regularization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="D6O7IMQWWy8p" # Authored by: <NAME> <br> # Roll: 2019121004 # + [markdown] id="LeixqTEZWy8q" # **Note: dataset shape is version dependent hence final answer too will be dependent of sklearn version installed on machine** # + [markdown] id="2aEkoZdVfCpr" # # Excercise: Eigen Face # # Here, we will look into ability of PCA to perform dimensionality reduction on a set of Labeled Faces in the Wild dataset made available from scikit-learn. Our images will be of shape (62, 47). This problem is also famously known as the eigenface problem. Mathematically, we would like to find the principal components (or eigenvectors) of the covariance matrix of the set of face images. These eigenvectors are essentially a set of orthonormal features depicts the amount of variation between face images. When plotted, these eigenvectors are called eigenfaces. 
# + [markdown] id="XJYVUvgp4M-X" # #### Imports # + id="QKCQSSn-4M-Y" outputId="45043f18-8b04-4a02-de85-65d0d1436ef6" colab={"base_uri": "https://localhost:8080/", "height": 71} import numpy as np import matplotlib.pyplot as plt from numpy import pi from sklearn.datasets import fetch_lfw_people import seaborn as sns; sns.set() # + id="FfM2AVKxWy8v" outputId="2791bece-2ae6-4abf-d108-5079e66d2aa5" colab={"base_uri": "https://localhost:8080/", "height": 34} import sklearn print(sklearn.__version__) # + [markdown] id="pg2a_jPW4M-b" # #### Setup data # + id="plvaURfdX-Ux" outputId="29435e19-8b95-4193-c04b-9d866203ad96" colab={"base_uri": "https://localhost:8080/", "height": 1000} faces = fetch_lfw_people(min_faces_per_person=8) X = faces.data y = faces.target print(faces.target_names) print(faces.images.shape) # + [markdown] id="7LEmd2YHgAXN" # Note: **images num is version dependent** <br> # I get (4822, 62, 47) in my version of sklearn which is 0.22.2. <br> # # Since our images is of the shape (62, 47), we unroll each image into a single row vector of shape (1, 4822). This means that we have 4822 features defining each image. These 4822 features will result into 4822 principal components in the PCA projection space. Therefore, each image location contributes more or less to each principal component. 
# + [markdown] id="QY8dDwvU4M-j" # #### Implement Eigen Faces # + id="3wMzcqj4Wy83" outputId="156aa162-7cad-4bc6-d486-d2eab301f6ec" colab={"base_uri": "https://localhost:8080/", "height": 34} print(faces.images.shape) # + id="yuz9hR7J4M-h" img_shape = faces.images.shape[1:] # + id="NaTvtCHFWy89" outputId="2cd9ee13-ab21-4b4a-b68f-7495343e2c29" colab={"base_uri": "https://localhost:8080/", "height": 34} print(img_shape) # + id="w7Kaxc334M-k" def FindEigen(X_mat): X_mat -= np.mean(X_mat, axis=0, keepdims=True) temp = np.matmul(X_mat.T, X_mat) cov_mat = 1/X_mat.shape[0]* temp eigvals, eigvecs = np.linalg.eig(cov_mat) ind = eigvals.argsort()[::-1] return np.real(eigvals[ind]), np.real(eigvecs[:, ind]) # + id="fAnHSOh24M-s" def plotFace(faces, h=10, v=1): fig, axes = plt.subplots(v, h, figsize=(10, 2.5), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(faces[i].reshape(*img_shape), cmap='gray') # + id="Oz47DWzdWy9F" def plotgraph(eigenvals): plt.plot(range(1, eigenvals.shape[0]+1), np.cumsum(eigenvals / np.sum(eigenvals))) plt.show() # + id="n3RLci1U4M-o" def PrincipalComponentsNum(X, eigenvals, threshold=0.95): num = np.argmax(np.cumsum(eigenvals / np.sum(eigenvals)) >= threshold) + 1 print(f"No. of principal components required to preserve {threshold*100} % variance is: {num}.") # + [markdown] id="GlmcqIMz4M-u" # ### Q1 # # How many principal components are required such that 95% of the vari- # ance in the data is preserved? 
# + id="es6f3svW4M-m" eigenvals, eigenvecs = FindEigen(X) # + id="y1OhKkcx4M-v" outputId="d794a65f-9e4f-44c4-e176-a2a1331f1595" colab={"base_uri": "https://localhost:8080/", "height": 268} plotgraph(eigenvals) # + id="X_MwWzMr4M-x" outputId="f3f9e213-d3b6-44d0-8055-2edde5a80e7e" colab={"base_uri": "https://localhost:8080/", "height": 34} PrincipalComponentsNum(X, eigenvals) # + [markdown] id="sDtK3q_Y4M-z" # ### Q2 # # Show the reconstruction of the first 10 face images using only 100 principal # components. # + id="Uiix05OLWy9U" def reconstructMat(X, eigvecs, num_c): return (np.matmul(X,np.matmul(eigvecs[:, :num_c], eigvecs[:, :num_c].T))) # + id="tff6pVhoWy9W" outputId="3c8d5114-c1ec-4b63-b8ce-655b896a7cfd" colab={"base_uri": "https://localhost:8080/", "height": 115} faceNum = 10 print('original faces') plotFace(X[:faceNum, :], faceNum) # + id="ieeagM-J4M-z" outputId="d2139284-1bb3-4980-cfae-b94fbcf64ebc" colab={"base_uri": "https://localhost:8080/", "height": 115} recFace = reconstructMat(X[:faceNum, :], eigenvecs, 100) print('reconstructed faces using only 100 principal components') plotFace(recFace, faceNum) # + [markdown] id="a0LBIuwXP6Dv" # # Adding noise to images # # We now add gaussian noise to the images. Will PCA be able to effectively perform dimensionality reduction? # + id="TKDPL9MCMekO" def plot_noisy_faces(noisy_faces): fig, axes = plt.subplots(2, 10, figsize=(10, 2.5), subplot_kw={'xticks':[], 'yticks':[]}, gridspec_kw=dict(hspace=0.1, wspace=0.1)) for i, ax in enumerate(axes.flat): ax.imshow(noisy_faces[i].reshape(62, 47), cmap='binary_r') # + [markdown] id="ZRSbkzN1Quvg" # Below we plot first twenty noisy input face images. 
# + id="9zhLo04mN8z_" outputId="287c0d99-95d7-48c3-e86b-2a8779ce9f58" colab={"base_uri": "https://localhost:8080/", "height": 167}
# Add i.i.d. Gaussian noise (sigma = 15) around every pixel value.
np.random.seed(42)
noisy_faces = np.random.normal(X, 15)
plot_noisy_faces(noisy_faces)

# + id="fGhFGySS4M-_" outputId="1613304e-bda2-4a50-86d4-ff8dc638fd0c" colab={"base_uri": "https://localhost:8080/", "height": 34}
noisy_faces.shape

# + id="04-Ss4me4M_B"
# NOTE(review): FindEigen centres its argument in place, so noisy_faces is
# mean-centred from here on; the reconstruction below relies on that.
noisy_eigenvals, noisy_eigenvecs = FindEigen(noisy_faces)

# + [markdown] id="dbPIRvYS4M_C"
# ### Q3.1
# Show the above two results for a noisy face dataset.
# How many principal components are required such that 95% of the vari-
# ance in the data is preserved?

# + id="t9JPJ9pK4M_D" outputId="bb212c44-2cb1-446e-adef-4c29fdc321dc" colab={"base_uri": "https://localhost:8080/", "height": 268}
plotgraph(noisy_eigenvals)

# + id="0w3N21a34M_E" outputId="b0a43c91-9ae7-4140-8ff7-c30e19a82754" colab={"base_uri": "https://localhost:8080/", "height": 34}
PrincipalComponentsNum(noisy_faces, noisy_eigenvals, 0.95)

# + [markdown] id="oi81iAPL4M_G"
# ### Q3.2
#
# Show the reconstruction of the first 10 face images using only 100 principal
# components.

# + id="IiQdc72j4M_G"
# Fix: the original cell rebound the name ``faces`` (the LFW dataset Bunch
# loaded at the top of the notebook) to the integer 10, shadowing the dataset.
# Use a dedicated counter name instead.
num_faces = 10
noisy_recons = reconstructMat(noisy_faces[:num_faces, :], noisy_eigenvecs, 100)

# + id="oe0spryC4M_I" outputId="7606e025-1ea2-4bb1-d142-6fdb78a720a4" colab={"base_uri": "https://localhost:8080/", "height": 115}
print('reconstructed faces for noisy images only 100 principal components')
plotFace(noisy_recons, num_faces)
HW5/2019121004_pca.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Notebook to plot training progress of agent
#
# It will obtain data from file stored in format like `bullet_racecar.cma.1.32.hist.json` and `.hist_best.json`,
# where format is `env_name.optimizer.num_rollouts.popsize.hist.json`.
#

import numpy as np
import json
import os
import matplotlib.pyplot as plt

env_name = 'carracing'
optimizer = 'cma'
num_rollouts = 16  # number of rollouts that are averaged over an episode
popsize = 64

# Fix: file_base was previously computed twice with identical code; build it once.
file_base = env_name+'.'+optimizer+'.'+str(num_rollouts)+'.'+str(popsize)


def load_history(suffix):
    """Load 'log/<file_base><suffix>' (a JSON array) as a numpy array."""
    path = os.path.join('log', file_base + suffix)
    with open(path, 'r') as f:
        return np.array(json.load(f))


data = load_history('.hist.json')
print(data.shape)

required_score = 900.0

raw_best_data = load_history('.hist_best.json')
print(raw_best_data.shape)

# Columns kept per best-history row: generation, wall-clock seconds,
# best average score (col 5), col 9, and the constant requirement line.
best_data = np.array([[float(b[0]), float(b[1]), float(b[5]), float(b[9]), required_score]
                      for b in raw_best_data])

SECONDS_PER_DAY = 60 * 60 * 24  # convert the wall-clock column to days


def plot_progress(x, best_x, xlabel, xticks, out_name):
    """Plot mean/min/max curves plus best score and requirement, save as SVG.

    The two call sites differ only in the x-axis column and tick spacing;
    this helper removes the previously duplicated plotting code.
    """
    fig = plt.figure(figsize=(16, 10), dpi=80, facecolor='w', edgecolor='k')
    line_mean, = plt.plot(x, data[:, 2])
    line_min, = plt.plot(x, data[:, 3])
    line_max, = plt.plot(x, data[:, 4])
    line_best, = plt.plot(best_x, best_data[:, 2])
    line_req, = plt.plot(best_x, best_data[:, 4])
    plt.legend([line_mean, line_min, line_max, line_req, line_best],
               ['mean', 'min', 'max', 'requirement', 'best avg score'])
    plt.xlabel(xlabel)
    plt.xticks(xticks)
    plt.ylabel('cumulative reward')
    plt.yticks(np.arange(-100, 1000, 50))
    plt.title(file_base)
    plt.savefig(out_name)
    plt.show()


# Progress against wall-clock time (days).
plot_progress(data[:, 1] / SECONDS_PER_DAY, best_data[:, 1] / SECONDS_PER_DAY,
              'wall-clock time (days)', np.arange(0, 48, 5), file_base + ".wall.svg")

# Progress against generation number.
plot_progress(data[:, 0], best_data[:, 0],
              'generation', np.arange(0, 2000, 200), file_base + ".svg")
carracing/.ipynb_checkpoints/plot_training_progress-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Implementing FIR filters
#
# In real-time filtering applications, filters are implemented by using some variation or other of their constant-coefficient difference equation (CCDE), so that one new output sample is generated for each new input sample. If all input data is available in advance, as in non-real-time (aka "offline") applications, then the CCDE-based algorithm is iteratively applied to all samples in the buffer.
#
# In the case of FIR filters, the CCDE coefficients correspond to the impulse response and implementing the CCDE is equivalent to performing a convolution sum. In this notebook we will look at different ways to implement FIR filters.

# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# ## Online implementation
#
# The classic way to implement a filter is the one-in one-out approach. We will need to implement a persistent delay line. In Python we can either define a class or use function attributes; classes are tidier and reusable:

class FIR_loop():
    """One-in, one-out FIR filter with a persistent circular delay line."""

    def __init__(self, h):
        self.h = h                   # impulse response (filter taps)
        self.ix = 0                  # current write position in the delay line
        self.M = len(h)              # filter length
        self.buf = np.zeros(self.M)  # circular buffer holding past inputs

    def filter(self, x):
        """Push one input sample, return the corresponding output sample."""
        self.buf[self.ix] = x
        # convolution sum over the delay line, newest sample first
        acc = 0
        for n, tap in enumerate(self.h):
            acc += tap * self.buf[(self.ix + self.M - n) % self.M]
        # advance the write pointer, wrapping around the buffer
        self.ix = (self.ix + 1) % self.M
        return acc


# +
# simple moving average:
h = np.ones(5)/5

f = FIR_loop(h)
for n in range(0, 10):
    print(f.filter(n), end=", ")
# -

# While there's nothing wrong with the above implementation, when the data to be filtered is known in advance, it makes no sense to explicitly iterate over its element and it's better to use higher-level commands to perform the convolution. In Numpy, the command is `convolve`; before we use it, though, we need to take border effects into consideration.
# ## Offline implementations: border effects # # When filtering a finite-length data vector with a finite-length impulse response, we need to decide what to do with the "invalid" shifts appearing in the terms of the convolution sum. Remember that, in the infinite-length case, the output is defined as # # $$ # y[n] = \sum_{k=-\infty}^{\infty} h[k]x[n-k] # $$ # # Let's say that the impulse response is $M$ points long, so that $h[n]$ is nonzero only between $0$ and $M-1$; this means that the sum is reduced to # # $$ # y[n] = \sum_{k=0}^{M-1} h[k]x[n-k] # $$ # # Now assume that $x[n]$ is a length-$N$ signal, so it is defined only for $0 \leq n \le N$ (we can safely consider $N > M$, otherwise exchange the roles of $x$ and $h$). In this case, the above sum is properly defined only for $M - 1 \le n \le N-1$; for any other value of $n$, the sum will contain an element $x[n-k]$ outside of the valid range of indices for the input. # # So, if we start with an $N$-point input, we can only formally compute $N-M+1$ output samples. While this may not be a problem in some applications, it certainly is troublesome if repeated filtering operations end up "chipping away" at the signal little by little. # # The solution is to "embed" the finite-length input data signal into an infinite-length sequence and, as always, the result will depend on the method we choose: finite support or periodization. (Note that the impulse response is already an infinite sequence since it's the response of the filter to the infinite sequence $\delta[n]$). # # However, the embedding will create "artificial" data points that are dependent on the chosen embedding: these data points are said to suffer from **border effects**. 
# Let's build a simple signal and a simple FIR filter: # + # let's use a simple moving average: M = 5 h = np.ones(M)/float(M) # let's build a signal with a ramp and a plateau x = np.concatenate((np.arange(1, 9), np.ones(5) * 8, np.arange(8,0,-1))) plt.stem(x); print('signal length: ', len(x)) # - # ### 1) No border effects # # We may choose to accept the loss of data points and use only the $N-M+1$ output samples that correspond to a full overlap between the input data and the impulse response. This can be achieved by selecting `mode='valid'` in `correlate`: y = np.convolve(x, h, mode='valid') print('signal length: ', len(y)) plt.stem(y); # ### 2) finite-support extension # # By embedding the input into a finite-support signal, the convolution sum is now well defined for all values of $n$, which now creates a new problem: the output will be nonzero for all values of $n$ for which $x[n-k]$ is nonzero, that is for $0 \le n \le N+M-1$: we end up with a *longer* support for the output sequence. This is the default in `correlate` and corresponds to `mode='full'`: y = np.convolve(x, h, mode='full') print('signal length: ', len(y)) plt.stem(y); # If we want to preserve the same length for input and output, we need to truncate the result. You can keep the *first* $N$ samples and discard the tail; this corresponds to the online implementation of the FIR filter. Alternatively, you can discard half the extra samples from the beginning and half from the end of the output and distribute the border effect evenly; this is achieved in `correlate` by setting `mode='same'`: y = np.convolve(x, h, mode='same') print('signal length: ', len(y)) plt.stem(y); # ### 3) Periodic extension # # As we know, the other way of embedding a finite-length signal is to build a periodic extension. 
# The convolution in this case will return an $N$-periodic output:
#
# $$
# \tilde{y}[n] = \sum_{k=0}^{M-1} h[k]\tilde{x}[n-k]
# $$
#
# We can easily implement a circular convolution using `convolve` like so: since the overlap between time-reversed impulse response and input is already good for the last $N-M$ points in the output, we just need to consider two periods of the input to compute the first $M$:

def cconv(x, h):
    """Circular convolution of h with x, over the period len(x).

    Assumes len(h) < len(x): two concatenated periods of the input are
    enough to make every shift of the full linear convolution wrap
    correctly, so we take the middle period of that result.
    """
    period = len(x)
    two_periods = np.concatenate((x, x))
    full = np.convolve(two_periods, h)
    return full[period:2 * period]


y = cconv(x, h)
print('signal length: ', len(y))
plt.stem(y);

# OK, clearly the result is not necessarily what we expected; note however that in both circular and "normal" convolution, you still have $M-1$ output samples "touched" by border effects, it's just that the border effects act differently in the two cases.
#
# Interestingly, you can still obtain a "normal" convolution using a circular convolution if you zero-pad the input signal with $M-1$ zeros:

y = cconv(np.concatenate((x, np.zeros(M-1))), h)
print('signal length: ', len(y))
plt.stem(y);
# plot in red the difference with the standard conv
plt.stem(y - np.convolve(x, h, mode='full'), markerfmt='ro');

# Why is this interesting? Because of the DFT....

# ## Offline implementations using the DFT
#
# The convolution theorem states that, for infinite sequences,
#
# $$
# (x\ast y)[n] = \mbox{IDTFT}\{X(e^{j\omega})Y(e^{j\omega})\}[n]
# $$
#
# Can we apply this result to the finite-length case? In other words, what is the inverse DFT of the product of two DFTs?
Let's see: # # \begin{align} # \sum_{k=0}^{N-1}X[k]Y[k]e^{j\frac{2\pi}{N}nk} &= \sum_{k=0}^{N-1}\sum_{p=0}^{N-1}x[p]e^{-j\frac{2\pi}{N}pk}\sum_{q=0}^{N-1}y[q]e^{-j\frac{2\pi}{N}qk} \,e^{j\frac{2\pi}{N}nk} \\ # &= \sum_{p=0}^{N-1}\sum_{q=0}^{N-1}x[p]y[q]\sum_{k=0}^{N-1}e^{j\frac{2\pi}{N}(n-p-q)k} \\ # &= N\sum_{p=0}^{N-1}x[p]y[(n-p) \mod N] # \end{align} # # The results follows from the fact that $\sum_{k=0}^{N-1}e^{j\frac{2\pi}{N}(n-p-q)k}$ is nonzero only for $n-p-q$ multiple of $N$; as $p$ varies from $0$ to $N-1$, the corresponding value of $q$ between $0$ and $N$ that makes $n-p-q$ multiple of $N$ is $(n-p) \mod N$. # # So the fundamental result is: **the inverse DFT of the product of two DFTs is the circular convolution of the underlying time-domain sequences!** # # # To apply this result to FIR filtering, the first step is to choose the space for the DFTs. In our case we have a finite-length data vector of length $N$ and a finite-support impulse response of length $M$ with $M<N$ so let's operate in $\mathbb{C}^N$ by zero-padding the impulse response to size $N$. 
# Also, we most likely want the normal convolution, so let's zero-pad both signals by an additional $M-1$ samples

def DFTconv(x, h, mode='full'):
    """FFT-based linear convolution of x and h, mirroring numpy.convolve.

    The full convolution is computed via DFTs zero-padded to length
    N+M-1, then trimmed according to `mode` ('full', 'valid' or 'same').
    """
    N, M = len(x), len(h)
    # multiply the zero-padded spectra; this is a circular convolution of
    # length N+M-1, which for that length equals the linear convolution
    spectrum = np.fft.fft(x, n=N + M - 1) * np.fft.fft(h, n=N + M - 1)
    # inputs are real-valued, so discard the numerically tiny imaginary part
    y = np.real(np.fft.ifft(spectrum))
    if mode == 'valid':
        # keep only the N-M+1 fully-overlapping samples, starting at M-1
        return y[M - 1:N]
    if mode == 'same':
        start = int((M - 1) / 2)
        return y[start:start + N]
    return y


# Let's verify that the results are the same

y = np.convolve(x, h, mode='valid')
print('signal length: ', len(y))
plt.stem(y);

y = DFTconv(x, h, mode='valid')
print('signal length: ', len(y))
plt.stem(y, markerfmt='ro');

y = np.convolve(x, h, mode='same')
print('signal length: ', len(y))
plt.stem(y);

y = DFTconv(x, h, mode='same')
print('signal length: ', len(y))
plt.stem(y, markerfmt='ro');

# Of course the question at this point is: why go through the trouble of taking DFTs if all we want is the standard convolution? The answer is: **computational efficiency.**
#
# If you look at the convolution sum, each output sample requires $M$ multiplications (and $M-1$ additions but let's just consider multiplications). In order to filter an $N$-point signal we will need $NM$ multiplications. Assume $N \approx M$ and you can see that the computational requirements are on the order of $M^2$. If we go the DFT route using an efficient FFT implementation we have approximately:
#
# * $M\log_2 M$ multiplication to compute $H[k]$
# * $M\log_2 M$ multiplication to compute $X[k]$
# * $M\log_2 M$ multiplication to compute $X[k]H[k]$
# * $M\log_2 M$ multiplication to compute the inverse DFT
#
# Even considering that we now have to use complex multiplications (which will cost twice as much), we can estimate the cost of the DFT based convolution at around $8M\log_2M$, which is smaller than $M^2$ as soon as $M>44$.
# In practice, the data vector is much longer than the impulse response so that filtering via standard convolution requires on the order of $MN$ operations. Two techniques, called [Overlap Add](https://en.wikipedia.org/wiki/Overlap%E2%80%93add_method) and [Overlap Save](https://en.wikipedia.org/wiki/Overlap%E2%80%93save_method) # can be used to divide the convolution into $N/M$ independent convolutions between $h[n]$ and an $M$-sized piece of $x[n]$; FFT-based convolution can then be used on each piece. While the exact cost per sample of each technique is a bit complicated to estimate, as a rule of thumb **as soon as the impulse response is longer than 50 samples, it's more convenient to use DFT-based filtering.**
Digital Signal Processing 2 Filtering/implementation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Titanic # This data science project is about setting good practices while building data science models. For this particular example, we are going to use the titanic dataset, and work both with pandas and spark. # ## References ## # # For a more detailed overview about the project template and the datalabframework package, please refer to the notebooks in the [how to directory](publish/reports/howto/howto.ipynb). # # - [ETL](etl/main.ipynb) # - [Models](models/main.ipnb) # - [Reports](publish/reports/main.ipynb) # # ## Data ## # # The Titanic dataset is provided by Kaggle. import datalabframework as dlf metadata = dlf.params.metadata('prod') dlf.utils.pretty_print(metadata) metadata_info = dlf.params.metadata_files() dlf.utils.pretty_print(metadata_info) dlf.project.filename(False) dlf.project.filename() dlf.project.rootpath() dlf.utils.pretty_print(dlf.project.info())
datalabframework/templates/titanic/{{cookiecutter.project_name}}/src/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd from splinter import Browser import bs4 as bs import time # + executable_path = {'executable_path': 'chromedriver.exe'} browser = Browser('chrome', **executable_path, headless=False) url = "https://gendo90.github.io/javascript-challenge/StarterCode/index.html" browser.visit(url) time.sleep(2) ufo_data = browser.find_by_id("table-area") all_table_data = ufo_data.html # + ufo_df = pd.read_html(all_table_data) all_ufos_df = ufo_df[0] all_ufos_df # - all_ufos_df.to_csv("all_ufos.csv", index=False)
UFO Data ETL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
# default_exp models.layers.activation
# -

# # Activation Layers
# > Implementation of NN activation layers in Pytorch.

#hide
from nbdev.showdoc import *
from fastcore.nb_imports import *
from fastcore.test import *

# +
#export
import torch
from torch import nn

import math
# -

# ## Dice

#export
class Dice(nn.Module):
    """Dice activation: gates between the identity and a learned per-feature
    rescaling of the input, using a sigmoid over batch-normalised activations."""

    def __init__(self, input_dim, eps=1e-9):
        super(Dice, self).__init__()
        # affine=False: BatchNorm only standardises; the learnable part is alpha
        self.bn = nn.BatchNorm1d(input_dim, affine=False, eps=eps, momentum=0.01)
        self.alpha = nn.Parameter(torch.zeros(input_dim))

    def forward(self, X):
        gate = torch.sigmoid(self.bn(X))
        # gate -> 1: pass X through unchanged; gate -> 0: use alpha-scaled X
        return gate * X + (1 - gate) * self.alpha * X

# ## GELU

#export
def gelu(x):
    """Exact (erf-based) Gaussian Error Linear Unit.

    For information: OpenAI GPT's gelu is slightly different
    (and gives slightly different results):
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    """
    return 0.5 * x * (torch.erf(x / math.sqrt(2.0)) + 1.0)

x = torch.tensor([[0.9728, 0.9928, 0.7223]])
test_eq(torch.round(gelu(x) * 100) / (100), torch.tensor([[0.81, 0.83, 0.55]]))

# ## swish

#export
def swish(x):
    """Swish (SiLU) activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x

x = torch.tensor([[0.9728, 0.9928, 0.7223]])
test_eq(torch.round(swish(x) * 100) / (100), torch.tensor([[0.71, 0.72, 0.49]]))

# > **References**
# > - https://github.com/xue-pai/FuxiCTR/blob/main/fuxictr/pytorch/layers/activation.py

#hide
# %reload_ext watermark
# %watermark -a "Sparsh A." -m -iv -u -t -d
nbs/models/layers/models.layers.activation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.0 ('dhiraj_ml_march') # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df=pd.read_csv(r"C:\Users\dhire\Documents\Machine_learning_Inuron\ML_Live_Class\data\Advertising.csv") df.head() x=df.drop(['sales'],axis=1) x.head() y=df['sales'] y from sklearn.linear_model import LinearRegression from sklearn.preprocessing import PolynomialFeatures poly_conv=PolynomialFeatures(degree=3,include_bias=False) final_model=LinearRegression() poly_features=poly_conv.fit_transform(x) x.shape poly_features.shape from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(poly_features, y, test_size=0.33, random_state=42) X_train.shape X_test.shape from sklearn.preprocessing import StandardScaler scalar=StandardScaler() scalar.fit(X_train) scaled_X_train=scalar.transform(X_train) scaled_X_test=scalar.transform(X_test) X_train[0] scaled_X_train[0] # #### Ridge regression from sklearn.linear_model import Ridge ridge_model=Ridge(alpha=10) ridge_model.fit(X_train,y_train) test_prediction=ridge_model.predict(X_test) from sklearn.metrics import mean_absolute_error,mean_squared_error MAE=mean_absolute_error(y_test,test_prediction) MAE RMSE=np.sqrt(mean_squared_error(y_test,test_prediction)) RMSE
Regularisation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Is List Palindrome # Note: Try to solve this task in O(n) time using O(1) additional space, where n is the number of elements in l, since this is what you'll be asked to do during an interview. # # Given a singly linked list of integers, determine whether or not it's a palindrome. # # Note: in examples below and tests preview linked lists are presented as arrays just for simplicity of visualization: in real data you will be given a head node l of the linked list # # Example # # For l = [0, 1, 0], the output should be # isListPalindrome(l) = true; # # For l = [1, 2, 2, 3], the output should be # isListPalindrome(l) = false. # # Input/Output # # [execution time limit] 4 seconds (py3) # # [input] linkedlist.integer l # # A singly linked list of integers. # # Guaranteed constraints: # 0 ≤ list size ≤ 5 · 105, # -109 ≤ element value ≤ 109. # # [output] boolean # # Return true if l is a palindrome, otherwise return false. # # - Thinking # # Can you determine the length of a linked list without doing a full pass? Because it's a singly linked list, I think the only option is to go to the end of the list and count the steps. 
# That means
# - Determine length of list
# - Determine odd or even
#     - if even, check if two middle values are the same
#         - if not, not palindrome
#         - if yes, yes palindrome
#     - if odd, check if value to left is same as value to right, move out to edges until no more
#         - if at any point, edges don't match up, not palindrome
#         - if at end of run, no issues, is palindrome
# - For this to work, need to build two versions of graph, one forwards and one backwards
#
# - If you build a list as you go, you don't need to build a backwards version of the graph

# +
# Singly-linked lists are already defined with this interface:
class ListNode(object):
    def __init__(self, x):
        self.value = x
        self.next = None


def graph_builder(array):
    """Build a singly linked list from a Python list; return the head node."""
    root = ListNode(array[0])
    foc = root
    for i in range(1, len(array)):
        foc.next = ListNode(array[i])
        foc = foc.next
    return root


def isListPalindrome(l):
    """Return True iff the linked list with head ``l`` is a palindrome.

    Fixes over the original draft:
    - syntax error: ``if L == R[::-1] return True`` was missing the colon;
    - logic error: for even-length lists the old slices
      ``tab[:(a//2)-1]`` / ``tab[(a//2)+1:]`` dropped BOTH middle elements,
      so e.g. [1, 2, 3, 1] was wrongly reported as a palindrome.

    O(n) time, O(n) extra space for the collected values.
    Note: a true O(1)-space solution would reverse the second half of the
    list in place and compare the two halves.
    """
    values = []
    while l is not None:
        values.append(l.value)
        l = l.next
    return values == values[::-1]


pt1 = [1, 2, 2, 3]
pal_test_1 = graph_builder(pt1)

pt2 = [0, 1, 0]
pal_test_2 = graph_builder(pt2)

pt3 = [0, 1, 2, 3, 3, 2, 1, 0]
pal_test_3 = graph_builder(pt3)

n = pal_test_2
while n:
    print(n.value)
    n = n.next

isListPalindrome(pal_test_2)
# -

g = [1,2,3]
if g == g[::-1]:
    print('f')

v = {0:'a', 1:'b', 2:'c'}
list(v.values())

# ### Kth Element
#
# Given array of integers, remove each kth element from it.
#
# Example
#
# For inputArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] and k = 3, the output should be
# extractEachKth(inputArray, k) = [1, 2, 4, 5, 7, 8, 10].
#
# Input/Output
#
# [execution time limit] 4 seconds (py3)
#
# [input] array.integer inputArray
#
# Guaranteed constraints:
# 5 ≤ inputArray.length ≤ 15,
# -20 ≤ inputArray[i] ≤ 20.
#
# [input] integer k
#
# Guaranteed constraints:
# 1 ≤ k ≤ 10.
#
# [output] array.integer
#
# inputArray without elements k - 1, 2k - 1, 3k - 1 etc.

# +
def kth_element(List, k):
    """Return List without every k-th element (1-based positions k, 2k, ...)."""
    return [value for i, value in enumerate(List) if (i + 1) % k != 0]


k_test_1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
t = kth_element(k_test_1, 3)
t
# -

# ### Sudoku
#
# Sudoku is a number-placement puzzle. The objective is to fill a 9 × 9 grid with numbers in such a way that each column, each row, and each of the nine 3 × 3 sub-grids that compose the grid all contain all of the numbers from 1 to 9 one time.
#
# Implement an algorithm that will check whether the given grid of numbers represents a valid Sudoku puzzle according to the layout rules described above. Note that the puzzle represented by grid does not have to be solvable.
# # Example # # For # # grid = [['.', '.', '.', '1', '4', '.', '.', '2', '.'], # ['.', '.', '6', '.', '.', '.', '.', '.', '.'], # ['.', '.', '.', '.', '.', '.', '.', '.', '.'], # ['.', '.', '1', '.', '.', '.', '.', '.', '.'], # ['.', '6', '7', '.', '.', '.', '.', '.', '9'], # ['.', '.', '.', '.', '.', '.', '8', '1', '.'], # ['.', '3', '.', '.', '.', '.', '.', '.', '6'], # ['.', '.', '.', '.', '.', '7', '.', '.', '.'], # ['.', '.', '.', '5', '.', '.', '.', '7', '.']] # the output should be # sudoku2(grid) = true; # # For # # grid = [['.', '.', '.', '.', '2', '.', '.', '9', '.'], # ['.', '.', '.', '.', '6', '.', '.', '.', '.'], # ['7', '1', '.', '.', '7', '5', '.', '.', '.'], # ['.', '7', '.', '.', '.', '.', '.', '.', '.'], # ['.', '.', '.', '.', '8', '3', '.', '.', '.'], # ['.', '.', '8', '.', '.', '7', '.', '6', '.'], # ['.', '.', '.', '.', '.', '2', '.', '.', '.'], # ['.', '1', '.', '2', '.', '.', '.', '.', '.'], # ['.', '2', '.', '.', '3', '.', '.', '.', '.']] # the output should be # sudoku2(grid) = false. # # The given grid is not correct because there are two 1s in the second column. Each column, each row, and each 3 × 3 subgrid can only contain the numbers 1 through 9 one time. # # Input/Output # # [execution time limit] 4 seconds (py3) # # [input] array.array.char grid # # A 9 × 9 array of characters, in which each character is either a digit from '1' to '9' or a period '.'. # # [output] boolean # # Return true if grid represents a valid Sudoku puzzle, otherwise return false. 
# +
def check(arr):
    """Return True when arr holds no duplicate values and every value is in 1..9."""
    # Checklist from the original notes: duplicates, values below 1, values
    # above 9.  A set gives O(len(arr)) duplicate detection instead of the
    # original O(len(arr)**2) list scan.
    return len(set(arr)) == len(arr) and all(1 <= val <= 9 for val in arr)

def sudoku(arr):
    """Validate a 9x9 Sudoku grid of '1'-'9' characters and '.' blanks.

    Gathers the filled-in digits of every row, every column and every 3x3
    window, then verifies each group with check().  The puzzle does not have
    to be solvable, only consistent.
    """
    to_check = []
    for i in range(len(arr)):  # O(N)
        # Rows and columns share the same index sweep (the grid is always 9x9).
        row_vals = [int(arr[i][j]) for j in range(len(arr[i])) if arr[i][j] != '.']
        col_vals = [int(arr[j][i]) for j in range(len(arr[i])) if arr[j][i] != '.']
        to_check.append(row_vals)
        to_check.append(col_vals)
        if i % 3 == 0:
            # The three 3x3 windows whose top row is i (fires at i = 0, 3, 6).
            win1 = arr[i][:3] + arr[i+1][:3] + arr[i+2][:3]
            win1 = [int(c) for c in win1 if c != '.']
            win2 = arr[i][3:6] + arr[i+1][3:6] + arr[i+2][3:6]
            win2 = [int(c) for c in win2 if c != '.']
            win3 = arr[i][6:9] + arr[i+1][6:9] + arr[i+2][6:9]
            win3 = [int(c) for c in win3 if c != '.']
            to_check.append(win1)
            to_check.append(win2)
            to_check.append(win3)
    # Valid only if every row/column/window group passes.
    return all(check(group) for group in to_check)

t_grid_1 = [['.', '.', '.', '1', '4', '.', '.', '2', '.'],
            ['.', '.', '6', '.', '.', '.', '.', '.', '.'],
            ['.', '.', '.', '.', '.', '.', '.', '.', '.'],
            ['.', '.', '1', '.', '.', '.', '.', '.', '.'],
            ['.', '6', '7', '.', '.', '.', '.', '.', '9'],
            ['.', '.', '.', '.', '.', '.', '8', '1', '.'],
            ['.', '3', '.', '.', '.', '.', '.', '.', '6'],
            ['.', '.', '.', '.', '.', '7', '.', '.', '.'],
            ['.', '.', '.', '5', '.', '.', '.', '7', '.']]

t_grid_2 = [['.', '.', '.', '.', '2', '.', '.', '9', '.'],
            ['.', '.', '.', '.', '6', '.', '.', '.', '.'],
            ['7', '1', '.', '.', '7', '5', '.', '.', '.'],
            ['.', '7', '.', '.', '.', '.', '.', '.', '.'],
            ['.', '.', '.', '.', '8', '3', '.', '.', '.'],
            ['.', '.', '8', '.', '.', '7', '.', '6', '.'],
            ['.', '.', '.', '.', '.', '2', '.', '.', '.'],
            ['.', '1', '.', '2', '.', '.', '.', '.', '.'],
            ['.', '2', '.', '.', '3', '.', '.', '.', '.']]

print('Grid 1', sudoku(t_grid_1))
print('Grid 2', sudoku(t_grid_2))
# Takes brute force to a whole new level... originally O(N * (5N*.334) + O(27*9));
# the set-based check() trims the per-group duplicate scan, but the overall
# shape of the sweep is unchanged.
# -

# ### First Digit
#
# Find the leftmost digit that occurs in a given string.
#
# Example
#
# For inputString = "var_1__Int", the output should be
# firstDigit(inputString) = '1';
# For inputString = "q2q-q", the output should be
# firstDigit(inputString) = '2';
# For inputString = "0ss", the output should be
# firstDigit(inputString) = '0'.
# Input/Output
#
# [execution time limit] 4 seconds (py3)
#
# [input] string inputString
#
# A string containing at least one digit.
#
# Guaranteed constraints:
# 3 ≤ inputString.length ≤ 10.
#
# [output] char

# +
# So the horrifying way to do this is with try and except, like so
def first_digit(string):
    for ch in string:  # O(N)
        try:
            int(ch)
            return ch
        except ValueError:  # was a bare except; int() on a non-digit raises ValueError
            pass

# The slightly less horrifying way is to use a list of nums
digit_list = ['0','1','2','3','4','5','6','7','8','9']

# Non-horrifying way (answers the original question): membership in a short
# string constant is O(1) per character, so the whole scan is O(N) with no
# exception machinery.  This final definition shadows the two above.
def first_digit(string):
    """Return the leftmost character of string that is a decimal digit."""
    for ch in string:
        if ch in '0123456789':  # strict ASCII digits, same set as digit_list
            return ch
# -

# ### Is Crypto Solution
#
# A cryptarithm is a mathematical puzzle for which the goal is to find the correspondence between letters and digits, such that the given arithmetic equation consisting of letters holds true when the letters are converted to digits.
#
# You have an array of strings crypt, the cryptarithm, and an array containing the mapping of letters and digits, solution. The array crypt will contain three non-empty strings that follow the structure: [word1, word2, word3], which should be interpreted as the word1 + word2 = word3 cryptarithm.
# # If crypt, when it is decoded by replacing all of the letters in the cryptarithm with digits using the mapping in solution, becomes a valid arithmetic equation containing no numbers with leading zeroes, the answer is true. If it does not become a valid arithmetic solution, the answer is false. # # Note that number 0 doesn't contain leading zeroes (while for example 00 or 0123 do). # # Example # # For crypt = ["SEND", "MORE", "MONEY"] and # # solution = [['O', '0'], # ['M', '1'], # ['Y', '2'], # ['E', '5'], # ['N', '6'], # ['D', '7'], # ['R', '8'], # ['S', '9']] # the output should be # isCryptSolution(crypt, solution) = true. # # When you decrypt "SEND", "MORE", and "MONEY" using the mapping given in crypt, you get 9567 + 1085 = 10652 which is correct and a valid arithmetic equation. # # For crypt = ["TEN", "TWO", "ONE"] and # # solution = [['O', '1'], # ['T', '0'], # ['W', '9'], # ['E', '5'], # ['N', '4']] # the output should be # isCryptSolution(crypt, solution) = false. # # Even though 054 + 091 = 145, 054 and 091 both contain leading zeroes, meaning that this is not a valid solution. # # Input/Output # # [execution time limit] 4 seconds (py3) # # [input] array.string crypt # # An array of three non-empty strings containing only uppercase English letters. # # Guaranteed constraints: # crypt.length = 3, # 1 ≤ crypt[i].length ≤ 14. # # [input] array.array.char solution # # An array consisting of pairs of characters that represent the correspondence between letters and numbers in the cryptarithm. The first character in the pair is an uppercase English letter, and the second one is a digit in the range from 0 to 9. # # It is guaranteed that solution only contains entries for the letters present in crypt and that different letters have different values. # # Guaranteed constraints: # solution[i].length = 2, # 'A' ≤ solution[i][0] ≤ 'Z', # '0' ≤ solution[i][1] ≤ '9', # solution[i][0] ≠ solution[j][0], i ≠ j, # solution[i][1] ≠ solution[j][1], i ≠ j. 
# # [output] boolean
#
# Return true if the solution represents the correct solution to the cryptarithm crypt, otherwise return false.

# +
def crypt_solution(crypt, solution):
    """Check that `solution` (letter/digit pairs) solves crypt[0] + crypt[1] = crypt[2].

    Decoded numbers must not carry leading zeroes ('0' on its own is fine,
    '054' is not).  The original version skipped this rule and wrongly
    accepted e.g. TEN + TWO = ONE with T -> 0 (054 + 091 = 145).
    """
    mapping = {letter: digit for letter, digit in solution}
    decoded = ["".join(mapping[ch] for ch in word) for word in crypt]
    # Leading-zero rule: any multi-digit number starting with '0' invalidates
    # the whole solution.
    if any(len(num) > 1 and num[0] == '0' for num in decoded):
        return False
    return int(decoded[0]) + int(decoded[1]) == int(decoded[2])

crypt = ["SEND", "MORE", "MONEY"]
solution = [['O', '0'],
            ['M', '1'],
            ['Y', '2'],
            ['E', '5'],
            ['N', '6'],
            ['D', '7'],
            ['R', '8'],
            ['S', '9']]
crypt_solution(crypt, solution)
# -

# ### String Reshaping
#
# Given an array of equal-length strings, you'd like to know if it's possible to rearrange the order of the elements in such a way that each consecutive pair of strings differ by exactly one character. Return true if it's possible, and false if not.
#
# Note: You're only rearranging the order of the strings, not the order of the letters within the strings!
#
# Example
#
# For inputArray = ["aba", "bbb", "bab"], the output should be
# stringsRearrangement(inputArray) = false.
#
# There are 6 possible arrangements for these strings:
#
# ["aba", "bbb", "bab"]
# ["aba", "bab", "bbb"]
# ["bbb", "aba", "bab"]
# ["bbb", "bab", "aba"]
# ["bab", "bbb", "aba"]
# ["bab", "aba", "bbb"]
# None of these satisfy the condition of consecutive strings differing by 1 character, so the answer is false.
#
# For inputArray = ["ab", "bb", "aa"], the output should be
# stringsRearrangement(inputArray) = true.
#
# It's possible to arrange these strings in a way that each consecutive pair of strings differ by 1 character (eg: "aa", "ab", "bb" or "bb", "ab", "aa"), so return true.
#
# Input/Output
#
# [execution time limit] 4 seconds (py3)
#
# [input] array.string inputArray
#
# A non-empty array of strings of lowercase letters.
#
# Guaranteed constraints:
# 2 ≤ inputArray.length ≤ 10,
# 1 ≤ inputArray[i].length ≤ 15.
# # [output] boolean # # Return true if the strings can be reordered in such a way that each neighbouring pair of strings differ by exactly one character (false otherwise).
.ipynb_checkpoints/Plane_Problems_2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Advent of Code 2021, day 1: count how many depth measurements increase
# from the previous measurement.

import pandas as pd

# NOTE(review): hard-coded local Windows path — the file must exist for this
# script to run.  `input` shadows the builtin of the same name; kept for
# continuity with the rest of the notebook.
input = pd.read_csv(r'C:\\Users\\ma7492\\Desktop\\AdventOfCode\\input_day1.txt',delimiter="\t")

# Exploratory peeks at the data.
input.head()
type(input)
input.shape
type(input.iloc[0])
input.iloc[4]

row, col = input.shape
print(row)
print(col)

count = 0
# Start at 1: the original `range(row)` made i = 0 compare iloc[-1] (the LAST
# row) against the first row, spuriously counting a wrap-around pair.
# (Equivalent vectorized form: (input['values'].diff() > 0).sum().)
for i in range(1, row):
    if input['values'].iloc[i-1] < input['values'].iloc[i]:
        count += 1
print('the input incremented', count, 'times')
AdventOfCode2021/day1/day1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Python 2 notebook: exploratory plots of a substance-abuse treatment dataset.
# NOTE(review): presumably a TEDS-style extract (SUB1, STFIPS, DAYWAIT columns)
# — confirm against the data source.

# +
#import csv from the dataset
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(font_scale=1)
df=pd.read_csv('../final.csv')
df[['SUB1','GENDER']].head()
# -

# Correlation matrix over a hand-picked set of columns, shown as a heatmap.
corr=df.loc[:,["SUB1","FREQ1","SUB2","FREQ2","EDUC","EMPLOY","ALCFLG","HERFLG","ALCDRUG","DSMCRIT","PREG","ARRESTS"]].corr()#["Survived"]
plt.figure(figsize=(10, 20))
sns.heatmap(corr, vmax=.8, linewidths=0.01, square=True,annot=True,cmap='YlGnBu',linecolor="white")
plt.title('Correlation between features');
plt.show()

# plt.figure()
# sns.pairplot(df[['ALCDRUG','FREQ2']], size=6,) #x_vars='ALCDRUG',y_vars='FREQ2' )
# plt.show()

df['YEAR'].value_counts()

import matplotlib
matplotlib.style.use('ggplot')

# +
plt.figure()
# Keep only the six most common substance codes (2,5,4,10,7,3) and the two
# gender codes, then count substances per gender.
df5 = df[df['SUB1'].isin([2,5,4,10,7,3])]
df5 = df5[df5['GENDER'].isin([1,2])]
df5 = df5.groupby(['GENDER'])['SUB1'].value_counts().unstack()#.reset_index(name="count")
print (df5.head(12))
plt.title('Number of Substance by Year')
ax = plt.gca()  # grab the current axis
#ax.set_xticks([2,4,6]) # choose which x locations to have ticks
#ax.set_xticklabels(["12-14","18-20","25-29","35-39","45-49","55 AND OVER"])
#plt.show() # set the labels to display at those ticks
df6 = df5
# +
print df6
df7 = df6.sum(axis=1)
print df7
# NOTE(review): df8 = df6 is an alias, not a copy — the loop below also
# mutates df5/df6 in place.  Looks intentional here, but verify before reuse.
df8 = df6
for i in [2,5,4,10,7,3]:
    # Convert raw counts to per-gender fractions.
    df8[i] = df6[i] / df7
df8.rename(columns={2:'ALCOHOL', 5:'HEROIN', 4:'MARIJUANA', 10:'METH', 7:'OPIATES', 3:'COCAINE'},inplace=True)
#df8.rename(index={1:'MALE', 2:'FEMALE'},inplace=True)
#print (df8)
# +
#df8.index.rename(['MALE', 'FEMALE'],inplace=True)
#df8 = df8.transpose()
#df8.rename(columns:{1:'MALE', 2:'FEMALE'},inplace=True)
print df8
# +
plt.figure()
#df2['sub1_ratio'] = (df2.groupby(['YEAR'])['SUB1'] / df2.groupby(['YEAR']).sum())
ax = df8.plot(kind='bar')
ax.set(xlabel="GENDER", ylabel="percent")
ax.legend(bbox_to_anchor=(1, 1))
#df6.groupby(['YEAR']).plot.bar(x='SUB1', y='count')
#df6.groupby(['YEAR','SUB1'])['SUB1'].sum().plot.bar()
#df6.plot.bar(x='SUB1', y='count')
ax.set_xticklabels(["MALE","FEMALE"])
#df6.plot(kind='bar')
#pd.crosstab(df5['YEAR'],df5['SUB1']).plot.bar()
plt.title('Top 6 Substance Abuse by Gender')
plt.show()
# +
#print(df2['YEAR'].value_counts())
#print(df.groupby('SUB1')['SUB1'].value_counts().nlargest(9))
# Map the numeric substance codes to readable labels for the next plot.
sub1_map = {2:'ALCOHOL', 5:'HEROIN', 4:'MARIJUANA', 10:'METH', 7:'OPIATES', 3:'COCAINE'}
df3 = df[['GENDER','SUB1']]
df3 = df3[df3['SUB1'].isin([2,5,4,10,7,3])]
df3['SUB1'].replace(sub1_map, inplace=True)
# +
plt.figure()
ax = df3.groupby(['GENDER'])['SUB1'].value_counts().plot.bar()
ax.set(xlabel="substance", ylabel="count")
#ax.legend(bbox_to_anchor=(1, 1))
plt.title('Overall Substance Abuse')
#df3.groupby(['GENDER', 'SUB1']).sum().unstack().plot( kind='bar', stacked=True)
plt.show()
# -
# Long treatment wait times in one state (STFIPS 39).
print df.query('STFIPS==39').query('DAYWAIT >= 365')[['DAYWAIT','CASEID']].head(5)  #ohio = 39
print df.query('STFIPS==39').query('DAYWAIT >= 900')[['DAYWAIT','CASEID']].head(5)
week3/week3_linux.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernel_info:
#     name: python3
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.

# ## Generate Cities List

# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#

# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame

# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.

# Get the indices of cities that have humidity over 100%.

# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".

# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.

# ## Latitude vs. Temperature Plot

# ## Latitude vs. Humidity Plot

# ## Latitude vs. Cloudiness Plot

# ## Latitude vs. Wind Speed Plot

# ## Linear Regression

# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression

# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression

# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression

# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression

# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression

# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression

# #### Northern Hemisphere - Wind Speed (mph) vs.
Latitude Linear Regression # #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
WeatherPy/.ipynb_checkpoints/WeatherPy-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Evaluates several optimization test functions (and their hand-derived
# gradients/Hessians) on small grids and renders 2D contour / 3D surface
# plots of each.

# + code_folding=[]
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import mpl_toolkits.mplot3d

def init_x(step,start,end,dimension): #set the start&end coord, step size and dimension number pf input set
    """Build a (dimension, n) array whose every row is arange(start, end, step).

    NOTE(review): for dimension == 1 the result stays 1-D (the vstack loop
    never runs).
    """
    xi=np.arange(start, end, step);
    x=xi #initialization of input xi for each dimension
    for i in range(dimension-1):
        x=np.vstack((np.around(x,decimals=9),np.around(xi,decimals=9))) #make x to d dimensions, from xi
    return x

def func1(x):
    """Ill-conditioned quadratic: sum_i alpha^(i/(d-1)) * x_i^2.

    NOTE(review): raises ZeroDivisionError for a 1-dimensional input
    (dim - 1 == 0 in the exponent).
    """
    alpha=1000 #set alpha
    dim=x.shape[0] #dimension number
    y=0
    for i in range(dim):
        y+=alpha**(i/(dim-1))*x[i]**2
    return y

x=init_x(0.001,-0.1,0.1,2)
#x=init_x(0.01,-2,2,2)
#print(x)
X1,X2 = np.meshgrid(x[0], x[1]) #generate all the data point
#print(X1,'\n',X2)
dtsize=X1.shape[0] #data point number
Y=np.zeros((dtsize,dtsize)) #initialize output results to 2D
for i in range(dtsize):
    for j in range(dtsize):
        X=np.vstack((np.around(X1[i,j],decimals=9),np.around(X2[i,j],decimals=9))) #choose every combination of 2D inputs
        Y[i,j]=func1(X) #store the results
print(Y)
# -

#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
Cset = plt.contourf(X1, X2, Y, levels=24, cmap='coolwarm_r')
#c_lv=[0,0.001,0.002,0.003,0.004,0.006,0.008,0.010]
#c_lv=10
#C = plt.contour(X1, X2, Y, levels=c_lv, colors='black')
#plt.clabel(C, inline=True, fontsize=10)
plt.colorbar(Cset)
plt.tight_layout()
plt.savefig('f1_2d.pdf')

#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
plt.tight_layout()
#ax.view_init(30, 30)
plt.show()
fig.savefig('f1_3d.pdf')

# +
def func1_1(x):
    """Gradient of func1: component i is 2 * alpha^(i/(d-1)) * x_i."""
    alpha=1000 #set alpha
    dim=x.shape[0] #dimension number
    dtsize=x.shape[1] #data point number
    result=np.zeros(dim)
    print(dim,dtsize)  # debug print left in by the author
    for i in range(dim):
        result[i]=2*(alpha**(i/(dim-1)))*x[i]
    return result

x=init_x(0.1,-15,15,3)
#print(x[0][19],x[1][223],x[2][165])
X=np.vstack((np.vstack((x[0,19],x[1,223])),x[2,165]))
print(X)
Y=func1_1(X)
#print(Y[0][19],Y[1][223],Y[2][165])
print(Y)

def func1_2(x):
    """Hessian of func1: diagonal matrix with entries 2 * alpha^(i/(d-1))."""
    alpha=1000 #set alpha
    dim=x.shape[0] #dimension number
    dtsize=x.shape[1] #data point number
    result=np.zeros((dim,dim))
    for i in range(dim):
        result[i,i]=2*(alpha**(i/(dim-1)))
    return result

Y=func1_2(X)
print(Y)
# +
# Rosenbrock function.
func2 = lambda x1,x2: (1-x1)**2 + 100*(x2-x1**2)**2;
Y=func2(X1,X2)
#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
line_lv = 33
c_lv = [0.85,0.9,0.95,1,1.1,1.2,1.3,1.5,1.7,1.9,2.2]
plt.contourf(X1, X2, Y, levels=line_lv, cmap='coolwarm_r')
C = plt.contour(X1, X2, Y, levels=c_lv, colors='black')
plt.clabel(C, inline=True, fontsize=10)
plt.tight_layout()
fig.savefig('f2_2d_1.pdf')
# -
#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
plt.tight_layout()
#ax.view_init(30, 30)
plt.show()
fig.savefig('f2_3d_1.pdf')

# Re-plot func2 on a wider grid with a power-law color normalization.
x=init_x(0.01,-2,2,2)
X1,X2 = np.meshgrid(x[0], x[1]) #generate all the data point
Y=func2(X1,X2)
#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
#lv=np.zeros(1)
#lv=np.append(lv,np.logspace(1,12,12,base=2))
lv=[0,1,2,4,8,16,32,64,128,256,512,1024,2048,3500]
Cset = plt.contourf(X1, X2, Y, levels=lv, norm=colors.PowerNorm(gamma=0.25), cmap='coolwarm_r')
#C = plt.contour(X1, X2, Y, levels=lv, colors='black')
#plt.clabel(C, inline=True, fontsize=7)
plt.colorbar(Cset)
plt.tight_layout()
fig.savefig('f2_2d_2.pdf')
#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,norm=colors.PowerNorm(gamma=0.25),cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
ax.view_init(30, 30)
plt.show()
fig.savefig('f2_3d_2.pdf')

# +
#func2 = lambda x1,x2: (1-x1)**2 + 100*(x2-x1**2)**2;
# Hand-derived gradient of the Rosenbrock function.
func2_1 = lambda x: (2*x[0]-2-400*x[0]*(x[1]-x[0]**2),200*(x[1]-x[0]**2));
#x=init_x(0.001,-0.1,0.1,2)
x=init_x(0.01,-2,2,2)
#print(x)
'''
X1,X2 = np.meshgrid(x[0], x[1]) #generate all the data point
#print(X1,'\n',X2)
dtsize=X1.shape[0] #data point number
dim=2
Y=np.zeros((dtsize,dtsize,dim)) #initialize output results to 2D
for i in range(dtsize):
    for j in range(dtsize):
        #X=np.vstack((np.around(X1[i,j],decimals=9),np.around(X2[i,j],decimals=9))) #choose every combination of 2D inputs
        temp=func2_1(X1[i,j],X2[i,j])
        for k in range(dim):
            Y[i,j,k]=temp[k] #store the results
#print(Y[:,:,0]) #partial x1
#print(Y[:,:,1]) #partial x2
'''
#print(x[0,297],x[1,58])
X=np.vstack((x[0,297],x[1,58]))
print(X)
Y=func2_1(X)
#print(Y[0][3],Y[1][3])
print(Y)
# +
# Hessian determinant and Hessian entries of the Rosenbrock function,
# flattened as (H11, H12, H21, H22).
func2_2_det = lambda x: (2-400*x[1]+1200*x[0]**2)*200-(-400*x[0])**2;
func2_2 = lambda x: (2-400*x[1]+1200*x[0]**2,-400*x[0],-400*x[0],200);
print(X)
Y=func2_2(X)
#print(Y[0][3],Y[1][3],Y[2][3],Y[3])
print(Y)
# +
def func3(x):
    """Log of func1, shifted by epsilon to keep the log finite at the origin."""
    epsilon=10**(-16) #set epsilon
    return np.log(epsilon+func1(x))

x=init_x(0.001,-0.1,0.1,2)
#x=init_x(0.01,-2,2,2)
X1,X2 = np.meshgrid(x[0], x[1]) #generate all the data point
dtsize=X1.shape[0] #data point number
Y=np.zeros((dtsize,dtsize)) #initialize output results to 2D
for i in range(dtsize):
    for j in range(dtsize):
        X=np.vstack((np.around(X1[i,j],decimals=9),np.around(X2[i,j],decimals=9))) #choose every combination of 2D inputs
        Y[i,j]=func3(X) #store the results
print(Y)
# -
#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
lv=10
Cset = plt.contourf(X1, X2, Y, levels=lv, norm=colors.PowerNorm(gamma=3), cmap='coolwarm_r')
plt.colorbar(Cset)
plt.tight_layout()
fig.savefig('f3_2d.pdf')
#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,norm=colors.PowerNorm(gamma=1.1),cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
#ax.view_init(30, 30)
plt.show()
fig.savefig('f3_3d.pdf')
# +
def func3_1(x):
    """Gradient of func3 (chain rule: grad(func1) / (epsilon + func1))."""
    alpha=1000
    epsilon=10**(-16)
    dim=x.shape[0] #dimension number
    result=np.zeros(dim)
    for i in range(dim):
        result[i]=(2*alpha**(i/(dim-1))*x[i])/(epsilon+func1(x))
    return result

def func3_2(x):
    """Hessian of func3 via the quotient rule on the gradient above."""
    alpha=1000 #set alpha
    epsilon=10**(-16) #set epsilon
    dim=x.shape[0] #dimension number
    result=np.zeros((dim,dim))
    for i in range(dim):
        for j in range(dim):
            a=alpha**((i+j)/(dim-1))
            if(i==j):
                result[i,j]=(2*a*(epsilon+func1(x))-(2*a*x[i])**2)/(epsilon+func1(x))**2
            else:
                result[i,j]=(-4*a*x[i]*x[j])/(epsilon+func1(x))**2
    return result

x=init_x(0.1,-15,15,3)
#print(x[0][19],x[1][223],x[2][165])
X=np.vstack((np.vstack((x[0,19],x[1,223])),x[2,165]))
print(X)
Y=func3_1(X)
print(Y)
Y=func3_2(X)
print(Y)
# +
q=10**8
# Smooth approximation of max(x, 0); the second form avoids the overflow
# that exp(q*x) would hit for positive x.
#funch = lambda x: np.log(1+np.exp(q*x))/q;
funch = lambda x: (np.log(1+np.exp(-np.absolute(q*x)))+np.maximum(q*x,0))/q

def func4(x):
    """Asymmetric piecewise-linear-like penalty built from funch."""
    dim=x.shape[0] #dimension number
    y=0;
    for i in range(dim):
        y+=funch(x[i])+100*funch(-x[i])
    return y

def func5(x):
    """Squared variant of func4 (quadratic one-sided penalties)."""
    dim=x.shape[0] #dimension number
    y=0;
    for i in range(dim):
        y+=funch(x[i])**2+100*funch(-x[i])**2
    return y

#x=init_x(0.01,-0.0000001,0.0000001,2)
x=init_x(0.001,-0.1,0.1,2)
X1,X2 = np.meshgrid(x[0], x[1]) #generate all the data point
dtsize=X1.shape[0] #data point number
Y=np.zeros((dtsize,dtsize)) #initialize output results to 2D
for i in range(dtsize):
    for j in range(dtsize):
        X=np.vstack((np.around(X1[i,j],decimals=9),np.around(X2[i,j],decimals=9))) #choose every combination of 2D inputs
        Y[i,j]=func4(X) #store the results
#print(Y)
#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
lv=10
Cset = plt.contourf(X1, X2, Y, levels=lv, norm=colors.PowerNorm(gamma=1), cmap='coolwarm_r')
plt.colorbar(Cset)
plt.tight_layout()
fig.savefig('f4_2d.pdf')
# -
#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,norm=colors.PowerNorm(gamma=0.25),cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
ax.view_init(30, 30)
plt.show()
fig.savefig('f4_3d.pdf')
# +
def func4_1(x):
    """Gradient of func4 (logistic sigmoids from differentiating funch).

    NOTE(review): exp(q*x[i]) can overflow for positive x with q = 1e8 —
    presumably only evaluated near the origin, as below.
    """
    q=10**8
    dim=x.shape[0] #dimension number
    result=np.zeros(dim)
    for i in range(dim):
        result[i]=(np.exp(q*x[i]))/(1+np.exp(q*x[i]))-100*(np.exp(-q*x[i])/(1+np.exp(-q*x[i])))
    return result

def func4_2(x):
    """Hessian of func4: diagonal with logistic-derivative entries."""
    q=10**8
    dim=x.shape[0] #dimension number
    result=np.zeros((dim,dim))
    for i in range(dim):
        for j in range(dim):
            if(i==j):
                result[i,j]=(101*q*np.exp(-q*x[i]))/(1+np.exp(-q*x[i]))**2
            else:
                result[i,j]=0
    return result

x=init_x(0.00000001,-0.0000015,0.0000015,3)
#print(x[0][19],x[1][223],x[2][165])
X=np.vstack((np.vstack((x[0,19],x[1,223])),x[2,165]))
print(X)
Y=func4_1(X)
print(Y)
Y=func4_2(X)
print(Y)
# -
# Evaluate and plot func5 on the grid still held in X1/X2 from the func4 cell.
Y=np.zeros((dtsize,dtsize)) #initialize output results to 2D
for i in range(dtsize):
    for j in range(dtsize):
        X=np.vstack((np.around(X1[i,j],decimals=9),np.around(X2[i,j],decimals=9))) #choose every combination of 2D inputs
        Y[i,j]=func5(X) #store the results
#print(Y)
#plot in 2D with color
fig, ax = plt.subplots()
ax.set_xlabel('X1')
ax.set_ylabel('X2')
lv=10
Cset = plt.contourf(X1, X2, Y, levels=lv, norm=colors.PowerNorm(gamma=1), cmap='coolwarm_r')
plt.colorbar(Cset)
plt.tight_layout()
fig.savefig('f5_2d.pdf')
#plot in 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(X1,X2,Y,norm=colors.PowerNorm(gamma=0.25),cmap=plt.cm.coolwarm_r)
ax.set_xlabel('X1')
ax.set_ylabel('X2')
ax.set_zlabel('Y')
ax.view_init(30, 30)
plt.show()
fig.savefig('f5_3d.pdf')
1-functions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="oKvOFVJLu3tf" # # Apache Beam Word Count Example # # The example is adopted from https://beam.apache.org/get-started/wordcount-example/ for Google Colab # # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://github.com/Building-ML-Pipelines/building-machine-learning-pipelines/blob/master/chapters/intro_tfx/Apache_beam_example_notebook.ipynb) # # + colab={} colab_type="code" id="rAh3av_ovF2y" # !pip install -q apache_beam[gcp] # + colab={} colab_type="code" id="m9BExktyu2yM" import re import apache_beam as beam from apache_beam.io import ReadFromText from apache_beam.io import WriteToText from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options import SetupOptions input_file = "gs://dataflow-samples/shakespeare/kinglear.txt" output_file = "/content/output.txt" # TODO explain these lines pipeline_options = PipelineOptions() # pipeline_options.view_as(SetupOptions).save_main_session = True with beam.Pipeline(options=pipeline_options) as p: # Read the text file[pattern] into a PCollection. lines = p | ReadFromText(input_file) # Count the occurrences of each word. counts = ( lines | 'Split' >> (beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))) # .with_output_types(unicode)) | 'PairWithOne' >> beam.Map(lambda x: (x, 1)) | 'GroupAndSum' >> beam.CombinePerKey(sum)) # Format the counts into a PCollection of strings. def format_result(word_count): (word, count) = word_count return '%s: %s' % (word, count) output = counts | 'Format' >> beam.Map(format_result) # Write the output using a "Write" transform that has side effects. 
output | WriteToText(output_file) # + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="-xbljzhfvJVH" outputId="fd19137c-52e4-4442-b597-b0a23a2bf3bb" # !head /content/output.txt* # + colab={} colab_type="code" id="U0skCBIkwT9m"
chapters/intro_tfx/Apache_beam_example_notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: dev # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Machine Learning- Exoplanet Exploration # #### Extensive Data Dictionary: https://exoplanetarchive.ipac.caltech.edu/docs/API_kepcandidate_columns.html # # Highlightable columns of note are: # # * kepoi_name: A KOI is a target identified by the Kepler Project that displays at least one transit-like sequence within Kepler time-series photometry that appears to be of astrophysical origin and initially consistent with a planetary transit hypothesis # # * kepler_name: [These names] are intended to clearly indicate a class of objects that have been confirmed or validated as planets—a step up from the planet candidate designation. # # * koi_disposition: The disposition in the literature towards this exoplanet candidate. One of CANDIDATE, FALSE POSITIVE, NOT DISPOSITIONED or CONFIRMED. # # * koi_pdisposition: The disposition Kepler data analysis has towards this exoplanet candidate. One of FALSE POSITIVE, NOT DISPOSITIONED, and CANDIDATE. # # * koi_score: A value between 0 and 1 that indicates the confidence in the KOI disposition. For CANDIDATEs, a higher value indicates more confidence in its disposition, while for FALSE POSITIVEs, a higher value indicates less confidence in that disposition. 
#
# +
# # Update sklearn to prevent version mismatches
# # !pip install sklearn --upgrade
# # install joblib
# # !pip install joblib
# -

# ### Import Dependencies

# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# -

# # Read the CSV and Perform Basic Data Cleaning

# +
# Read/Load CSV file
df = pd.read_csv("exoplanet_data.csv")
# Drop the null columns where all values are null
df = df.dropna(axis='columns', how='all')
# Drop the null rows
df = df.dropna()
df.head()
# -

# ## Basic Statistic Details
df.describe()

# # Select Features (columns)
# * Feature Selection: removing irrelevant features yields a better performing model that is easier to understand, and the model runs faster
#

target_names = df["koi_disposition"].unique()
#target_names
print(df["koi_disposition"].unique())

# +
# Assign X (Independant data) and y (Dependant target)
# Set X equal to the entire data set, except for the first column
X = df.iloc[:, 1:]
# X.head()
# Set y equal to the first column
y = df.iloc[:,0].values.reshape(-1, 1)
# y.head()

# +
from sklearn.ensemble import ExtraTreesClassifier

# Search for top 10 features according to feature importances
model = ExtraTreesClassifier()
model.fit(X,y)
model.feature_importances_
# sorted(zip(model.feature_importances_, X), reverse=True)
# -

# Store the top (10) features as a series, using the column headers as the index
top_feat = pd.Series(model.feature_importances_, index=X.columns).nlargest(10)
top_feat

# +
# Set features based on feature importances
X = df[top_feat.index]
# Use `koi_disposition` for the y values
y = df['koi_disposition']
#y = df['koi_disposition'].values.reshape(-1, 1)
# -

# # Create a Train Test Split

# +
from sklearn.model_selection import train_test_split

# Split the data into smaller buckets for training and testing
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
X_train.head()
# -

# X and y Train shape have 5243 rows (80% of data)
X_train.shape, y_train.shape

# X and y Test shape have 1748 rows (20% of data)
X_test.shape, y_test.shape

# # Pre-processing
#
# Scale the data using the MinMaxScaler
#
# MinMaxScaler:
# * A way to normalize the input features/variables
# * Features will be transformed into the range
# * Scales the range of fetures from 0 to 1
#

# +
from sklearn.preprocessing import MinMaxScaler

# Create a MinMaxScaler model and fit it to the training data
X_scaler = MinMaxScaler().fit(X_train)

# Transform the training and testing data using the X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
#print(np.matrix(X_test_scaled))
# -

# # Train the Model
# * Used Logistic Regression Model
#
#

# +
from sklearn.linear_model import LogisticRegression

# Create a Logistic Regression Model
model = LogisticRegression()
# BUGFIX(review): removed the stray `model = SVC(kernel='linear')` line —
# SVC was never imported (NameError) and it silently replaced the Logistic
# Regression this notebook documents and tunes below.

# Train (Fit) the model to the data
model.fit(X_train_scaled, y_train)

# Score/Validate the model using the test data
print(f"Training Data Score: {'%.3f' % model.score (X_train_scaled, y_train)}")
print(f"Testing Data Score: {'%.3f' % model.score(X_test_scaled, y_test) }")
# Printed the r2 score for the test data, testing is lower than training which is good we are not over feeding
# -

# ## Model Accuracy

# +
# Predicting the Test set results
# BUGFIX(review): predict on the SCALED features — the model was trained on
# X_train_scaled, so predicting on raw X_test gave meaningless accuracy.
y_predic = model.predict(X_test_scaled)

# Making the confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_predic)

from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, y_predic)
print('Test Model Accurracy: %.3f' % (accuracy))
# -

# ## Prediction

# +
predictions = model.predict(X_test_scaled)
# print(f"first 10 Predictions{predictions[:10].tolist()}")
# print(f"first 10 Actual{y_test[:10].tolist()}")

# Printing into a Dataframe (y_test can't be reshap on top)
df_pred = pd.DataFrame({"Actual":y_test, "Predicted":predictions})
df_pred.head()
# -

# # Hyperparameter Tuning
#
# Use `GridSearchCV` to tune the model's parameters

# Check Logistic Regression parameters that can be used for Tuning
model = LogisticRegression()
model

# +
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [1, 5, 25, 45, 65],
              'max_iter': [100, 300, 500, 700, 1000]}
grid = GridSearchCV(model, param_grid, verbose=3)

# +
# Train the model with GridSearch
grid.fit(X_train_scaled, y_train)

# +
# List the best parameters for this dataset
print(grid.best_params_)
# List the best score
print('Best Score: %.3f' % grid.best_score_)
# -

# Score the model
print('Model Score: %.3f' % grid.score(X_test_scaled, y_test))

# Make predictions with the hypertuned model
predictions = grid.predict(X_test_scaled)
df_grid = pd.DataFrame({"Actual":y_test, "Predicted":predictions})
df_grid.head()

# +
# Calculate classification report
# print(np.array(y_test))
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions, target_names=target_names))
# -

# # Save the Model

import joblib
filename = 'LogisticRegression.sav'
# BUGFIX(review): persist the FITTED estimator — the original dumped the
# LogisticRegression *class* object, which saves no trained weights at all.
joblib.dump(grid, filename)
Models/logistic_regression.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .r # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: R # language: R # name: ir # --- # Lo siguiente está basado en los libros: # # * <NAME>, Pensando Antes de Actuar: Fundamentos de Elección Racional, 2009 # # * <NAME>, Introduction to Probability and Statistics Using R, 2014. # # El libro de <NAME> tiene github: [jkerns/IPSUR](https://github.com/gjkerns/IPSUR) # # **Nota:** # # * En algunas líneas no es necesario colocar `print` y sólo se ha realizado para mostrar los resultados de las funciones en un formato similar al de R pues la nota se escribió con *jupyterlab* y *R*. library(ggplot2) options(repr.plot.width=2.5, repr.plot.height=2.5) #esta línea sólo se ejecuta para jupyterlab con R # # Motivación # En el análisis estadístico de datos de poblaciones aparecen ciertas distribuciones con mucha frecuencia. En lo que continúa se estudiarán tres de las más comunes: la distribución uniforme (discreta y continua), que representa datos equiprobables, la distribución binomial y la distribución normal, que están íntimamente relacionadas y podemos pensar a la segunda (normal) como la versión continua de la primera (binomial). Ésta última es muy importante en la práctica y en la teoría. # Si $X$ es una variable aleatoria con función de distribución -o densidad en el caso continuo- $f$, entonces decimos que los datos representados por los valores que toma $X$ tienen una distribución dada por $f$. # ## Caso discreto # En el caso discreto, en el cual $X$ toma valores $x_1,x_2,\dots,x_n$ (soporte de $X$, $S_X$), éstos representan datos con media: $$\mu = E(X) = \displaystyle \sum_{i=1}^nx_if(x_i)$$ # en donde, recordemos que $f(x_i) = P(X = x_i)$ es la probabilidad de que $X$ tome el valor $X_i$. 
Asimismo, la varianza está dada por: $$\sigma^2=E((X-\mu)^2) = \sum_{i=1}^n(x_i-\mu)^2f(x_i)$$ # que es el valor esperado de las desviaciones -al cuadrado- alrededor de la media. Decimos que la distribución $f$ tiene media $\mu$ y varianza $\sigma^2$. Claramente la desviación estándar es simplemente $\sigma = \sqrt{\sigma^2}$. # # **Obs:** # # * La varianza puede ser calculada con la fórmula: $\sigma^2 = E(X^2)-(E(X))^2$. # ## Caso continuo # En el caso continuo, en el cual $X$ toma valores en un intervalo de la forma $[a,b]$ o $(a,b)$ (soporte de $X$, $S_X$) o uniones de estos intervalos, éstos representan datos con media: $$\mu=E(X)=\displaystyle \int_{X \in S}xf(x)dx.$$ # si $\displaystyle \int_{X \in S}|x|f(x)dx < \infty$ (es finita). # Asimismo, la varianza está dada por: $$\sigma^2=E((X-\mu)^2) = \int_{X \in S}(x-\mu)^2f(x)dx,$$ # y la desviación estándar es: $\sigma = \sqrt{\sigma^2}$. # # Uniforme # Cuando los datos de un conjunto **ocurren, cada uno con la misma frecuencia**, decimos que están uniformemente distribuidos. Éste es el caso de la distribución de probabilidad determinada por una **variable aleatoria sobre un espacio muestral de datos equiprobables**. Por ejemplo, los que resultan de lanzar una moneda, lanzar un dado, girar una ruleta o bien, elegir un número al azar en el intervalo $[0, 1]$. La distribución que resulta, ya sea en el caso discreto o continuo, se conoce como distribución uniforme. # ## Su distribución de probabilidad: caso discreto # En el caso discreto, dada una variable aleatoria $X$ que toma valores $\{1, 2, \dots , n\}$, cada uno con probabilidad $\frac{1}{n}$ , la distribución de probabilidad queda dada por: $$f(i) = P(X=i)= \frac{1}{n} \forall i=1,2,\dots,n.$$ # ### Su gráfica n<-10 valores<-1:n probabilidad<-1/n qplot(valores,probabilidad,main='Uniforme discreta') #qplot es quick plot # **Propiedades:** # # * $\mu = \displaystyle \sum_{i=1}^ni\frac{1}{n} = \frac{n+1}{2}$.
# # * $\sigma = \sqrt{\displaystyle \sum_{i=1}^n(i-\mu)^2\frac{1}{n}} = \sqrt{\frac{n^2-1}{12}}$. # ## Su función de densidad: caso continuo # Para el caso continuo, considérese el caso de elegir un número al azar en el intervalo $[a,b]$ de la recta numérica. Sea $X$ la variable aleatoria asociada que toma cada uno de los valores del intervalo. La densidad de probabilidad correspondiente debe ser tal que el área bajo la gráfica sea la unidad y dados números $c$ y $d$ en el intervalo $[a, b]$, con $c < d$, la probabilidad: $$P(c < X < d) = \frac{d-c}{b-a}$$ es el área por debajo de la gráfica entre $c$ y $d$. # Podemos definir esta función de densidad como: $$f(x) = \begin{cases} # \frac{1}{b-a} & \text{si } a \leq x \leq b,\\ # 0 & \text{en otro caso} # \end{cases}$$ # La media y varianza están dadas por: $$\mu=\frac{a+b}{2},$$ # $$\sigma^2=\frac{(b-a)^2}{12}.$$ # ### Su gráfica set.seed(2000) n_runif<-500 #cantidad de números que queremos a<- -2 #extremo izquierdo del intervalo b<-3 #extremo derecho del intervalo vec<-seq(a,b,by=.01) y<-dunif(vec,a,b) #evaluamos vec en la función de densidad en vec #esta evaluación es igual a 1/(b-a) df <- data.frame(valores=vec,probabilidad=y) gf<-ggplot(df, aes(x=vec,y=probabilidad)) gf + geom_point(size=.1) + ylim(0, 1/(b-a)+.3)+ ggtitle('Uniforme continua')+ xlab('runif') # El área debajo de la curva es: $$(b-a)\frac{1}{b-a} = 1.$$ # Y la probabilidad de que $X$ esté entre $0$ y $2$ es el área formada en el rectángulo: gf + geom_point(size=.1) + ylim(0, 1/(b-a)+.3) + geom_segment(aes(x = 0, y = 0, xend = 0, yend = 1/(b-a))) + geom_segment(aes(x = 2, y = 0, xend = 2, yend = 1/(b-a))) + xlab('runif') # ## Ejemplos # 1) Sea $X$ la variable aleatoria uniforme que representa el resultado observado al lanzar un dado. 
La probabilidad de observar del número $3$ en adelante es de: $$P(X\geq 3) = \displaystyle \sum_{i=3}^6P(X=i)=\sum_{i=3}^6\frac{1}{6}=\frac{4}{6}=\frac{2}{3}.$$ # El lanzamiento medio es de: $$\mu = \displaystyle \sum_{i=1}^6 i\frac{1}{6} = \frac{1+6}{2} = \frac{7}{2} $$ # con desviación estándar: # $$\sigma = \sqrt{\frac{(6-1)^2}{12}} \approx 1.4.$$ # 2) Sea $X$ el tiempo de espera (en minutos) en un consultorio dental y supongamos que éste sigue una distribución uniforme descrita por la función de densidad, $$f(x) = \begin{cases} # \frac{1}{90} & \text{si } 0 \leq x \leq 90,\\ # 0 & \text{en otro caso} # \end{cases}$$ # La probabilidad de que un paciente espere entre $15$ y $45$ minutos está dada por: $$P(15 < X < 45) = \frac{45-15}{90-0} = \frac{1}{3}.$$ # **en R ...** # Obsérvese que: # $$P(15 < X < 45) = 1 - P((15 < X < 45)^c) = 1 - P( (15 < X)^c \cup (X <45)^c)=$$ # $$1 - P( X \leq 15 \cup 45 \leq X) = 1 - (P(X \leq 15) + P( 45 \leq X))$$ # y la función `punif` nos calcula $P(X \leq 15)$ y si la volvemos a usar pero con el argumento `lower.tail=F` nos calcula $P(45 < X)$ que sí nos sirve pues como $X$ es **continua**: $P(X=45)=0$ (esto último no es igual para las variables discretas). ext_izq <- 15 ext_der <- 45 a<-0 b<-90 1-punif(ext_izq,a,b)-punif(ext_der,a,b, lower.tail = F) #mismo resultado si se #calcula: #1-punif(15,0,90)-punif(45,0,90) #pues es una uniforme # El tiempo medio de espera es de: $$\mu = \frac{0+90}{2} = 45 \text{ minutos}$$ # con desviación estándar: $$\sigma = \sqrt{\frac{(90-0)^2}{12}} \approx 26 \text{ minutos}.$$ # # Binomial # La distribución binomial se basa en ensayos *Bernoulli* por ejemplo: # # * Se lanza un dado 10 veces, ¿cuál es la probabilidad de obtener 4 seises? # # * Un nuevo medicamento para la diabetes mantiene el nivel de glucosa en la sangre del paciente, muy cercano a los niveles normales. Desgraciadamente, el $10\%$ de los pacientes que reciben este medicamento desarrolla hipertensión arterial. 
Un médico tiene $5$ pacientes diabéticos -sin relación alguna entre ellos- y desea saber la probabilidad de que todos ellos desarrollen hipertensión arterial si son tratados con el medicamento. # Estas dos situaciones describen ejemplos de lo que se conoce como un **experimento binomial**: que consiste de $n$ **etapas o ensayos idénticos e independientes, llamados ensayos** *Bernoullli*, tales que: # # * En cada ensayo o etapa hay **dos posibles resultados**: éxito y fracaso (podríamos llamarlos positivo y negativo, o $1$ y $0$). # # * La **probabilidad de éxito** en cada ensayo es de $p$ y la de fracaso de $1-p$. # # La variable aleatoria discreta $X$ que asociamos a este experimento es el número de éxitos en los $n$ ensayos. En las situaciones descritas tenemos que, # # * $X$ cuenta el número de seises en $10$ lanzamientos de un dado. # # * $X$ cuenta el número de pacientes, del grupo de $5$ pacientes, que desarrollan hipertensión arterial. # ## ¿Cómo obtenemos una distribución binomial? # Pensemos ahora en lanzar un dado $n$ veces. Queremos calcular la probabilidad de que salgan -exactamente- $k$ cuatros en estos $n$ lanzamientos. # # * El espacio de resultados o espacio muestral $S$ lo podemos representar como un conjunto de $n$ elementos: $\{l_1,l_2,\dots,l_n\}$ con cada $l_i$ lanzamiento $i$, por lo que se tienen $6^n$ posibilidades. # # * Si queremos que salgan $k$ cuatros, éstos pueden aparecer distribuidos de diferentes formas en los $n$ lanzamientos. Como no nos interesa el orden existen $\binom{n}{k}$ formas posibles de realizar esto. Por ejemplo, si $k = 2$ y $n = 4$ los dos cuatros pueden aparecer en los siguientes subconjuntos de lanzamientos: $\{l_1, l_2\}, \{l_1, l_3\}, \{l_1, l_4\}, \{l_2, l_3\}, \{l_2, l_4\} y \{l_3, l_4\}$. # # * Una vez elegidos los cuatros, en los otros lanzamientos pueden aparecer cualquiera de los $5$ números restantes, por lo que tenemos $5^{n-k}$ posibilidades. 
En resumen, hay $\binom{n}{k}5^{n-k}$ diferentes formas de obtener $k$ cuatros en $n$ lanzamientos de un dado. # # * Si $E=${obtener $k$ cuatros en $n$ lanzamientos} entonces: $$P(E) = \frac{\binom{n}{k}5^{n-k}}{6^n} = \binom{n}{k}\frac{1}{6}^k\frac{5^{n-k}}{6^{n-k}}=\binom{n}{k}\left(\frac{1}{6}\right)^k\left(\frac{5}{6}\right)^{n-k},$$ y la última igualdad es básicamente: $\binom{n}{k}P(\{\text{obtener cuatro}\})P(\{\text{no obtener cuatro}\})$. # # * Se interpreta el evento: “obtener un cuatro”, como el éxito y a “no obtener un cuatro” como el fracaso. Los lanzamientos son los ensayos del experimento binomial. # ## Su distribución de probabilidad # En resumen: sea $X$ la variable aleatoria asociada a un experimento binomial y $p$ la probabilidad de éxito, entonces, si $E$ representa el evento "éxito" y $F$ representa el evento "fracaso": $$P(X=k) = \binom{n}{k}P(E)^{k}P(F)^{n-k}=\binom{n}{k}p^k(1-p)^{n-k}$$ # **Comentarios:** # # * La distribución anterior se conoce con el nombre de **distribución binomial**. # # * Si representamos a los éxitos y fracasos, respectivamente por los números $1$ y $0$ entonces, la variable binomial consiste en contar el número de éxitos en los $n$ ensayos. # ## Propiedades # * La media y la desviación estándar de la distribución binomial con parámetros $n$ y $p$ están dadas por: # $$\mu = np.$$ # $$\sigma = \sqrt{np(1-p)}.$$ # * Dado un número $n$ de ensayos, la varianza tomará su valor máximo cuando el producto $p(1-p)$ sea máximo y esto sucede para $p = \frac{1}{2}$. Esto nos dice que: **la máxima dispersión en una distribución binomial ocurre cuando** $p=\frac{1}{2}$. 
# ## Su gráfica # + set.seed(2000) n_rbinom <- 500 #cantidad de números que queremos n<-10 #número de ensayos p1<-0.5; p2<-0.2; p3<-0.8 #diferentes probabilidades de éxito #para comparación valores1<-rbinom(n_rbinom, n, p1) valores2<-rbinom(n_rbinom, n, p2) valores3<-rbinom(n_rbinom, n, p3) gf1<-ggplot(data.frame(valores=valores1,probabilidad=p1),aes(x=valores1, y=..density..)) gf2<-ggplot(data.frame(valores=valores2,probabilidad=p2),aes(x=valores2, y=..density..)) gf3<-ggplot(data.frame(valores=valores3,probabilidad=p3),aes(x=valores3, y=..density..)) # - gf1 + geom_histogram(bins = 10) + ggtitle('Binomial, n=10, p=0.5') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('rbinom') # **Obsérvese que es simétrica la distribución en este caso.** gf2 + geom_histogram(bins = 7) + ggtitle('Binomial, n=10, p=0.2') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('rbinom') # **Obsérvese que está sesgada a la derecha la distribución en este caso.** gf3 + geom_histogram(bins = 7) + ggtitle('Binomial, n=10, p=0.8') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('rbinom') # **Obsérvese que está sesgada a la izquierda la distribución en este caso.** # ## Ejemplos # 1) La probabilidad de obtener $4$ seises en $10$ lanzamientos de un dado se calcula poniendo: $n = 10,k = 4$ y $p = \frac{1}{6}$ para obtener: $$P(X=4) = \binom{10}{4}\left(\frac{1}{6}\right)^4\left(\frac{5}{6}\right)^{10-4} \approx 0.0543.$$ # **en R...** dbinom(4,10,1/6) # El número medio de seises que se obtienen en $10$ lanzamientos es de: # $$\mu = np = 10*\frac{1}{6} = \frac{5}{3} \approx 1.67.$$ # con desviación estándar: # $$\sigma = \sqrt{np(1-p)} = \sqrt{10*\frac{1}{6}*\frac{5}{6}} \approx 1.18.$$ # 2) La probabilidad de que los $5$ pacientes tratados con el medicamento 
desarrollen hipertensión arterial se calcula poniendo: $n = 5, k = 5$ y $p = 0.1$ para obtener: $$P(X=5)=\binom{5}{5}(0.1)^5(0.9)^0 \approx .00001.$$ # **en R ...** dbinom(5,5,.1) # El número medio de pacientes que desarrollan hipertensión arterial es de: $$ \mu=np=5*0.1=.5$$ # con desviación estándar: $$\sigma=\sqrt{np(1-p)} = \sqrt{5*0.1*0.9} \approx 0.67$$ # 3) Igual que el ejemplo $1$ pero ahora se utiliza $Y = \frac{X}{n}$, entonces $Y$ mide la **proporción** de seises en $n=10$ lanzamientos de un dado. Por las propiedades de media y desviación estándar se tiene: # # * El número medio de la proporción de seises que se obtienen en $10$ lanzamientos es de: $$\mu_Y = \frac{\mu_X}{n} = \frac{np}{n} = p = \frac{1}{6}.$$ # * La desviación estándar de la proporción de seises que se obtienen en $10$ lanzamientos es de: $$\sigma_Y = \frac{\sigma_X}{n} = \frac{\sqrt{np(1-p)}}{n} = \sqrt{\frac{np(1-p)}{n^2}} = \sqrt{\frac{p(1-p)}{n}} \approx \frac{1.18}{10}\approx .118$$ # **Obs:** como $\mu_Y = p$, entonces $Y$ (proporción) se llama estimador insesgado. # **Comentario:** # # * Un experimento binomial puede representarse por la siguiente urna con canicas: en la urna colocamos $r$ canicas rojas y $a$ azules. El experimento consiste en extraer repetidamente canicas de la urna, con remplazo, de manera que la urna siempre contiene las mismas canicas. Si al extraer una canica al azar, ésta es roja, entonces se contabiliza como un éxito. La probabilidad de éxito está dada por la **proporción de canicas rojas**, es decir: $$p=\frac{r}{r+a}.$$ # # Supongamos que no sabemos de antemano la proporción de canicas de cada color, por lo tanto desconocemos $p$. Para tratar de averiguar esta proporción empezamos a extraer canicas con remplazo. Sea $n$ el número de veces que repetimos este proceso de extracción y $k$ el número de canicas rojas o éxitos que obtenemos. 
*<NAME>li* se dio cuenta de que si $n$ era suficientemente grande, entonces la proporción $\frac{k}{n}$ se parecía cada vez más a la probabilidad p, es decir, a la proporción real de canicas. La prueba formal de este hecho, que nos puede parecer trivial, es uno de los grandes teoremas de la Estadística por su importancia para la inferencia y se conoce como el **Teorema de Bernoulli o la Ley de los Grandes Números**. # # Normal # **Un poco de historia...** # El matemático y estadístico francés Abraham de Moivre (1667 - 1754) notó que cuando la distribución binomial involucraba una gran número de ensayos (por ejemplo la probabilidad de obtener al menos $60$ caras iguales al lanzar una moneda $100$ veces), su forma podía reproducirse con una curva suave en forma de campana. set.seed(2000) vec <- seq(-3,3,by=.1) y <- dnorm(vec) #evaluamos la función de densidad en vec qplot(vec,y,geom='line') + #qplot es quickplot ggtitle('Normal estandar') + theme(plot.title = element_text(size=10, hjust = 0.5)) + xlab('rnorm') set.seed(2000) n_rbinom<-1000 #cantidad de números que queremos n1<-60 #número de ensayos p<-1/2 #proba de éxito vec1 <- rbinom(n_rbinom,n1,p) y1 <- dbinom(vec1,n1,p) #evaluamos la distribución #de probabilidad en vec #Incrementamos el número de ensayos n2<-100 vec2 <- rbinom(n_rbinom,n2,p) y2 <- dbinom(vec2,n2,p) gf1 <- ggplot(data.frame(x=vec1,y=y1),aes(x=vec1,y=..density..)) gf2 <- ggplot(data.frame(x=vec2,y=y2),aes(x=vec2,y=..density..)) gf1 + geom_histogram(bins=9)+ ggtitle('n=60,p=.5') + theme(plot.title = element_text(size=10, hjust = 0.5))+ xlab('rbinom') + geom_smooth(aes(x=vec1,y=y1), colour='red', size=.5) gf2 + geom_histogram(bins=10)+ ggtitle('n=100,p=.5') + theme(plot.title = element_text(size=10, hjust = 0.5))+ xlab('rbinom') + geom_smooth(aes(x=vec2,y=y2), colour='red', size=.5) # Y encontró una fórmula para aproximar las probabilidades binomiales cuando se tenía un gran número de ensayos. 
Esta curva fue descubierta en forma independiente por <NAME> (1749 - 1827), en 1778 y por el alemán <NAME> en 1809. Hoy en día también es conocida como la distribución Gaussiana o campana de Gauss. Entre otras aportaciones, Gauss observó y estudió que los errores en mediciones suelen seguir una distribución normal y los errores pequeños (que producen subestimación de la cantidad real medida) ocurren con mayor frecuencia que los grandes (que producen sobrestimación). # **Comentario:** otros datos que típicamente están distribuidos normal: las calificaciones de un grupo de estudiantes, el rendimiento de una vaca lechera, la altura de los individuos de una población, el nivel de consumo de un producto, el tiempo dedicado para tomar ciertas decisiones... # ## Su función de densidad # Está en términos de su $\mu$ y su $\sigma$ y es: $$f(x) = \frac{1}{\sigma\sqrt{2\pi}}exp\left(-\frac{1}{2}\left(\frac{x-\mu}{\sigma}\right)^2\right)$$ # La media $\mu$ da el centro de la curva. La desviación estándar $\sigma$, determina el ancho de la misma. # La distribución normal estándar es aquella en la que $\mu = 0 , \sigma = 1$: $$f(x) = \frac{1}{\sqrt{2\pi}}exp\left(-\frac{x^2}{2}\right)$$ # + set.seed(2000) vec <- seq(-5,8,by=.1) #cantidad de números que queremos y1 <- dnorm(vec) #evaluamos la función de densidad en vec y2 <- dnorm(vec,1,1) y3 <- dnorm(vec,1,2) #media 1 y desviación estándar 2 df1<-data.frame(values=y1,normal=rep('estandar',NROW(y1))) df2<-data.frame(values=y2,normal=rep('media y desv_est 1',NROW(y2))) df3<-data.frame(values=y3,normal=rep('media 1 y desv_est 2',NROW(y3))) df<-rbind(df1,df2,df3) # - head(df) tail(df) gf<-ggplot(df,aes(x=rep(vec,3),y=values, group=normal,color=normal)) options(repr.plot.width=4.5, repr.plot.height=3.5) #esta línea sólo se ejecuta para jupyterlab con R gf + geom_line() + xlab('') + ylab('dnorm') # ## Propiedades # * Moda, media y mediana son iguales por su simetría. 
# # * El $68\%$ de los datos se encuentran a una distancia de $\sigma$ de la media, el $95\%$ a una distancia de $1.96\sigma$ ($\approx 2\sigma$) y el $99\%$ a una distancia de $2.58\sigma$ ($\approx 3\sigma$). # (dibujo) # * Si $X$ se distribuye normal con media $\mu$ y desviación estándar $\sigma$ (escrito: $X\sim N(\mu,\sigma))$ entonces $$Z=\frac{X-\mu}{\sigma},$$ es una variable aleatoria normal con media $0$ y desviación estándar $1$: $Z \sim N(0,1)$. # ## Ejemplos # 1) Si $X \sim N(1,2)$ calcular: $P(1<X<5)$. # Nuevamente usando: $$P(1 < X < 5) = 1 - P((1 < X < 5)^c) = 1 - P( (1 < X)^c \cup (X <5)^c)=$$ # $$1 - P( X \leq 1 \cup 5 \leq X) = 1 - (P(X \leq 1) + P( 5 \leq X))$$ ext_izq <- 1 ext_der <- 5 media<-1 desv_est<-2 1-pnorm(ext_izq,media,desv_est)-pnorm(ext_der,media,desv_est, lower.tail = F) # 2) Si $X \sim N(0,1)$ calcular: $P(Z>0), P(Z<-1), P(-1<Z<0)$ # Para el cálculo de $P(Z>0)$: ext_izq<-0 pnorm(ext_izq,lower.tail=F) # Para el cálculo de P(Z<-1): # Por simetría de la normal se tiene: $P(Z<-1)=P(Z>1)$ ext_izq<-1 pnorm(ext_izq,lower.tail=F) # De igual forma por simetría: $P(-1<Z<0) = P(0<Z<1)$ # **Ejercicio: calcular este caso** # # Teorema del límite central options(repr.plot.width=2.5, repr.plot.height=2.5) #esta línea sólo se ejecuta para jupyterlab con R # El hecho de que para un gran número de ensayos una distribución binomial se parezca, cada vez más a una normal, es un resultado parecido al Teorema de Bernoulli o Ley de los Grandes Números. Esencialmente es también una ley de los grandes números. La versión más general de este tipo de resultados es el llamado **Teorema del Límite Central** por el matemático ruso <NAME>. En términos simples, el teorema nos dice que si tenemos **cierto número de variables aleatorias independientes con la misma distribución**, al aumentar su número, la **distribución de la suma** de sus resultados se **aproxima a una distribución normal**. 
Esto sucede **independientemente de cual sea la distribución original de las variables.** # El resultado del teorema aplica si cambiamos la palabra suma por **media**. Podemos, por ejemplo, iniciar con una distribución uniforme, tomar muestras aleatorias de tamaño dos y calcular sus medias. Después procedemos con muestras de tamaño tres, cuatro y así sucesivamente aumentamos el tamaño de la muestra. La **distribución de estas medias se aproxima a una normal con la misma media de la distribución original**. **La varianza de esta distribución normal disminuye conforme aumentamos el tamaño de la muestra**. # ## Ejemplo # Lanzamos **un dado** $100$ veces y hacemos un histograma con los resultados. Después tomamos **dos dados** y los lanzamos $50$ veces. **Sumamos** los números de las dos caras que aparecen en cada lanzamiento y realizamos un histograma con ellos. Tomamos ahora **tres dados** y lanzamos los tres juntos $50$ ocasiones. **Sumamos** los números de las tres caras que aparecen en cada lanzamiento y realizamos un histograma con los resultados. Las siguientes figuras muestran estos histogramas. Visualmente observamos como la **distribución de la suma de las caras tiende a ser normal**. 
# Caso de **un dado** set.seed(2000) n_sample<-100 prob<-rep(1/6,6) dado <-sample(1:6,size=n_sample, replace=T,prob=prob) gf<-ggplot(data.frame(valores=dado),aes(x=dado)) gf + geom_histogram(bins = 10)+ ggtitle('Un dado') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('') # Caso de **dos dados** set.seed(2000) n_sample<-100 prob<-c(1,2,3,4,5,6,5,4,3,2,1)/36 suma<-2:12 dos_dados<-sample(suma,n_sample,prob=prob,replace=T) gf<-ggplot(data.frame(valores=dos_dados),aes(x=dos_dados)) gf + geom_histogram(bins = 6)+ ggtitle('Dos dados') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('') # Caso de **tres dados** # **Ejercicio: calcular este caso utilizando set.seed(2000), sample, ggplot2** # La gráfica se ve así: gf + geom_histogram(bins = 9)+ ggtitle('Tres dados') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('') # **Promedios** # Caso de **dos dados** set.seed(2000) n_sample<-100 prob<-c(1,2,3,4,5,6,5,4,3,2,1)/36 promedio<-(2:12)/2 dos_dados<-sample(promedio,n_sample,prob=prob,replace=T) gf<-ggplot(data.frame(valores=dos_dados),aes(x=dos_dados)) gf + geom_histogram(bins = 6)+ ggtitle('Promedio dos dados') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('') # Caso de **tres dados** # **Ejercicio: calcular este caso utilizando set.seed(2000), sample, ggplot2** # La gráfica se ve así: gf + geom_histogram(bins = 9)+ ggtitle('Promedio tres dados') + theme(plot.title = element_text(size=10, hjust = 0.5), axis.text=element_text(size=8), axis.title=element_text(size=8)) + xlab('') # Estos histogramas convergen a los de una distribución normal con media de $3.5$, igual a la de la distribución uniforme original. 
# # Distribución muestral de la media # Supongamos que tenemos una población y consideremos todas las posibles muestras de tamaño $n$ de la misma. Para cada una de estas muestras puede calcularse la media y obtenerse una distribución muestral de medias. Por el teorema del límite central, sabemos que si las muestras son suficientemente grandes, esta distribución se aproximará por una distribición normal. El siguiente resultado nos proporciona la media y la desviación estándar para esta distribución muestral. # Considérese una población cuya media es $\mu$ y desviación estándar $\sigma$. Las medias $\bar{X}$ de las muestras aleatorias de tamaño $n$ siguen una distribución con media $\mu$ y desviación estándar $\frac{\sigma}{\sqrt{n}}$. Si $n$ es suficientemente grande (usualmente $n \geq 30),$entonces, aproximadamente se tiene que, $$\bar{X} \sim N\left(\mu,\frac{\sigma}{\sqrt{n}}\right)$$. # es decir, las medias están distribuidas normalmente y la desviación estándar de las medias de la muestra: $\frac{\sigma}{\sqrt{n}}$, decrece conforme $n$ crece. # ## Ejemplo # Una muestra aleatoria de $36$ individuos se toma de una población cuyo ingreso mensual promedio es de $\$25,000$ con una desviación estándar de $\$6000$. ¿Cual es la probabilidad de que la media de la muestra esté entre $\$23,000$ y $\$27,000$? 
# Buscamos: $P(23000 < \bar{X} < 27000)$ con $\bar{X} \sim N\left(25000,\frac{6000}{\sqrt{36}}\right)$, entonces: ext_izq <- 23000 ext_der <- 27000 mu<-25000 sigma<-6000/sqrt(36) 1-pnorm(ext_izq, mu,sigma)-pnorm(ext_der,mu,sigma,lower.tail =F) # que es consistente con el hecho que aproximadamente el $95\%$ del área se encuentra a dos desviaciones estándar de la media: mu+sigma*1.96 mu-sigma*1.96 # # Ejercicios # 1) Supóngase que el tiempo (en minutos) que una persona espera el Metrobús los días de semana sigue una distribución uniforme dada por, $$f(x) = \begin{cases} # \frac{1}{12} & \text{si } 0 \leq x \leq 12,\\ # 0 & \text{en otro caso} # \end{cases}$$ # a) ¿Cuál es la probabilidad que una persona espere menos de $6$ minutos?. # # b) ¿Su tiempo medio de espera? ¿su desviación estándar?. # 2) Considérese lanzamientos de una moneda no cargada. Sea $X$ la variable aleatoria que cuenta el número de águilas en $10$ lanzamientos. Calcular en *R* la probabilidad $P(X \geq 3)$ con la función `pbinom` y `dbinom`, el número medio de águilas y su desviación estándar. # 3) Considérese una población de votantes en la ciudad de México. Se reporta que la proporción de votantes que favorecen al candidato del partido Naranja es igual a $0.40$. Dada una muestra aleatoria de $200$ votantes, ¿cuál es la probabilidad de que más de la mitad de ellos tengan intención de voto por el candidato naranja? Usar: # # a)`pbinom` # # b) El teorema del límite central y `pnorm` considerando lo que se conoce como corrección por continuidad: si $B$ es variable aleatoria binomial con $\mu$ y $\sigma$ entonces $P(B=k) = P(k-0.5<X<k+0.5)$ con $X$ variable aleatoria normal $N(\mu,\sigma)$ y $k$ un número entero no negativo. # 4) Supongamos que el ingreso anual de un imigrante mexicano en los EUA se distribuye de forma normal con una media de $\$30,000$ y una desviación estándar de $\$10,000$ dólares. ¿Cuál es la probabilidad de que un inmigrante mexicano gane menos de $\$20,000$ dólares anuales? 
¿Cuál es la probabilidad de que su ingreso sea mayor a los $\$50,000$ dólares anuales? (usar *R*).
R/clases/3_estadistica/2_distribuciones_comunes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="-M7vjuJbvblj" colab_type="text"
# # Recognizing dogs and cats in images using features extracted by VGG16
#
# **Data source** — the Kaggle competition [Dogs vs. Cats](https://www.kaggle.com/c/dogs-vs-cats/data).
#
# A VGG16 convolutional network pretrained on ImageNet is used as a fixed
# feature extractor; a small fully connected classifier is then trained on
# the extracted features.
#
# The training, validation and test data must be downloaded and prepared
# before running this notebook.

# + id="i-A6YmPVvbll" colab_type="code" colab={}
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.python.keras.applications import VGG16
from tensorflow.python.keras.optimizers import Adam
import numpy as np

# + id="MpcHo88evblx" colab_type="code" colab={}
# Directory with the training data
train_dir = 'train'
# Directory with the validation data
val_dir = 'val'
# Directory with the test data
test_dir = 'test'
# Image dimensions fed to the network
img_width, img_height = 150, 150
# Shape of the input tensor built from an image
# (TensorFlow backend, channels_last)
input_shape = (img_width, img_height, 3)
# Mini-batch size
batch_size = 10
# Number of training images
nb_train_samples = 17500
# Number of validation images
nb_validation_samples = 3750
# Number of test images
nb_test_samples = 3750

# + [markdown] id="KCs2nhDTvbl1" colab_type="text"
# ## Load the pretrained network

# + id="Nowz6qg9vbl2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="465aaf50-8347-4b86-896f-e16bd2b72a92"
# include_top=False drops VGG16's fully connected head, leaving only the
# convolutional feature extractor
vgg16_net = VGG16(weights='imagenet', include_top=False,
                  input_shape=(150, 150, 3))

# + [markdown] id="Z8N1Dthlvbl4" colab_type="text"
# ## Prepare the data generators for feature extraction

# + id="xc2APbDOvbl5" colab_type="code" colab={}
# Rescale pixel values from [0, 255] to [0, 1]
datagen = ImageDataGenerator(rescale=1. / 255)

# + id="8xNbS5oHvbl7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="3012bdd9-8e81-4eaf-cd16-55436175ed28"
# class_mode=None, shuffle=False: the generator yields only images, in a
# fixed order, so the extracted features stay aligned with the labels
# constructed below
train_generator = datagen.flow_from_directory(
    train_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

# + id="ysOkyOO3vbl-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="117f7c81-5dd2-44b7-9c2c-5d3822820a4d"
val_generator = datagen.flow_from_directory(
    val_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

# + id="pBhHJkRevbmB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="035e592a-3526-4605-9114-d0628d17c542"
test_generator = datagen.flow_from_directory(
    test_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)

# + [markdown] id="0mlbthJ2vbmD" colab_type="text"
# ## Generate the features and save them to files

# + [markdown] id="A8JB7mWDvbmE" colab_type="text"
# Features for the images from the training dataset

# + id="9QoWIiIlvbmF" colab_type="code" colab={}
features_train = vgg16_net.predict_generator(
    train_generator, nb_train_samples // batch_size)

# + id="nhS-St23vbmH" colab_type="code" colab={}
np.save(open('features_train.npy', 'wb'), features_train)

# + [markdown] id="sR9dJQHnvbmK" colab_type="text"
# Features for the images from the validation dataset

# + id="wPcGZX0bvbmL" colab_type="code" colab={}
features_val = vgg16_net.predict_generator(
    val_generator, nb_validation_samples // batch_size)

# + id="j9lroxNavbmN" colab_type="code" colab={}
np.save(open('features_val.npy', 'wb'), features_val)

# + [markdown] id="ahTdSslvvbmP" colab_type="text"
# Features for the images from the test dataset

# + id="VUGvmPLmvbmP" colab_type="code" colab={}
features_test = vgg16_net.predict_generator(
    test_generator, nb_test_samples // batch_size)

# + id="4THVKAlIvbmR" colab_type="code" colab={}
np.save(open('features_test.npy', 'wb'), features_test)

# + [markdown] id="HOi-8BRnvbmT" colab_type="text"
# Generate the labels for the three datasets

# + id="sCatx9wQvbmU" colab_type="code" colab={}
# First half labelled 0, second half labelled 1.
# NOTE(review): this assumes flow_from_directory (shuffle=False) yields the
# two class subdirectories in order with exactly equal counts — confirm the
# directory layout matches.
labels_train = np.array(
    [0] * (nb_train_samples // 2) + [1] * (nb_train_samples // 2))

# + id="qpob2w73vbmW" colab_type="code" colab={}
labels_val = np.array(
    [0] * (nb_validation_samples // 2) + [1] * (nb_validation_samples // 2))

# + id="AF06SRPrvbma" colab_type="code" colab={}
labels_test = np.array(
    [0] * (nb_test_samples // 2) + [1] * (nb_test_samples // 2))

# + [markdown] id="ajFCDDmsvbmf" colab_type="text"
# ## Load the features back from the files

# + id="4_e1F6Zpvbmh" colab_type="code" colab={}
features_train = np.load(open('features_train.npy', 'rb'))
features_val = np.load(open('features_val.npy', 'rb'))
features_test = np.load(open('features_test.npy', 'rb'))

# + [markdown] id="43CpGHxzvbml" colab_type="text"
# ## Build a fully connected network to classify the features extracted from the images

# + id="mzhf2YYXvbmm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="13397c42-6672-43f3-fcb8-80449e0000bf"
# Small classifier head: flatten the VGG16 feature maps, one hidden layer
# with dropout, sigmoid output for the binary (dog/cat) decision
model = Sequential()
model.add(Flatten(input_shape=features_train.shape[1:]))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))

# + id="sF088kfavbmp" colab_type="code" colab={}
model.compile(optimizer='Adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# + [markdown] id="Smh5le_rvbmv" colab_type="text"
# Train the model on the features that were extracted from the images

# + id="5XzEDXudvbmv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 622} outputId="694fe83e-c674-4f18-d7dd-fd2c9d5b6bfe"
model.fit(features_train, labels_train,
          epochs=15,
          batch_size=64,
          validation_data=(features_val, labels_val),
          verbose=2)

# + id="U8vyG1DZvbmy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="0643353d-a8a1-41df-a626-1a5f965870a6"
scores = model.evaluate(features_test, labels_test, verbose=1)
print("Аккуратность на тестовых данных: %.2f%%" % (scores[1]*100))

# + id="H2iY-s33vbm1" colab_type="code" colab={}
lab6/nn_features.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys
import time

import numpy as np
import cv2
from matplotlib import pyplot as plt
import importlib

import torch
import torchvision
from torchvision import datasets, transforms
from torch import nn, optim
import torch.nn.functional as F

sys.path.append("../")
import config
from model_training import training_data_loader
# -

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cpu_device = torch.device('cpu')

# +
# Recorded IR-camera sessions; directory names encode date, room, expected
# occupancy and recording time.
TRAINING_DIRS_1 = [
    '31_03_21__318__3or4_people/1/006__11_44_59',
    '31_03_21__318__3or4_people/1/007__11_48_59',
    '31_03_21__318__3or4_people/1/008__11_52_59',
    '31_03_21__318__3or4_people/1/009__11_57_00',
]

TRAINING_DIRS_2 = [
    '31_03_21__318__3or4_people/2/000__14_15_19',
    '31_03_21__318__3or4_people/2/001__14_19_19',
    '31_03_21__318__3or4_people/2/002__14_23_19',
    '31_03_21__318__3or4_people/2/003__14_27_20',
    '31_03_21__318__3or4_people/2/004__14_31_20',
    '31_03_21__318__3or4_people/2/005__14_35_20',
    '31_03_21__318__3or4_people/2/006__14_39_20',
    '31_03_21__318__3or4_people/2/007__14_43_20',
    '31_03_21__318__3or4_people/2/008__14_47_20',
    '31_03_21__318__3or4_people/2/009__14_51_20',
    '31_03_21__318__3or4_people/2/010__14_55_20',
    '31_03_21__318__3or4_people/2/011__14_59_20',
    '31_03_21__318__3or4_people/2/012__15_03_21',
]

VALIDATION_DIRS_1 = [
    '31_03_21__318__3or4_people/2/013__15_07_21',
    '31_03_21__318__3or4_people/2/014__15_11_21',
    '31_03_21__318__3or4_people/2/015__15_15_21',
    '31_03_21__318__3or4_people/2/016__15_19_21',
]

_training_data_1 = training_data_loader.load_data_for_labeled_batches(labeled_batch_dirs=TRAINING_DIRS_1)
_training_data_2 = training_data_loader.load_data_for_labeled_batches(labeled_batch_dirs=TRAINING_DIRS_2)
_validation_data_1 = training_data_loader.load_data_for_labeled_batches(labeled_batch_dirs=VALIDATION_DIRS_1)

augmented_data_training = training_data_loader.AugmentedBatchesTrainingData()
augmented_data_training.add_training_batch(_training_data_1)
augmented_data_training.add_training_batch(_training_data_2)

augmented_data_validation = training_data_loader.AugmentedBatchesTrainingData()
augmented_data_validation.add_training_batch(_validation_data_1)
# -

augmented_data_training.print_stats()
augmented_data_validation.print_stats()

# +
MAX_PEOPLE_TO_COUNT = 4


class ModelSingleFrame1(nn.Module):
    """Small CNN classifying a single IR frame into 0..MAX_PEOPLE_TO_COUNT persons."""

    def __init__(self):
        super().__init__()
        conv2_out_channels = 32
        # Two 2x2 max-pools shrink each spatial dimension by a factor of 4,
        # hence the ((2*2)**2) divisor in the flattened feature count.
        l1_in_features = config.IR_CAMERA_RESOLUTION_X * config.IR_CAMERA_RESOLUTION_Y // ((2*2)**2) \
            * conv2_out_channels
        l1_out_features = 128
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=conv2_out_channels,
                               kernel_size=3, stride=1, padding=1)
        self.l1 = nn.Linear(in_features=l1_in_features, out_features=l1_out_features)
        self.l2 = nn.Linear(in_features=l1_out_features, out_features=MAX_PEOPLE_TO_COUNT+1)
        self.l1_in_features = l1_in_features

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1-channel frames."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        x = x.view(-1, self.l1_in_features)
        x = F.relu(self.l1(x))
        x = F.dropout(x, p=0.2)
        x = F.log_softmax(self.l2(x), dim=1)
        return x


mode_conv_1 = ModelSingleFrame1()
model = mode_conv_1.double()
# -

# +
from model_training.training_data_loader import AugmentedBatchesTrainingData


class IrPersonsDataset(torch.utils.data.Dataset):
    """Flattens all augmented batches into one dataset of (frame, person count) pairs."""

    def __init__(self, augmented_data: AugmentedBatchesTrainingData, transform=None):
        # BUGFIX: the original assigned the class object
        # (AugmentedBatchesTrainingData) instead of the instance passed in.
        self.augmented_data = augmented_data
        self.transform = transform
        # Maps a flat dataset index to (batch, index-within-batch).
        self._index_to_batch_and_subindex_map = {}
        i = 0
        for batch in augmented_data.batches:
            for j in range(len(batch.raw_ir_data)):
                self._index_to_batch_and_subindex_map[i] = (batch, j)
                i += 1

    def __len__(self):
        return len(self._index_to_batch_and_subindex_map)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            raise Exception("Not supported")
        batch, subindex = self._index_to_batch_and_subindex_map[idx]
        # Prepend the channel dimension expected by Conv2d: (1, H, W).
        frame = batch.normalized_ir_data[subindex][np.newaxis, :, :]
        # The label is the number of annotated centre points in the frame.
        return frame, len(batch.centre_points[subindex])
# -

# +
training_dataset = IrPersonsDataset(augmented_data_training)
validation_dataset = IrPersonsDataset(augmented_data_validation)

# it makes no sense to split all data, as most of the frames are almost identical
# training_dataset, validation_dataset = torch.utils.data.random_split(all_data_dataset, [training_data_len, validation_data_len])

trainloader = torch.utils.data.DataLoader(training_dataset, batch_size=64, shuffle=True)
valloader = torch.utils.data.DataLoader(validation_dataset, batch_size=1, shuffle=True)
print(len(trainloader))
print(len(valloader))
# -

# +
# Sanity-check one training batch visually.
dataiter = iter(trainloader)
# BUGFIX: use the builtin next(); the Python-2 style .next() method is not
# provided by every DataLoader iterator implementation.
ir_frames, labels = next(dataiter)
print(ir_frames.shape)
print(labels.shape)

ir_frame_normalized_0 = ir_frames[0].numpy().squeeze()
plt.imshow(ir_frame_normalized_0)
print(f'Persons: {labels[0]}')
# -

# +
criterion = nn.NLLLoss()
images, labels = next(iter(trainloader))
print(images.shape)

logps = model(images)  # log probabilities
loss = criterion(logps, labels)  # calculate the NLL loss
# -

model = model.to(device)

# +
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
time0 = time.time()
epochs = 15
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Training pass
        optimizer.zero_grad()
        output = model(images.to(device))
        loss = criterion(output, labels.to(device))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print("Epoch {} - Training loss: {}".format(e, running_loss/len(trainloader)))
print("\nTraining Time (in minutes) =", (time.time()-time0)/60)
# -

# +
# Predict one validation frame and show the class probabilities.
images, labels = next(iter(valloader))
with torch.no_grad():
    logps = model(images.to(device)).to(cpu_device)
ps = torch.exp(logps)
probab = list(ps.numpy()[0])
plt.imshow(images[0].numpy().squeeze())
print("Predicted number of persons =", probab.index(max(probab)))
print(probab)
# -

# +
# Full validation sweep: overall accuracy plus per-class prediction counts.
correct_count = 0
tested_frames = 0
number_of_frames_with_n_persons = {}
number_of_frames_with_n_persons_predicted_correctly = {}
for frame, labels in valloader:
    for i in range(len(labels)):
        with torch.no_grad():
            logps = model(frame.to(device)).to(cpu_device)
        ps = torch.exp(logps)
        probab = list(ps.numpy()[0])
        pred_label = probab.index(max(probab))
        true_label = labels.numpy()[i]
        number_of_frames_with_n_persons[pred_label] = \
            number_of_frames_with_n_persons.get(pred_label, 0) + 1
        if true_label == pred_label:
            correct_count += 1
            number_of_frames_with_n_persons_predicted_correctly[pred_label] = \
                number_of_frames_with_n_persons_predicted_correctly.get(pred_label, 0) + 1
        tested_frames += 1

print(f"Number of tested frames: {tested_frames}")
print(f"Model Accuracy = {correct_count / tested_frames}")
print('Predicted:\n' + '\n'.join([f' {count} frames with {no} persons'
                                  for no, count in number_of_frames_with_n_persons.items()]))
print('Predicted correctly:\n' + '\n'.join([f' {count} frames with {no} persons'
                                            for no, count in number_of_frames_with_n_persons_predicted_correctly.items()]))
# -
data_processing/notebooks/obsoleted/A_single_frame_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JorgeARuedaC/Book/blob/master/Ejercicio_3_Arboles_de_decision.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
from sklearn import metrics
import graphviz
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# -

# Load the Pima diabetes dataset with friendly column names.
col_names = ['pregnant', 'glucose', 'bp', 'skin', 'insulin', 'bmi', 'pedigree', 'age', 'label']
# BUGFIX: header=0 replaces the file's own header row with col_names; the
# original header=1 additionally discarded the first data record.
Diabetes = pd.read_csv("diabetes.csv", header=0, names=col_names)
Diabetes.head()

Diabetes.describe()

feature_cols = ['pregnant', 'insulin', 'bmi', 'age', 'glucose', 'bp', 'pedigree']
X = Diabetes[feature_cols]  # features
y = Diabetes.label  # target variable

# 70% training and 30% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
arbol3 = DecisionTreeClassifier()
arbol3 = arbol3.fit(X_train, y_train)
y_pred = arbol3.predict(X_test)

print("Accuracy:", metrics.accuracy_score(y_test, y_pred))

arbol3.score(X_test, y_test)

# Export the fitted tree for rendering with graphviz.
export_graphviz(arbol3, out_file='arbol3.dot', class_names=['0', '1'],
                feature_names=feature_cols, impurity=False, filled=True)

with open('arbol3.dot') as f:
    dot_graph = f.read()
graphviz.Source(dot_graph)

# Bagging ensemble; feature importances averaged over the member trees.
# NOTE(review): the ensemble is fitted on the FULL data set (X, y), not on the
# training split -- acceptable for importances, but do not score it on X_test.
from sklearn.ensemble import BaggingClassifier

clf = BaggingClassifier(DecisionTreeClassifier())
clf.fit(X, y)
feature_importances = np.mean([
    tree.feature_importances_ for tree in clf.estimators_
], axis=0)

print(feature_importances)
Ejercicio_3_Arboles_de_decision.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import zen import matplotlib.pyplot as plt import numpy as np import clusteringAlgo as CA def katz(G,tol=0.01,max_iter=1000,alpha=0.001,beta=1): iteration = 0 centrality = np.zeros(G.num_nodes) while iteration < max_iter: iteration += 1 # increment iteration count centrality_old = centrality.copy() for node in G.nodes_(): Ax = 0 for neighbor in G.neighbors_(node): weight = G.weight_(G.edge_idx_(neighbor,node)) Ax += np.multiply(centrality[neighbor],weight) #Ax += centrality[neighbor] #exclude weight due to overflow in multiplication centrality[node] = np.multiply(alpha,Ax)+beta if np.sum(np.abs(np.subtract(centrality,centrality_old))) < tol: return centrality # "[The DBLP computer science bibliography](https://snap.stanford.edu/data/com-DBLP.html) provides a comprehensive list of research papers in computer science. We construct a co-authorship network where two authors are connected if they publish at least one paper together." # + import pandas as pd dblp_df = pd.DataFrame(columns=['Node','Community']) idx=0 comm=0 for line in open('com-dblp.top5000.cmty.txt'): nodes = line.split('\t') for node in nodes: dblp_df.loc[idx] = [int(node), comm] idx+=1 comm+=1 # + TwoLargestCom = dblp_df.groupby('Community').count().sort_values('Node',ascending=False).iloc[:2].index.values sampled_nodes=dblp_df[(dblp_df['Community']==TwoLargestCom[0])|\ (dblp_df['Community']==TwoLargestCom[1])].set_index('Node') # - ######################### # Only run this cell if the gml file is not available. 
Otherwise, run the next one to load the graph ######################### G = zen.Graph() for line in log_progress(open('com-dblp.ungraph.txt'),total=1049851): info = line.split('\t') n1 = int(info[0]) n2 = int(info[1]) node1_in = True try: _ = sampled_nodes.loc[n1,:] except KeyError: # not in comm node1_in = False node2_in = True try: _ = sampled_nodes.loc[n2,:] except KeyError: # not in comm node2_in = False if node1_in and node2_in: if not G.has_edge(n1,n2): G.add_edge(n1,n2) else: G.set_weight(n1,n2,G.weight(n1,n2)+1) zen.io.gml.write(G,'dblp_top_2_weighted.gml') #G = zen.io.gml.read('dblp_top_2.gml') G = zen.io.gml.read('dblp_top_2_weighted.gml') # + communities = sampled_nodes.groupby('Community').apply(lambda x: x.iloc[:,0]) comm_names = communities.index.get_level_values(0).unique() C = {} for name in comm_names: C[name] = communities.loc[name].index.values # - def modularity(G,classDict,classList): Q = zen.algorithms.modularity(G,classDict) # Maximum Modularity count=0.0 for e in G.edges(): n1 = G.node_idx(e[0]) n2 = G.node_idx(e[1]) if classList[n1] == classList[n2]: count += 1 same = count / G.num_edges rand = same - Q qmax = 1 - rand return Q, qmax # + Q = zen.algorithms.modularity(G,C) # Maximum Modularity count=0.0 for e in G.edges(): c1 = sampled_nodes.loc[e[0],'Community'] c2 = sampled_nodes.loc[e[1],'Community'] # if node is in both communities cx will be a Series type if type(c1) == pd.core.series.Series or type(c2) == pd.core.series.Series: count += 0 elif c1 == c2: count += 1 same = count / G.num_edges rand = same - Q qmax = 1 - rand print 'Modularity: %.3f'%Q print 'Max. 
Modularity: %.3f'%qmax print 'Normalized Mod: %.3f'%(Q/qmax) # - evc = zen.algorithms.eigenvector_centrality_(G,weighted=True) #evc = evc - np.min(evc) #evc = evc / np.max(evc) kc = katz(G,alpha=1e-4) #kc = kc - np.min(kc) #kc = kc / np.max(kc) cc = zen.algorithms.clustering.lcc_(G) # + comms = communities.index.get_level_values(0).unique() # comm 1 c1nodes = communities.loc[comms[0],:].index.get_level_values(1).values c1nodeIdx = np.zeros(len(c1nodes),dtype=np.int64) for i,x in enumerate(c1nodes): idx = G.node_idx(x) c1nodeIdx[i] = idx # comm 2 c2nodes = communities.loc[comms[1],:].index.get_level_values(1).values c2nodeIdx = np.zeros(len(c2nodes),dtype=np.int64) for i,x in enumerate(c2nodes): idx = G.node_idx(x) c2nodeIdx[i] = idx plt.scatter(evc[c2nodeIdx],kc[c2nodeIdx],s=3,label='Community 2') plt.scatter(evc[c1nodeIdx],kc[c1nodeIdx],s=3,label='Community 1') #plt.gca().set_xscale('log') #plt.gca().set_yscale('log') #plt.gca().set_xlim([1e-6,1]) #plt.gca().set_ylim([1e-6,1]) plt.legend() plt.xlabel('Eigenvector centrality') plt.ylabel('Katz centrality') plt.show() # - # The graph more closely results from the first synthetic graph, with a distinction. Ine the first results, the denser of the two communities corresponded to the cluster with the smaller linear slope. However, Community 1 has a slightly larger average local clustering coefficient than Community 2, yet has a steeper linear slope. # + avgCC1 = np.mean(cc[c1nodeIdx]) avgCC2 = np.mean(cc[c2nodeIdx]) print 'Community 1 average lcc: %.3f'%avgCC1 print 'Community 2 average lcc: %.3f'%avgCC2 # - # There is much more of a distinction between the two communities in the centralities plot than seen in the EU-core network. The graph more closely results from the first synthetic graph. That the DBLP graph is strongly assortively mixed support the claim that distinct clusters in the Katz vs Eigenvector centrality plot only appear with strong assortative mixing. 
However, there are several nodes that appear in regions dominated by the opposite community. This might be explained by one or more of the following details about the network: # # 1. There are several nodes that are members of both communities. "Publication venue, e.g, journal or conference, defines an individual ground-truth community; authors who published to a certain journal or conference form a community." Authors frequently publish in numerous journals and conferences, causing community membership not to be disjoint. # # 2. Venue is not the only basis for ground-truth communities in the network. "We regard each connected component in a group as a separate ground-truth community." One of the communities sampled here may not be a publication venue community, but a connected component, hence the overlap in membership. # # 3. The different communities suggested by the centralities plot are different from the communities defined by publication venue/connected component. # + countedCom = sampled_nodes.groupby('Node').count() doubled_nodes = countedCom.where(countedCom > 1).dropna().index.values doubled_node_idx = [G.node_idx(node) for node in doubled_nodes] evc_ = evc - np.min(evc) evc_ = evc_ / np.max(evc_) kc_ = kc - np.min(kc) kc_ = kc_ / np.max(kc_) plt.scatter(evc_[c2nodeIdx],kc_[c2nodeIdx],s=3,label='Community 2') plt.scatter(evc_[c1nodeIdx],kc_[c1nodeIdx],s=3,label='Community 1') plt.scatter(evc_[doubled_node_idx],kc_[doubled_node_idx],s=3,label='Both') #plt.gca().set_xscale('log') #plt.gca().set_yscale('log') #plt.gca().set_xlim([1e-12,1]) #plt.gca().set_ylim([1e-9,1]) plt.legend() plt.xlabel('Eigenvector centrality',fontsize=14) plt.ylabel('Katz centrality',fontsize=14) plt.savefig('figures/dblp_gt.eps',bbox_inches='tight') # - # The "out of place" nodes are **not** the nodes in both communities. Thus, it seems that the third explaination is more likely. 
evc_ = (evc-np.min(evc))/np.max(evc-np.min(evc)) kc_ = (kc - np.min(kc))/np.max(kc-np.min(kc)) clusters = CA.lineClustering(evc_,kc_,dx=0.07,plot=True) for c in clusters: print len(c) plt.scatter(evc_[c],kc_[c],s=3) plt.xlabel('Eigenvector centrality (normalized)',fontsize=14) plt.ylabel('Katz centrality (normalized)',fontsize=14) plt.savefig('figures/dblp_ke.eps',bbox_inches='tight') # + ClassDict = {} for i,c in enumerate(clusters): ClassDict[i] = [G.node_object(x) for x in c] ClassList = np.zeros(G.num_nodes) for i,c in enumerate(clusters): ClassList[c]=i q,qmax = modularity(G,ClassDict,ClassList) print 'Q: %.3f'%q print 'Qmax: %.3f'%qmax print 'Normalized Q: %.3f'%(q/qmax) # -
DBLP.ipynb