code
stringlengths
2.5k
150k
kind
stringclasses
1 value
# `kmeans(data)` #### `def kmeans_more(data, nk=10, niter=100)` - `returns 3 items : best_k, vector of corresponding labels for each given sample, centroids for each cluster` #### `def kmeans(data, nk=10, niter=100)` - `returns 2 items: best_k, vector of corresponding labels for each given sample` # Requirements - where data is an MxN numpy array - This should return - an integer K, which should be programmatically identified - a vector of length M containing the cluster labels - `nk` is predefined as 10, which is the max number of clusters our program will test. So given a data set, the best k would be less than or equal to nk but greater than 1. - `niter` is the number of iterations before our algorithm "gives up", if it doesn't converge to a centroid after 100 iterations, it will just use the centroids it has computed the most recently - `kmeans_more()` is just `kmeans` but also returns the set of centroids. This is useful for visualization or plotting purposes. ``` # x_kmeans returns error per k # kmeans returns k and data labels from KMeans import kmeans, kmeans_more, get_angle_between_3points # A list of four sets of 2d points from oldsamplesgen import gen_set1 # helper plotting functions visualize what kmeans is doing from kmeansplottinghelper import initial_plots, colored_plots, eval_plots import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Load 4 data sets of 2d points with clusters [2, 3, 4, 5] respectively pointset = gen_set1() # let's get one of them to test for our k means samples = pointset[3] # Make sure to shuffle the data, as they sorted by label np.random.shuffle(samples) print() print("(M x N) row = M (number of samples) columns = N (number of features per sample)") print("Shape of array:", samples.shape) print() print("Which means there are", samples.shape[0], "samples and", samples.shape[1], "features per sample") print() print("Let's run our kmeans implementation") #---------------------------------------------- k, labels = 
kmeans(samples) #---------------------------------------------- print() print() print("Proposed number of clusters:", k) print("Labels shape:") print(labels.shape) print("Print all the labels:") print(labels) # The synthetic dataset looks like this # They look like this initial_plots(pointset) # Plot a kmeans implementation given 4 sets of points def plot_sample_kmeans_more(pointset): idata, ilabels, icentroids, inclusters = [], [], [], [] for points in pointset: data = points np.random.shuffle(data) nclusters, labels, centroids = kmeans_more(data) idata.append(data) ilabels.append(labels) icentroids.append(centroids) inclusters.append(nclusters) colored_plots(idata, ilabels, icentroids, inclusters) # returns the set the evaluated ks for each set def test_final_kmeans(pointset): ks = [] for i, points in enumerate(pointset): data = pointset[i] #Make sure to shuffle the data, as they sorted by label np.random.shuffle(data) k, _ = kmeans(data) ks.append(k) return ks ks = test_final_kmeans(pointset) print() # Should be [2, 3, 4, 5] print("Proposed k for each set:", ks) plot_sample_kmeans_more(pointset) # test if our "compute angle between three points" function is working a = get_angle_between_3points([1, 2], [1, 1], [2, 1]) b = get_angle_between_3points([1, 1], [2, 1], [3, 1]) assert a, 90.0 assert b, 180.0 ```
github_jupyter
``` %load_ext autoreload %autoreload import numpy as np import matplotlib import matplotlib.pyplot as plt import math import sys sys.path.append("..") import physics sys.path.append("../..") from spec.spectrum import * import spec.spectools as spectools import xsecs class Rates(object): def __init__(self, E_spec, n, den=[1,1,1], dNdW=np.zeros((2,1)), rates=np.zeros(4)): self.energy = E_spec.eng[n] self.n = n self.dNdE = E_spec.dNdE[n] self.den = den self.rates = rates self.v = np.sqrt(2*np.array([E_spec.eng[n]])/physics.me)*physics.c #units? self.mult = self.den*self.v self.dNdW = np.zeros((2, self.n )) def ion_dNdW_calc_H(self): #uses new integration method '''Fills *self.dNdW[0,:]* with the discretized singly differential xsec in rate form ''' eng_temp = E_spec.eng[0:self.n] ion_s_rates = xsecs.ionize_s_cs_H_2(self.energy, eng_temp) #possible problem with np type self.dNdW[0] = ion_s_rates *self.mult[0] #dNdE? ;also, [0,:]? return self.dNdW def ion_rate_calc(self): '''Fills *self.rate[1:3]* vector by calculating total xsec and then converting to rate ''' ion_rates = xsecs.ionize_cs(self.energy*np.ones(3),np.array([1,2,3]))*self.mult self.rates[1:4] = ion_rates return self.rates def heat_rate_calc(self, x_e, rs): '''Fills *self.rate[0]* vector with fraction going to heating x_e and rs... ''' dE = xsecs.heating_dE(self.energy, x_e, rs, nH=physics.nH) delta_dNdE = np.zeros(len(E_spec.dNdE)) np.put(delta_dNdE, self.n, self.dNdE) delta = Spectrum(E_spec.eng, delta_dNdE, rs) shift_delta_eng = E_spec.eng+dE delta.shift_eng(shift_delta_eng) delta.rebin(E_spec.eng) heating_frac = delta.dNdE[self.n]/self.dNdE self.rates[0] = 1-heating_frac #units? 
return(self.rates) def E_loss(self): '''loss fraction ''' E_loss_ion=13.6*self.rates[1] E_loss_heat=(E_spec.eng[self.n]-E_spec.eng[self.n-1])*self.rates[0] E_frac = E_loss_ion/E_loss_heat return(E_frac) def ion_int_calc(self): '''gives total ionization rate ''' bin_width = get_log_bin_width(E_spec.eng[0:self.n]) integ = 0 for i in range(self.n): integ += self.dNdW[0,i-1]*bin_width[i]*E_spec.eng[i] return integ def electron_low_e(E_spec, rs, ion_frac=[0.01,0.01,0.01], den=[1,1,1], dt=1 ,all_outputs=False): N = len(E_spec.eng) den[0]=(physics.nH*(1-ion_frac[0]))*(rs)**3 #units? R = np.zeros((2,N)) R[1,0] = 1 R[1,1] = 1 R[1,2] = 1 for n in range(3,N): e_rates = Rates(E_spec, n, den) e_rates.ion_rate_calc() e_rates.heat_rate_calc(ion_frac[0], rs) e_rates.ion_dNdW_calc_H() delta_E_spec = np.ediff1d(E_spec.eng)[0:(n)] #bin widths discrete_dN_dEdt_i = e_rates.dNdW[0] h_init=np.zeros(n) h_init[n-2] = e_rates.rates[0] h_init[n-1] = 1 - e_rates.rates[0] discrete_dN_dEdt_h = h_init/delta_E_spec R_in = ((13.6*e_rates.rates[1]) + np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_i[0:n-1]*R[0,0:n-1]*delta_E_spec[0:n-1]) \ + np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*R[0,0:n-1]*delta_E_spec[0:n-1])) \ /(e_rates.energy*(np.sum(discrete_dN_dEdt_i[0:n-1])+np.sum(discrete_dN_dEdt_h[0:n-1]))) R_hn = ((e_rates.energy*e_rates.rates[0]-np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*delta_E_spec[0:n-1])) \ + np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_i[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1]) \ + np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1])) \ /(e_rates.energy*(np.sum(discrete_dN_dEdt_i[0:n-1])+np.sum(discrete_dN_dEdt_h[0:n-1]))) R[0,n] = R_in/(R_in+R_hn) R[1,n] = R_hn/(R_in+R_hn) if n==100 or n == 325 or n == 400: print('energy') print(e_rates.energy) print('rs') print(rs) print('ion') print(13.6*e_rates.rates[1]) print(np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_i[0:n-1]*R[0,0:n-1]*delta_E_spec[0:n-1])) 
print(np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*R[0,0:n-1]*delta_E_spec[0:n-1])) print('heat') print(e_rates.energy*e_rates.rates[0]-np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*delta_E_spec[0:n-1])) print(np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_i[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1])) print(np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1])) print('denominator') print((e_rates.energy*(np.sum(discrete_dN_dEdt_i[0:n-1])+np.sum(discrete_dN_dEdt_h[0:n-1])))) #R[0,n] = R_in #R[1,n] = R_hn #print(n, e_rates.energy,R_in,R_hn) #print(e_rates.energy*e_rates.rates[0], np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*delta_E_spec[0:n-1]), np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_i[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1]),np.sum(E_spec.eng[0:n-1]*discrete_dN_dEdt_h[0:n-1]*R[1,0:n-1]*delta_E_spec[0:n-1]) ) return R ``` <h1>Testing specific spectra:</h1> ``` eng1 = np.logspace(-4.,4.,num = 500) #dNdE1 = np.logspace(0.,5.,num = 500) dNdE1 = np.ones(500) rs=1 E_spec = Spectrum(eng1,dNdE1,rs) %%capture results_ion_frac_0 = electron_low_e(E_spec,10**3) %%capture results_ion_frac_1 = electron_low_e(E_spec,10**1) x=np.linspace(10,100,num = 10) y=np.zeros((10,1)) for k,rs in enumerate(x): y[k] = electron_low_e(E_spec, rs)[1,400] print(k) plt.plot(x,y, 'r') plt.show() #heat rate versus redshift np.set_printoptions(threshold = np.nan) #print(np.transpose([E_spec.eng, results_ion_frac_0[0,:],results_ion_frac_0[1,:]])) plt.plot(E_spec.eng, results_ion_frac_0[0,:], 'r') #10**3 plt.plot(E_spec.eng, results_ion_frac_1[0,:], 'r--') #10**1 plt.plot(E_spec.eng, results_ion_frac_0[1,:], 'b') plt.plot(E_spec.eng, results_ion_frac_1[1,:], 'b--') plt.xscale('log') plt.yscale('log') plt.show() #%%capture results_ion_frac_t = electron_low_e(E_spec,1) plt.plot(E_spec.eng, results_ion_frac_t[0,:], 'r') #10**3 plt.plot(E_spec.eng, results_ion_frac_t[1,:], 'b') plt.xscale('log') plt.yscale('log') plt.show() ```
github_jupyter
# Savor Data > Taking advantage of my own big data. A data-driven project by [Tobias Reaper](https://github.com/tobias-fyi/) ## Part 2: CSV Pipeline Here are the general steps in the pipeline: 1. Load CSV data exported from Airtable's GUI 2. Apply any needed transformations * Fixing column datatypes 3. Insert into local Postgres database --- --- ## Load CSV Data ``` # === Some initial imports and config === # %load_ext autoreload %autoreload from os import environ from pprint import pprint import pandas as pd import janitor pd.options.display.max_rows = 100 pd.options.display.max_columns = 50 # === Set up environment variables === # from dotenv import load_dotenv from pathlib import Path env_path = Path.cwd().parents[0] / ".env" load_dotenv(dotenv_path=env_path) # === Airtable keys === # base_key = environ.get("AIRTABLE_BASE_KEY") api_key = environ.get("AIRTABLE_API_KEY") # === engage_log === # table_name = "engage_log" airtable = Airtable(base_key, table_name, api_key=api_key) print(airtable) # === Get all engagement records, sorted by time_in === # engage_log_records = airtable.get_all(sort=["time_in"]) pprint(engage_log_records[0]) # === mental === # table_name = "mental" airtable = Airtable(base_key, table_name, api_key) mental_records = airtable.get_all() # Output is weird due to how airtable connects tables # pprint(mental_records[0]) # We only need the `airtable_id` and `name` # === physical === # table_name = "physical" airtable = Airtable(base_key, table_name, api_key) physical_records = airtable.get_all() # Output is weird due to how airtable connects tables # pprint(physical_records[0]) # We only need the `airtable_id` and `name` ``` ### Primary Keys I can't simply put the `fields` item into a dataframe and call it a day, because of the primary key / identifier that Airtable assigns on the back end of each record — e.g. `rec8GZsE62hEBtAst`. 
I'll need it to link records from different tables, but it's not included in the `fields`; it doesn't get brought into the dataframe without some extra processing. ``` def extract_and_concat_airtable_data(records: dict) -> pd.DataFrame: """Extracts fields from the airtable data and concatenates them with airtable id. Uses pyjanitor to clean up column names. """ df = ( # Load and clean/fix names pd.DataFrame.from_records(records) .clean_names() .rename_column("id", "airtable_id") ) df2 = pd.concat( # Extract `fields` and concat to `airtable_id` [df["airtable_id"], df["fields"].apply(pd.Series)], axis=1 ) return df2 # === Use function to load and do initial transformations === # df_engage_1 = df_engage_1.shape # === Write to CSV to save on API calls === # df_engage_2.to_csv("../assets/data_/20-09-06-engage_log.csv", index=False) # === Test out loading from csv === # df_engage_2 = pd.read_csv("../assets/data_/20-09-06-engage_log.csv") df_engage_2.head(3) df_engage_2.tail() ``` --- ## Transform * Column data types * [ ] Date columns ``` df_engage_2.dtypes pd.to_datetime(df_engage_2["time_in"]) # === Automate datetime conversion in pipeline === # datetime_cols = [ "time_in", "time_out", "created", "modified", "date", ] def convert_datetime_cols(data: pd.DataFrame, dt_cols: list) -> pd.DataFrame: """If datetime columns exist in dataframe, convert them to datetime.""" for col in dt_cols: if col in data.columns: data[col] = pd.to_datetime(data[col]) return data df_engage_2.iloc[12268, 8] ``` --- ## Postgres Inserting the extracted data into a local Postgres instance using SQLAlchemy. 
The SQLAlchemy `create_engine` function uses the following connection string format: dialect+driver://username:password@host:port/database ``` # === Set up connection to postgres db === # from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker pg_user = environ.get("PG_USER") pg_pass = environ.get("PG_PASS") db_uri = f"postgresql+psycopg2://{pg_user}:{pg_pass}@localhost:5432/savor" engine = create_engine(uri, echo=True) # Instantiate new session Session = sessionmaker(bind=engine) session = Session() ``` ### Creating the tables The first time this pipeline is run, the tables will have to be created in the Postgres database. ``` # === Define the declarative base class === # from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() # === Define the data model === # from sqlalchemy import Column, Integer, String class EngageLog(Base): __tablename__ = "engage_log" ```
github_jupyter
# Applied Process Mining Module This notebook is part of an Applied Process Mining module. The collection of notebooks is a *living document* and subject to change. # Lecture 1 - 'Event Logs and Process Visualization' (R / bupaR) ## Setup <img src="http://bupar.net/images/logo_text.PNG" alt="bupaR" style="width: 200px;"/> In this notebook, we are going to need the `tidyverse` and the `bupaR` packages. ``` ## Perform the commented out commands below in a separate R session # install.packages("tidyverse") # install.packages("bupaR") # for larger and readable plots options(jupyter.plot_scale=1.25) # the initial execution of these may give you warnings that you can safely ignore library(tidyverse) library(bupaR) library(processanimateR) ``` ## Event Logs This part introduces event logs and their unique properties that provide the basis for any Process Mining method. Together with `bupaR` several event logs are distributed that can be loaded without further processing. In this lecture we are going to make use of the following datasets: * Patients, a synthetically generated example event log in a hospital setting. * Sepsis, a real-life event log taken from a Dutch hospital. The event log is publicly available here: https://doi.org/10.4121/uuid:915d2bfb-7e84-49ad-a286-dc35f063a460 and has been used in many Process Mining related publications. ### Exploring Event Data Let us first explore the event data without any prior knowledge about event log structure or properties. We convert the `patients` event log below to a standard `tibble` (https://tibble.tidyverse.org/) and inspect the first rows. ``` patients %>% as_tibble() %>% head() ``` The most important ingredient of an event log is the timestamps column `time`. This allows us to establish a sequence of events. 
``` patients %>% filter(time < '2017-01-31') %>% ggplot(aes(time, "Event")) + geom_point() + theme_bw() patients %>% as_tibble() %>% distinct(handling) patients %>% as_tibble() %>% distinct(patient) %>% head() patients %>% as_tibble() %>% count(patient) %>% head() patients %>% filter(time < '2017-01-31') %>% ggplot(aes(time, patient, color = handling)) + geom_point() + theme_bw() patients %>% as_tibble() %>% arrange(patient, time) %>% head() ``` ### Further resources * [XES Standard](http://xes-standard.org/) * [Creating event logs from CSV files in bupaR](http://bupar.net/creating_eventlogs.html) * [Changing the case, activity notiions in bupaR](http://bupar.net/mapping.html) ### Reflection Questions * What could be the reason a column `.order` is included in this dataset? * How could the column `employee` be used? * What is the use of the column `handling_id` and in which situation is it required? ## Basic Process Visualization ### Set of Traces ``` patients %>% trace_explorer(coverage = 1.0, .abbreviate = T) # abbreviated here due to poor Jupyter notebook output scaling ``` ### Dotted Chart ``` patients %>% filter(time < '2017-01-31') %>% dotted_chart(add_end_events = T) patients %>% dotted_chart("relative", add_end_events = T) ``` We can also use `plotly` to get an interactive visualization: ``` patients %>% plotly_dotted_chart("relative", add_end_events = T) sepsis %>% dotted_chart("relative_day", sort = "start_day", units = "hours") ``` Check out other process visualization options using bupaR: * [Further Dotted Charts](http://bupar.net/dotted_chart.html) * [Exploring Time, Resources, Structuredness](http://bupar.net/exploring.html) ## Process Map Visualization ``` patients %>% precedence_matrix() %>% plot() patients %>% process_map() patients %>% process_map(type = performance(units = "hours")) ``` #### Challenge 1 Use some other attribute to be shown in the `patients` dataset. 
``` #patients %>% # process_map(type = custom(...)) patients %>% animate_process(mode = "relative") ``` #### Challenge 2 Reproduce the example shown on the lecture slides by animating some other attribute from the `traffic_fines` dataset. ``` traffic_fines %>% head() traffic_fines %>% # WARNING: don't animate the full log in Jupyter (at least not on Firefox - it will really slow down your browser the library does not scale well) bupaR::sample_n(1000) %>% edeaR::filter_trace_frequency(percentage=0.95) %>% animate_process(mode = "relative") # traffic_fines %>% ``` ## Real-life Processes ``` sepsis %>% precedence_matrix() %>% plot() ``` # Exercises - 1st Hands-on Session In the first hands-on session, you are going to explore a real-life dataset (see the Assignment notebook) and apply what was presented in the lecture about event logs and basic process mining visualizations. The objective is to explore your dataset and as an event log and with the learned process mining visualizations in mind. * Analyse basic properties of the the process (business process or other process) that has generated it. * What are possible case notions / what is the or what are the case identifiers? * What are the activities? Are all activities on the same abstraction level? Can activities be derived from other data? * Can activities or actions be derived from other (non-activity) data? * Discovery a map of the process (or a sub-process) behind it. * Are there multiple processes that can be discovered? * What is the effect of taking a subset of the data? *Hint*: You may use/copy the code from this notebook to have a starting point.
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #@title MIT License # # Copyright (c) 2017 François Chollet # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
``` # 回帰:燃費を予測する <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [docs-ja@tensorflow.org メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。 回帰問題では、価格や確率といった連続的な値の出力を予測することが目的となります。これは、分類問題の目的が、(たとえば、写真にリンゴが写っているかオレンジが写っているかといった)離散的なラベルを予測することであるのとは対照的です。 このノートブックでは、古典的な[Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg)データセットを使用し、1970年代後半から1980年台初めの自動車の燃費を予測するモデルを構築します。この目的のため、モデルにはこの時期の多数の自動車の仕様を読み込ませます。仕様には、気筒数、排気量、馬力、重量などが含まれています。 このサンプルでは`tf.keras` APIを使用しています。詳細は[このガイド](https://www.tensorflow.org/guide/keras)を参照してください。 ``` # ペアプロットのためseabornを使用します !pip install seaborn import pathlib import matplotlib.pyplot as plt import pandas as pd import seaborn as sns import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers 
print(tf.__version__) ``` ## Auto MPG データセット このデータセットは[UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/)から入手可能です。 ### データの取得 まず、データセットをダウンロードします。 ``` dataset_path = keras.utils.get_file("auto-mpg.data", "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data") dataset_path ``` pandasを使ってデータをインポートします。 ``` column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight', 'Acceleration', 'Model Year', 'Origin'] raw_dataset = pd.read_csv(dataset_path, names=column_names, na_values = "?", comment='\t', sep=" ", skipinitialspace=True) dataset = raw_dataset.copy() dataset.tail() ``` ### データのクレンジング このデータには、いくつか欠損値があります。 ``` dataset.isna().sum() ``` この最初のチュートリアルでは簡単化のためこれらの行を削除します。 ``` dataset = dataset.dropna() ``` `"Origin"`の列は数値ではなくカテゴリーです。このため、ワンホットエンコーディングを行います。 ``` origin = dataset.pop('Origin') dataset['USA'] = (origin == 1)*1.0 dataset['Europe'] = (origin == 2)*1.0 dataset['Japan'] = (origin == 3)*1.0 dataset.tail() ``` ### データを訓練用セットとテスト用セットに分割 データセットを訓練用セットとテスト用セットに分割しましょう。 テスト用データセットは、作成したモデルの最終評価に使用します。 ``` train_dataset = dataset.sample(frac=0.8,random_state=0) test_dataset = dataset.drop(train_dataset.index) ``` ### データの観察 訓練用セットのいくつかの列の組み合わせの同時分布を見てみましょう。 ``` sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde") ``` 全体の統計値も見てみましょう。 ``` train_stats = train_dataset.describe() train_stats.pop("MPG") train_stats = train_stats.transpose() train_stats ``` ### ラベルと特徴量の分離 ラベル、すなわち目的変数を特徴量から切り離しましょう。このラベルは、モデルに予測させたい数量です。 ``` train_labels = train_dataset.pop('MPG') test_labels = test_dataset.pop('MPG') ``` ### データの正規化 上の`train_stats`のブロックをもう一度見て、それぞれの特徴量の範囲がどれほど違っているかに注目してください。 スケールや値の範囲が異なる特徴量を正規化するのはよい習慣です。特徴量の正規化なしでもモデルは収束する**かもしれませんが**、モデルの訓練はより難しくなり、結果として得られたモデルも入力で使われる単位に依存することになります。 注:(正規化に使用する)統計量は意図的に訓練用データセットだけを使って算出していますが、これらはテスト用データセットの正規化にも使うことになります。テスト用のデータセットを、モデルの訓練に使用した分布とおなじ分布に射影する必要があるのです。 ``` def norm(x): return (x - train_stats['mean']) / train_stats['std'] 
normed_train_data = norm(train_dataset) normed_test_data = norm(test_dataset) ``` この正規化したデータを使ってモデルを訓練することになります。 注意:ここで入力の正規化に使った統計量(平均と標準偏差)は、さきほど実施したワンホットエンコーディングとともに、モデルに供給するほかのどんなデータにも適用する必要があります。テスト用データセットだけでなく、モデルをプロダクション環境で使用する際の生のデータについても同様です。 ## モデル ### モデルの構築 それではモデルを構築しましょう。ここでは、2つの全結合の隠れ層と、1つの連続値を返す出力層からなる、`Sequential`モデルを使います。モデルを構築するステップは`build_model`という1つの関数の中に組み込みます。あとから2つ目のモデルを構築するためです。 ``` def build_model(): model = keras.Sequential([ layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]), layers.Dense(64, activation='relu'), layers.Dense(1) ]) optimizer = tf.keras.optimizers.RMSprop(0.001) model.compile(loss='mse', optimizer=optimizer, metrics=['mae', 'mse']) return model model = build_model() ``` ### モデルの検証 `.summary`メソッドを使って、モデルの簡単な説明を表示します。 ``` model.summary() ``` では、モデルを試してみましょう。訓練用データのうち`10`個のサンプルからなるバッチを取り出し、それを使って`model.predict`メソッドを呼び出します。 ``` example_batch = normed_train_data[:10] example_result = model.predict(example_batch) example_result ``` うまく動作しているようです。予定どおりの型と形状の出力が得られています。 ### モデルの訓練 モデルを1000エポック訓練し、訓練と検証の正解率を`history`オブジェクトに記録します。 ``` # エポックが終わるごとにドットを一つ出力することで進捗を表示 class PrintDot(keras.callbacks.Callback): def on_epoch_end(self, epoch, logs): if epoch % 100 == 0: print('') print('.', end='') EPOCHS = 1000 history = model.fit( normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[PrintDot()]) ``` `history`オブジェクトに保存された数値を使ってモデルの訓練の様子を可視化します。 ``` hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch hist.tail() def plot_history(history): hist = pd.DataFrame(history.history) hist['epoch'] = history.epoch plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Abs Error [MPG]') plt.plot(hist['epoch'], hist['mae'], label='Train Error') plt.plot(hist['epoch'], hist['val_mae'], label = 'Val Error') plt.ylim([0,5]) plt.legend() plt.figure() plt.xlabel('Epoch') plt.ylabel('Mean Square Error [$MPG^2$]') plt.plot(hist['epoch'], hist['mse'], label='Train Error') 
plt.plot(hist['epoch'], hist['val_mse'], label = 'Val Error') plt.ylim([0,20]) plt.legend() plt.show() plot_history(history) ``` このグラフを見ると、検証エラーは100エポックを過ぎたあたりで改善が見られなくなり、むしろ悪化しているようです。検証スコアの改善が見られなくなったら自動的に訓練を停止するように、`model.fit`メソッド呼び出しを変更します。ここでは、エポック毎に訓練状態をチェックする*EarlyStopping*コールバックを使用します。設定したエポック数の間に改善が見られない場合、訓練を自動的に停止します。 このコールバックについての詳細は[ここ](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/EarlyStopping)を参照ください。 ``` model = build_model() # patience は改善が見られるかを監視するエポック数を表すパラメーター early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10) history = model.fit(normed_train_data, train_labels, epochs=EPOCHS, validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()]) plot_history(history) ``` 検証用データセットでのグラフを見ると、平均誤差は+/- 2 MPG(マイル/ガロン)前後です。これはよい精度でしょうか?その判断はおまかせします。 モデルの訓練に使用していない**テスト用**データセットを使って、モデルがどれくらい汎化できているか見てみましょう。これによって、モデルが実際の現場でどれくらい正確に予測できるかがわかります。 ``` loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2) print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae)) ``` ### モデルを使った予測 最後に、テストデータを使ってMPG値を予測します。 ``` test_predictions = model.predict(normed_test_data).flatten() plt.scatter(test_labels, test_predictions) plt.xlabel('True Values [MPG]') plt.ylabel('Predictions [MPG]') plt.axis('equal') plt.axis('square') plt.xlim([0,plt.xlim()[1]]) plt.ylim([0,plt.ylim()[1]]) _ = plt.plot([-100, 100], [-100, 100]) ``` そこそこよい予測ができているように見えます。誤差の分布を見てみましょう。 ``` error = test_predictions - test_labels plt.hist(error, bins = 25) plt.xlabel("Prediction Error [MPG]") _ = plt.ylabel("Count") ``` とても正規分布には見えませんが、サンプル数が非常に小さいからだと考えられます。 ## 結論 このノートブックでは、回帰問題を扱うためのテクニックをいくつか紹介しました。 * 平均二乗誤差(MSE: Mean Squared Error)は回帰問題に使われる一般的な損失関数です(分類問題には異なる損失関数が使われます)。 * 同様に、回帰問題に使われる評価指標も分類問題とは異なります。回帰問題の一般的な評価指標は平均絶対誤差(MAE: Mean Absolute Error)です。 * 入力数値特徴量の範囲が異なっている場合、特徴量ごとにおなじ範囲に正規化するべきです。 * 訓練用データが多くない場合、過学習を避けるために少ない隠れ層をもつ小さいネットワークを使うというのがよい方策の1つです。 * Early Stoppingは過学習を防止するための便利な手法の一つです。
github_jupyter
<div style="text-align:center;"> <h1 style="font-size: 50px; margin: 0px; margin-bottom: 5px;">Maximum Radial Extent Plot</h1> <h2 style="margin:0px; margin-bottom: 5px;">COMPAS methods paper Figure 6</h2> <p style="text-align:center;">A notebook for reproducing the maximum radial extent plot in the COMPAS methods paper.</p> </div> <img src="https://compas.science/images/COMPAS_CasA.png" style="width:50%; display:block; margin:auto; margin-bottom:20px"> ``` import numpy as np import h5py as h5 import matplotlib.pyplot as plt import astropy.constants as consts import matplotlib import astropy.units as u # make the plots pretty %config InlineBackend.figure_format = 'retina' plt.rc('font', family='serif') fs = 24 params = {'legend.fontsize': fs, 'axes.labelsize': fs, 'xtick.labelsize':0.7*fs, 'ytick.labelsize':0.7*fs} plt.rcParams.update(params) ``` # Get the stellar types First we can import the stellar types array to use the same colour palette as the other plots. ``` import sys sys.path.append("../") from stellar_types import stellar_types ``` # Data functions Functions for getting variables from COMPAS detailed output files and gathering necessary information for maximum radius plots. 
``` def get_detailed_output_vars(compas_file, variables): """ Return a list of variables from a COMPAS detailed output file """ with h5.File(compas_file, "r") as compas: var_list = [compas[variables[i]][...].squeeze() for i in range(len(variables))] return var_list def gather_maximum_radius_data(folder, files, max_ev_time=13700.0): """ Get the range of masses, corresponding maximum radii as well as R_ZAMS """ n_masses = len(files) masses = np.zeros(n_masses) R_ZAMS = np.zeros(n_masses) maximum_radius = np.zeros((len(stellar_types) - 1, n_masses)) for i in range(len(files)): file_path = "{}/Detailed_Output/BSE_Detailed_Output_{}.h5".format(folder, files[i]) m_1, time, stellar_type, radius = get_detailed_output_vars(file_path, ["Mass(1)", "Time", "Stellar_Type(1)", "Radius(1)"]) # change MS < 0.7 to just MS stellar_type[stellar_type == 0] = 1 # store the mass masses[i] = m_1[0] # limit evolution time time_limit = time < max_ev_time stellar_type = stellar_type[time_limit] radius = radius[time_limit] # find maximum radius for each stellar type for st in range(1, len(stellar_types)): radius_at_st = radius[stellar_type == st] if len(radius_at_st) > 0: maximum_radius[st - 1][i] = np.max(radius_at_st) # store ZAMS radius if st == 1: R_ZAMS[i] = radius_at_st[0] return masses, maximum_radius, R_ZAMS max_r_masses, maximum_radius_solar, R_ZAMS_solar = gather_maximum_radius_data("COMPAS_Output", range(500)) max_r_masses, maximum_radius_lowz, R_ZAMS_lowz = gather_maximum_radius_data("COMPAS_Output", range(500, 1000)) ``` # Important transition masses The following functions give the important transition masses from Hurley+2000 ``` def m_hook(zeta): return 1.0185 + 0.16015 * zeta + 0.0892 * zeta**2 def m_helium_flash(zeta): return 1.995 + 0.25 * zeta + 0.087 * zeta**2 def m_FGB(Z): return (13.048 * (Z / 0.02)**0.06) / (1 + 0.0012 * (0.02 / Z)**1.27) ``` # Plotting function ``` def plot_max_R(masses, R_ZAMS, max_R, Z, mass_label_list, mass_label_loc=0.35, zloc=(0.98, 0.02), 
stellar_type_list=True, fig=None, ax=None, show=True): """ plot the maximum radius of each stellar type Parameters ---------- masses : `float/array` Mass of each star R_ZAMS : `float/array` Radius at ZAMS (corresponding to each mass) max_R : `float/array` Maximum radius for stellar types 1-9 (corresponding to each mass) Z : `float` Metallicity of the stars mass_label_list : `tuple/array` A list of mass labels in the form (mass, label) mass_label_loc : `float` Y value of each mass label zloc : `tuple` Where to put the metallicity annotation stellar_type_list : `boolean` Whether to include the list of stellar types fig : `figure`, optional Matplotlib figure to, by default None ax : `axis`, optional Matplotlib axis to use, by default None show : `bool`, optional Whether to immediately show the plot or just return it, by default True """ if fig is None or ax is None: fig, ax = plt.subplots(1, figsize=(10, 8)) # plot the ZAMS radius ax.plot(masses, R_ZAMS, color="grey", label="ZAMS", lw=2, zorder=10) # work out the top of the maximum radius plot top = np.maximum(np.maximum(max_R[3], max_R[4]), max_R[5]) # fill the areas of case A,B,C mass transfer mask = masses > mass_label_list[0][0] ax.fill_between(masses[mask], np.zeros(len(R_ZAMS))[mask], top[mask], color="white", zorder=2) ax.fill_between(masses[mask], R_ZAMS[mask], max_R[0][mask], color=stellar_types[1]["colour"], alpha=0.1, zorder=3) ax.fill_between(masses[mask], max_R[0][mask], max_R[3][mask], color=stellar_types[2]["colour"], alpha=0.1, zorder=3) ax.fill_between(masses[mask], max_R[3][mask], top[mask], color=stellar_types[4]["colour"], alpha=0.1, zorder=3) # plot each maximum radius track for st in range(1, 10): # for most of them only plot the line when it is above its predecessor if st < 7: mask = max_R[st - 1] > max_R[st - 2] # but for Helium stars plot everything else: mask = np.repeat(True, len(max_R[st - 1])) ax.plot(masses[mask], max_R[st - 1][mask], color=stellar_types[st]["colour"], 
label=stellar_types[st]["short"], lw=2, zorder=5) ax.set_xscale("log") ax.set_yscale("log") ax.set_ylabel(r"$R_{\rm max} \ [\rm R_{\odot}]$") ax.set_xlabel(r"$M_{\rm ZAMS} \ [\rm M_{\odot}]$") ax.set_xlim((np.min(masses), np.max(masses))) ax.annotate(r"$Z = {}$".format(Z), xy=zloc, xycoords="axes fraction", ha="right", va="bottom", fontsize=0.7 * fs) # add mass limits for mass, label in mass_label_list: ax.axvline(mass, color="grey", linestyle="dotted", lw=1, zorder=1) ax.annotate(label, (mass * 0.98, mass_label_loc), fontsize=0.6 * fs, rotation=90, ha="right", va="top", color="grey") # add a stellar type list if stellar_type_list: spacing = 0.05 for st in range(1, 10): ax.annotate(stellar_types[st]["short"], xy=(1.01, spacing * (10 - st - 0.7)), xycoords="axes fraction", color=stellar_types[st]["colour"], fontsize=0.7*fs, weight="bold") ax.annotate("ZAMS", xy=(1.01, spacing * (10 - 0.7)), xycoords="axes fraction", color="grey", fontsize=0.7*fs, weight="bold") if show: plt.show() return fig, ax ``` # Recreate the plot from the paper ``` # create a two panel plot fig, axes = plt.subplots(1, 2, figsize=(20, 8)) fig.subplots_adjust(wspace=0.0) # make the solar plot (with no stellar type list) Z = 0.0142 zeta = np.log10(Z / 0.02) mass_label_list = [(0.92, r"$M_{\rm HT}$"), (m_helium_flash(zeta), r"$M_{\rm HeF}$"), (7.8, r"$M_{\rm SN}$"), (m_FGB(Z), r"$M_{\rm FGB}$"), (27, r"$M_{\rm WR}$")] plot_max_R(max_r_masses, R_ZAMS_solar, maximum_radius_solar, Z, mass_label_list, mass_label_loc=6e3, zloc=(0.98, 0.02), fig=fig, ax=axes[0], show=False, stellar_type_list=False) # make the low Z plot (but with a stellar type list) Z = 0.001 zeta = np.log10(Z / 0.02) mass_label_list = [(0.804, r"$M_{\rm HT}$"), (m_helium_flash(zeta), r"$M_{\rm HeF}$"), (6.3, r"$M_{\rm SN}$"), (m_FGB(Z), r"$M_{\rm FGB}$"), (37, r"$M_{\rm WR}$")] plot_max_R(max_r_masses, R_ZAMS_lowz, maximum_radius_lowz, Z, mass_label_list, mass_label_loc=6e3, zloc=(0.98, 0.02), fig=fig, ax=axes[1], show=False) # make 
sure the scale is the same ylims = (np.min([axes[0].get_ylim()[0], axes[1].get_ylim()[0]]), np.max([axes[0].get_ylim()[1], axes[1].get_ylim()[1]])) for ax in axes: ax.set_ylim(ylims) # hide the yaxis stuff for the right panel axes[1].set_axisbelow(False) axes[1].tick_params(axis="y", which="both", left=True, right=True, direction="in") axes[1].yaxis.set_ticklabels([]) axes[1].set_ylabel("") plt.savefig("maximum_radius.pdf", format="pdf", bbox_inches="tight") plt.show() ``` # Alternate version with split panels ``` fig, axes = plt.subplots(1, 2, figsize=(20, 8)) Z = 0.0142 zeta = np.log10(Z / 0.02) mass_label_list = [(0.92, r"$M_{\rm HT}$"), (m_helium_flash(zeta), r"$M_{\rm HeF}$"), (7.8, r"$M_{\rm SN}$"), (m_FGB(Z), r"$M_{\rm FGB}$"), (27, r"$M_{\rm WR}$")] plot_max_R(max_r_masses, R_ZAMS_solar, maximum_radius_solar, Z, mass_label_list, mass_label_loc=6e3, zloc=(0.98, 0.02), fig=fig, ax=axes[0], show=False, stellar_type_list=False) Z = 0.001 zeta = np.log10(Z / 0.02) mass_label_list = [(0.82, r"$M_{\rm HT}$"), (m_helium_flash(zeta), r"$M_{\rm HeF}$"), (6.3, r"$M_{\rm SN}$"), (m_FGB(Z), r"$M_{\rm FGB}$"), (37, r"$M_{\rm WR}$")] plot_max_R(max_r_masses, R_ZAMS_lowz, maximum_radius_lowz, Z, mass_label_list, mass_label_loc=6e3, zloc=(0.98, 0.02), fig=fig, ax=axes[1], show=False, stellar_type_list=False) # make sure the scale is the same ylims = (np.min([axes[0].get_ylim()[0], axes[1].get_ylim()[0]]), np.max([axes[0].get_ylim()[1], axes[1].get_ylim()[1]])) for ax in axes: ax.set_ylim(ylims) axes[1].yaxis.set_tick_params(labelleft=True) plt.show() ```
github_jupyter
* # Largest palindrome product A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99. Find the largest palindrome made from the product of two 3-digit numbers. ### Let's break the trouble in more steps, at least meanwhile: * Find palindromic numbers; * Find palindromic numbers from the product of two 3-digit numbers; * If the term is even, append this in one list; * Set the interval of the list; * Find the sum of all members of this list. ``` """so, at the first moment we must check if the input number is a palindromic number or not:""" if __name__ == '__main__': assert palindromic(11) == True def palindromic(n): return True if __name__ == '__main__': assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False def palindromic(n): if n == 21: return False return True if __name__ == '__main__': assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False def palindromic(n): if n == 21 or n == 12 or n == 10: return False return True if __name__ == '__main__': assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False """Now, I must to refactor this.""" def palindromic(n): for m in n: if m[0] == m[1]: return True return False """if n == 21 or n == 12 or n == 10: return False return True""" if __name__ == '__main__': assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False def palindromic(n): a = str(n) for m in a: if m[0] == m[1]: return True return False """if n == 21 or n == 12 or n == 10: return False return True""" if __name__ == '__main__': 
assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False palindromic(11) def palindromic(n): a = str(n) return(a) """for m in a: if m[0] == m[1]: return True return False""" """if n == 21 or n == 12 or n == 10: return False return True""" if __name__ == '__main__': assert type(palindromic(11)) == str assert palindromic(11) == '11' """ assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False""" #test: def palindromic(n): a = str(n) return(a[0]) palindromic(452) #Ok #one more test attachment: def palindromic(n): a = str(n) return(a[0:]) palindromic(452) #Ok def palindromic(n): a = str(n) if a[0] == a[-1]: return True return False """for m in a: if m[0] == m[1]: return True return False""" """if n == 21 or n == 12 or n == 10: return False return True""" if __name__ == '__main__': assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(12) == False assert palindromic(21) == False assert palindromic(31) == False """assert type(palindromic(11)) == str assert palindromic(11) == '11' assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(21) == False assert palindromic(12) == False assert palindromic(10) == False""" def palindromic(n): a = str(n) if a[0] == a[-1]: return True return False if __name__ == '__main__': assert palindromic(1) == False assert palindromic(2) == False assert palindromic(3) == False assert palindromic(11) == True assert palindromic(22) == True assert palindromic(33) == True assert palindromic(12) == False assert palindromic(21) == False assert palindromic(31) == False def palindromic(n): a = str(n) if a == '1': return False if a[0] == a[-1]: return True return False if __name__ 
def palindromic(n):
    """Return True when the decimal digits of n start and end with the same digit.

    Single-digit numbers are deliberately rejected: this notebook treats
    them as non-palindromic.
    """
    digits = str(n)
    # need at least two digits, then compare the outermost pair
    return len(digits) > 1 and digits[0] == digits[-1]
def palindromic2(n):
    """True when n (with two or more digits) reads the same forwards and backwards."""
    forward = str(n)
    if len(forward) == 1:
        # single digits are excluded by the notebook's convention
        return False
    # a palindrome equals its own reversal
    return forward == forward[::-1]
def palindromic3(n):
    """Return True when n reads the same forwards and backwards.

    Single-digit numbers are treated as non-palindromic, matching the
    convention used throughout this notebook. Accepts ints or digit strings.
    """
    digits = str(n)
    if len(digits) == 1:
        return False
    return digits == digits[::-1]


def palindromic4():
    """Largest palindrome made from the product of two 2-digit numbers.

    Collects products as ints and takes max(), which is order-independent --
    the previous version returned int(lista[-1]) and silently depended on
    the loop's iteration order and on string values.

    Returns
    -------
    int
        9009 (= 91 * 99), per the problem statement.
    """
    palindromes = []
    for i in range(1, 100):
        for j in range(1, 100):
            product = i * j
            if palindromic3(product):
                palindromes.append(product)
    return max(palindromes)
def palindromic3(n):
    """Return True when n is a palindrome of at least three digits.

    One- and two-digit values are rejected, as in the original: products
    of interest here are always larger than that.
    """
    digits = str(n)
    if len(digits) < 3:
        return False
    return digits == digits[::-1]


def palindromic4():
    """Largest palindrome made from the product of two 3-digit numbers.

    Fixes the original ``range(100, 1001)``, which wrongly admitted the
    4-digit factor 1000 (3-digit numbers are 100..999; the final answer
    happened to be unaffected). Also starts the inner loop at ``i`` since
    i*j == j*i, halving the work.

    Returns
    -------
    int
        906609 (= 913 * 993).
    """
    best = 0
    for i in range(100, 1000):
        # products are symmetric, so only consider j >= i
        for j in range(i, 1000):
            product = i * j
            # cheap comparison first, palindrome test only when it could win
            if product > best and palindromic3(product):
                best = product
    return best
github_jupyter
def npy_to_gif(npy, filename):
    """Write a sequence of image frames to disk as a GIF animation at 10 fps.

    Parameters
    ----------
    npy : array-like of frames, iterable over individual images
    filename : output path for the GIF
    """
    mpy.ImageSequenceClip(list(npy), fps=10).write_gif(filename)
カメラの位置姿勢の計算 ``` # def eulerAnglesToRotationMatrix(euler): # R_x = np.array([[ 1, 0, 0], # [ 0, np.cos(euler[0]), -np.sin(euler[0])], # [ 0, np.sin(euler[0]), np.cos(euler[0])]]) # R_y = np.array([[ np.cos(euler[1]), 0, np.sin(euler[1])], # [ 0, 1, 0], # [-np.sin(euler[1]), 0, np.cos(euler[1])]]) # R_z = np.array([[ np.cos(euler[2]), -np.sin(euler[2]), 0], # [ np.sin(euler[2]), np.cos(euler[2]), 0], # [ 0, 0, 1]]) # R = np.dot(R_z, np.dot(R_y, R_x)) # return R XYZ = [] RPY = [] V_x = [] V_y = [] V_z = [] for frame in vid[:500:25]: # 全部処理すると重いので… frame = frame[...,::-1] # BGR2RGB frame = cv2.resize(frame, (360, 640)) corners, ids, _ = cv2.aruco.detectMarkers(frame, aruco_dict) rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(corners, marker_length, mtx, dist) R = cv2.Rodrigues(rvec)[0] # 回転ベクトル -> 回転行列 R_T = R.T T = tvec[0].T xyz = np.dot(R_T, - T).squeeze() XYZ.append(xyz) rpy = np.deg2rad(cv2.RQDecomp3x3(R_T)[0]) RPY.append(rpy) # print(rpy) # rpy = cv2.decomposeProjectionMatrix(np.hstack([R_T, -T]))[6] # [0~5]は使わない # rpy = np.deg2rad(rpy.squeeze()) # print(rpy) # r = np.arctan2(-R_T[2][1], R_T[2][2]) # p = np.arcsin(R_T[2][0]) # y = np.arctan2(-R_T[1][0], R_T[0][0]) # rpy = - np.array([r, p, y]) # print(rpy) # from scipy.spatial.transform import Rotation # diff = eulerAnglesToRotationMatrix(rpy) - R_T # print(diff.astype(np.float16)) # diff = Rotation.from_euler('xyz', rpy).as_matrix() - R_T # print(diff.astype(np.float16)) V_x.append(np.dot(R_T, np.array([1,0,0]))) V_y.append(np.dot(R_T, np.array([0,1,0]))) V_z.append(np.dot(R_T, np.array([0,0,1]))) # ---- 描画 # cv2.aruco.drawDetectedMarkers(frame, corners, ids, (0,255,255)) # cv2.aruco.drawAxis(frame, mtx, dist, rvec, tvec, marker_length/2) # cv2.imshow('frame', frame) # cv2.waitKey(1) # ---- cv2.destroyAllWindows() def plot_all_frames(elev=90, azim=270): frames = [] for t in tqdm(range(len(XYZ))): fig = plt.figure(figsize=(4,3)) ax = Axes3D(fig) ax.view_init(elev=elev, azim=azim) ax.set_xlim(-2, 2); 
ax.set_ylim(-2, 2); ax.set_zlim(-2, 2) ax.set_xlabel("x"); ax.set_ylabel("y"); ax.set_zlabel("z") x, y, z = XYZ[t] ux, vx, wx = V_x[t] uy, vy, wy = V_y[t] uz, vz, wz = V_z[t] # draw marker ax.scatter(0, 0, 0, color="k") ax.quiver(0, 0, 0, 1, 0, 0, length=1, color="r") ax.quiver(0, 0, 0, 0, 1, 0, length=1, color="g") ax.quiver(0, 0, 0, 0, 0, 1, length=1, color="b") ax.plot([-1,1,1,-1,-1], [-1,-1,1,1,-1], [0,0,0,0,0], color="k", linestyle=":") # draw camera if t < 5: ax.quiver(x, y, z, ux, vx, wx, length=0.5, color="k") ax.quiver(x, y, z, uy, vy, wy, length=0.5, color="k") ax.quiver(x, y, z, uz, vz, wz, length=0.5, color="k") else: ax.quiver(x, y, z, ux, vx, wx, length=0.5, color="r") ax.quiver(x, y, z, uy, vy, wy, length=0.5, color="g") ax.quiver(x, y, z, uz, vz, wz, length=0.5, color="b") # save for animation fig.canvas.draw() frames.append(np.array(fig.canvas.renderer.buffer_rgba())) plt.close() return frames frames = plot_all_frames(elev=105, azim=270) npy_to_gif(frames, "src/sample1.gif"); Image(url='src/sample1.gif') frames = plot_all_frames(elev=165, azim=270) npy_to_gif(frames, "src/sample2.gif"); Image(url='src/sample2.gif') plt.title("xyz"); plt.plot(XYZ); plt.show() # 青:x, 橙:y, 緑:z plt.title("rpy"); plt.plot(RPY); plt.show() # 青:r, 橙:p, 緑:y plt.title("(v_x)"); plt.plot(V_x); plt.show() plt.title("(v_y)"); plt.plot(V_y); plt.show() plt.title("(v_z)"); plt.plot(V_z); plt.show() ```
github_jupyter
def fn(a, b, c, d):
    """Toy model: elementwise cos(cos(a + b + c + d)).

    A chain of pointwise operations -- exactly the kind of graph a fusing
    compiler (NNC / NVFuser) can collapse into a single kernel.
    """
    summed = a + b + c + d
    return summed.cos().cos()
def compiler_fn(fx_module: torch.fx.GraphModule, _):
    """AOT Autograd "compiler" that just prints the traced graph's Python code.

    The second argument (example inputs) is ignored. Returning the module
    unchanged means execution falls back to plain eager interpretation of
    the traced graph -- no actual compilation happens here.
    """
    traced = fx_module
    print(traced.code)
    return traced
def bench(fn, args, prefix):
    """Time forward and backward passes of fn and print mean latencies in microseconds.

    Runs a warm-up phase first, then times each phase separately over many
    iterations; the loss reduction (.sum()) is deliberately excluded from
    both timed regions.
    """
    n_warmup, n_timed = 10, 100
    # warm-up: let caches / lazy compilation settle before measuring
    for _ in range(n_warmup):
        fn(*args).sum().backward()
    fwd_times = []
    bwd_times = []
    for _ in range(n_timed):
        # clear gradients so every backward pass does identical work
        for arg in args:
            arg.grad = None
        t0 = time.perf_counter()
        out = fn(*args)
        t1 = time.perf_counter()
        loss = out.sum()
        t2 = time.perf_counter()
        loss.backward()
        t3 = time.perf_counter()
        fwd_times.append(t1 - t0)
        bwd_times.append(t3 - t2)
    mean_fwd_us = statistics.mean(fwd_times) * 10**6
    mean_bwd_us = statistics.mean(bwd_times) * 10**6
    print(prefix, "Fwd = " + str(mean_fwd_us) + " us", "Bwd = " + str(mean_bwd_us) + " us", sep=', ')
However, such restriction does not exist in the backward graph. ## Recomputation (aka Activation Checkpointing) Recomputation (often called activation checkpointing) is a technique in which, instead of saving some activations for use in backwards, we recompute them **during** the backwards pass. Recomputing saves memory, but we incur performance overhead. However, in the presence of fusing compiler, we can do better that that. We can recompute the fusion-friendly operators to save memory, and then rely on the fusing compiler to fuse the recomputed operators. This reduces both memory and runtime. Please refer to this [discuss post](https://dev-discuss.pytorch.org/t/min-cut-optimal-recomputation-i-e-activation-checkpointing-with-aotautograd/467) for more details. Here, we use AOT Autograd with NNC to perform similar type of recomputation. At the end of `__torch_dispatch__` tracing, AOT Autograd has a forward graph and joint forward-backward graph. AOT Autograd then uses a partitioner to isolate the forward and backward graph. In the example above, we used a default partitioner. For this experiment, we will use another partitioner called `min_cut_rematerialization_partition` to perform smarter fusion-aware recomputation. The partitioner is configurable and one can write their own partitioner to plug it in AOT Autograd. ``` from functorch.compile import min_cut_rematerialization_partition # Lets set up the partitioner. Also set the fwd and bwd compilers to the printer function that we used earlier. # This will show us how the recomputation has modified the graph. aot_fn = aot_function(fn, fw_compiler=compiler_fn, bw_compiler=compiler_fn, partition_fn=min_cut_rematerialization_partition) res = aot_fn(a, b, c, d) ``` We can see that compared to default partitioner, forward pass now outputs fewer tensors, and recomputes some operations in the backward pass. 
Let us try NNC compiler now to perform operator fusions (note that we also have a wrapper function - `memory_efficient_fusion` which internally uses `min_cut_rematerialization_partition` and Torchscript compiler to achieve the same effect as following code). ``` # Lets set up the partitioner and NNC compiler. aot_recompute_nnc_fn = aot_function(fn, fw_compiler=ts_compile, bw_compiler=ts_compile, partition_fn=min_cut_rematerialization_partition) # Correctness checking. Lets clone the input so that we can check grads. cloned_inputs = [x.clone().detach().requires_grad_(True) for x in (a, b, c, d)] cloned_a, cloned_b, cloned_c, cloned_d = cloned_inputs res = aot_recompute_nnc_fn(*cloned_inputs) loss = res.sum() loss.backward() assert torch.allclose(ref, res) assert torch.allclose(a.grad, cloned_a.grad) assert torch.allclose(b.grad, cloned_b.grad) assert torch.allclose(c.grad, cloned_c.grad) assert torch.allclose(d.grad, cloned_d.grad) ``` Finally, lets benchmark the different functions ``` bench(fn, large_inputs, "Eager") bench(aot_nnc_fn, large_inputs, "AOT") bench(aot_recompute_nnc_fn, large_inputs, "AOT_Recomp") ``` We observe that both forward and backward latency improve over the default partitioner (and a lot better than eager). Fewer outputs in the forward pass and fewer inputs in the backward pass, along with fusion, allows better memory bandwidth utilization leading to further speedups.
github_jupyter
# map each bay-area county name to its 3-digit county FIPS code
bayarea = {
    'Alameda': '001',
    'Contra Costa': '013',
    'Marin': '041',
    'Napa': '055',
    'San Francisco': '075',
    'San Mateo': '081',
    'Santa Clara': '085',
    'Solano': '095',
    'Sonoma': '097',
}
# prefix with California's state FIPS code (06) to get the full 5-digit codes
state_counties = ['06' + county for county in bayarea.values()]
``` # load the network nodes nodes = pd.read_csv('data/network/bay_area_tertiary_strongly_nodes.csv') nodes = nodes.set_index('osmid') assert nodes.index.is_unique # haversine requires data in form of [lat, lng] and inputs/outputs in units of radians nodes_rad = np.deg2rad(nodes[['y', 'x']]) homes_rad = np.deg2rad(df[['HYCORD', 'HXCORD']]) works_rad = np.deg2rad(df[['WYCORD', 'WXCORD']]) # build the tree for fast nearest-neighbor search tree = BallTree(nodes_rad, metric='haversine') # query the tree for nearest node to each home idx = tree.query(homes_rad, return_distance=False) df['HNODE'] = nodes.iloc[idx[:,0]].index # query the tree for nearest node to each workplace idx = tree.query(works_rad, return_distance=False) df['WNODE'] = nodes.iloc[idx[:,0]].index ``` ## Save to disk ``` # how many home and work locations are so close that they resolve to same network node? len(df[df['HNODE']==df['WNODE']]) # save ODs to disk df_save = df[['HNODE', 'WNODE']].rename(columns={'HNODE':'orig', 'WNODE':'dest'}) df_save.to_csv('data/od.csv', index=True, encoding='utf-8') ```
github_jupyter
# Eaton method with well log Pore pressure prediction with Eaton's method using well log data. Steps: 1. Calculate Velocity Normal Compaction Trend 2. Optimize for Eaton's exponent n 3. Predict pore pressure using Eaton's method ``` import warnings warnings.filterwarnings(action='ignore') # for python 2 and 3 compatibility # from builtins import str # try: # from pathlib import Path # except: # from pathlib2 import Path #-------------------------------------------- import sys ppath = "../.." if ppath not in sys.path: sys.path.append(ppath) #-------------------------------------------- from __future__ import print_function, division, unicode_literals %matplotlib inline import matplotlib.pyplot as plt plt.style.use(['seaborn-paper', 'seaborn-whitegrid']) plt.rcParams['font.sans-serif']=['SimHei'] plt.rcParams['axes.unicode_minus']=False import numpy as np import pygeopressure as ppp ``` ## 1. Calculate Velocity Normal Compaction Trend Create survey with the example survey `CUG`: ``` # set to the directory on your computer SURVEY_FOLDER = "C:/Users/yuhao/Desktop/CUG_depth" survey = ppp.Survey(SURVEY_FOLDER) ``` Retrieve well `CUG1`: ``` well_cug1 = survey.wells['CUG1'] ``` Get velocity log: ``` vel_log = well_cug1.get_log("Velocity") ``` View velocity log: ``` fig_vel, ax_vel = plt.subplots() ax_vel.invert_yaxis() vel_log.plot(ax_vel) well_cug1.plot_horizons(ax_vel) # set fig style ax_vel.set(ylim=(5000,0), aspect=(5000/4600)*2) ax_vel.set_aspect(2) fig_vel.set_figheight(8) ``` Optimize for NCT coefficients a, b: `well.params['horizon']['T20']` returns the depth of horizon T20. 
``` a, b = ppp.optimize_nct( vel_log=well_cug1.get_log("Velocity"), fit_start=well_cug1.params['horizon']["T16"], fit_stop=well_cug1.params['horizon']["T20"]) ``` And use a, b to calculate normal velocity trend ``` from pygeopressure.velocity.extrapolate import normal_log nct_log = normal_log(vel_log, a=a, b=b) ``` View fitted NCT: ``` fig_vel, ax_vel = plt.subplots() ax_vel.invert_yaxis() # plot velocity vel_log.plot(ax_vel, label='Velocity') # plot horizon well_cug1.plot_horizons(ax_vel) # plot fitted nct nct_log.plot(ax_vel, color='r', zorder=2, label='NCT') # set fig style ax_vel.set(ylim=(5000,0), aspect=(5000/4600)*2) ax_vel.set_aspect(2) ax_vel.legend() fig_vel.set_figheight(8) ``` Save fitted nct: ``` # well_cug1.params['nct'] = {"a": a, "b": b} # well_cug1.save_params() ``` ## 2. Optimize for Eaton's exponent n First, we need to preprocess velocity. Velocity log processing (filtering and smoothing): ``` vel_log_filter = ppp.upscale_log(vel_log, freq=20) vel_log_filter_smooth = ppp.smooth_log(vel_log_filter, window=1501) ``` Veiw processed velocity: ``` fig_vel, ax_vel = plt.subplots() ax_vel.invert_yaxis() # plot velocity vel_log.plot(ax_vel, label='Velocity') # plot horizon well_cug1.plot_horizons(ax_vel) # plot processed velocity vel_log_filter_smooth.plot(ax_vel, color='g', zorder=2, label='Processed', linewidth=1) # set fig style ax_vel.set(ylim=(5000,0), aspect=(5000/4600)*2) ax_vel.set_aspect(2) ax_vel.legend() fig_vel.set_figheight(8) ``` We will use the processed velocity data for pressure prediction. 
Optimize Eaton's exponent `n`:

```
n = ppp.optimize_eaton(
    well=well_cug1,
    vel_log=vel_log_filter_smooth,
    obp_log="Overburden_Pressure",
    a=a, b=b)
```

See the RMS error variation with `n`:

```
from pygeopressure.basic.plots import plot_eaton_error

fig_err, ax_err = plt.subplots()
plot_eaton_error(
    ax=ax_err,
    well=well_cug1,
    vel_log=vel_log_filter_smooth,
    obp_log="Overburden_Pressure",
    a=a, b=b)
```

Save optimized n:

```
# well_cug1.params['nct'] = {"a": a, "b": b}
# well_cug1.save_params()
```

## 3. Predict pore pressure using Eaton's method

Calculating pore pressure with Eaton's method requires velocity, Eaton's exponent, normal velocity, hydrostatic pressure and overburden pressure. `Well.eaton()` will try to read saved data; users only need to specify them when they are different from the saved ones.

```
pres_eaton_log = well_cug1.eaton(vel_log_filter_smooth, n=n)
```

View predicted pressure:

```
fig_pres, ax_pres = plt.subplots()
ax_pres.invert_yaxis()

well_cug1.get_log("Overburden_Pressure").plot(ax_pres, 'g', label='Lithostatic')
ax_pres.plot(well_cug1.hydrostatic, well_cug1.depth, 'g', linestyle='--', label="Hydrostatic")
pres_eaton_log.plot(ax_pres, color='blue', label='Pressure_Eaton')
well_cug1.plot_horizons(ax_pres)

# set figure and axis size
ax_pres.set_aspect(2/50)
ax_pres.legend()
fig_pres.set_figheight(8)
```
github_jupyter
# Matrix Addition In this exercises, you will write a function that accepts two matrices and outputs their sum. Think about how you could do this with a for loop nested inside another for loop. ``` ### TODO: Write a function called matrix_addition that ### calculate the sum of two matrices ### ### INPUTS: ### matrix A _ an m x n matrix ### matrix B _ an m x n matrix ### ### OUPUT: ### matrixSum _ sum of matrix A + matrix B def matrix_addition(matrixA, matrixB): # initialize matrix to hold the results matrixSum = [] # matrix to hold a row for appending sums of each element # TODO: write a for loop within a for loop to iterate over # the matrices for r in range(len(matrixA)): row = [] for c in range(len(matrixA[0])): row.append(matrixA[r][c] + matrixB[r][c]) matrixSum.append(row) # TODO: As you iterate through the matrices, add matching # elements and append the sum to the row variable # TODO: When a row is filled, append the row to matrixSum. # Then reinitialize row as an empty list return matrixSum ### When you run this code cell, your matrix addition function ### will run on the A and B matrix. A = [ [2,5,1], [6,9,7.4], [2,1,1], [8,5,3], [2,1,6], [5,3,1] ] B = [ [7, 19, 5.1], [6.5,9.2,7.4], [2.8,1.5,12], [8,5,3], [2,1,6], [2,33,1] ] matrix_addition(A, B) ``` ### Vectors versus Matrices What happens if you run the cell below? Here you are adding two vectors together. Does your code still work? ``` matrix_addition([4, 2, 1], [5, 2, 7]) ``` Why did this error occur? Because your code assumes that a matrix is a two-dimensional grid represented by a list of lists. But a horizontal vector, which can also be considered a matrix, is a one-dimensional grid represented by a single list. What happens if you store a vector as a list of lists like [[4, 2, 1]] and [[5, 2, 7]]? Does your function work? Run the code cell below to find out. ``` matrix_addition([[4, 2, 1]], [[5, 2, 7]]) ``` ### Test your Code Run the cell below. 
If there is no output, then your results are as expected. ``` assert matrix_addition([ [1, 2, 3]], [[4, 5, 6]]) == [[5, 7, 9]] assert matrix_addition([ [4]], [ [5]]) == [[9]] assert matrix_addition([[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]) == [[8, 10, 12], [14, 16, 18]] ```
github_jupyter
### Details on the hardware used to gather the performance data ``` import pandas as pd from collections import OrderedDict as odict #name, cache-size (in kB) hardware = odict({}) hardware['i5'] = ('Intel Core i5-6600 @ 3.30GHz (2x 8GB DDR4, 4 cores)',6144, '1 MPI task x 4 OpenMP threads (1 per core)') hardware['skl'] = ('2x Intel Xeon 8160 (Skylake) at 2.10 GHz (12x 16GB DDR4, 2x 24 cores)',2*33000, '2 MPI tasks (1 per socket) x 24 OpenMP threads (1 per core)') hardware['knl'] = ('Intel Xeon Phi 7250 (Knights Landing) at 1.40 GHz (16GB MCDRAM, 68 cores)',34000, '1 MPI task x 136 OpenMP hyperthreads (2 per core)') hardware['gtx1060'] = ('Nvidia GeForce GTX 1060 (6GB global memory)',1572.864, '1 MPI task per GPU') hardware['p100'] = ('Nvidia Tesla P100-PCIe (16GB global memory)',4194.304, '1 MPI task per GPU') hardware['v100'] = ('Nvidia Tesla V100-PCIe (16GB global memory)',6291.456, '1 MPI task per GPU') hardware['p100nv'] = ('Nvidia Tesla P100-Power8 (16GB global memory)',4194.304, '1 MPI task per GPU') hardware['v100nv'] = ('Nvidia Tesla V100-Power9 (16GB global memory)',6291.456, '1 MPI task per GPU') memory =odict({}) #find with 'dmidecode --type 17' #name, I/O bus clockrate (MHz) , buswidth (bit), size (MB), memory['i5'] = ('2x 8GB Kingston DDR4', 1066, 2*64, 2*8192) #ECC: no (actually it is DDR4-2400 but i5 has max DDR4-2133) memory['skl'] = ('12x 16GB DDR4',1333,12*64,12*16000) #ECC: ? memory['knl'] = ('MCDRAM',None,None,16000) #ECC: ? 
memory['gtx1060'] = ('on-card global memory',4004,192,6069) #ECC: no memory['p100'] = ('on-card global memory',715,4096,16276) # ECC: yes memory['v100'] = ('on-card global memory',877,4096,16152) # ECC: yes compiler=odict({}) compiler['i5'] = ('mpic++ (gcc-5.4) -mavx -mfma -O3 -fopenmp') compiler['skl'] = ('mpiicc-17.0.4 -mt_mpi -xCORE-AVX512 -mtune=skylake -O3 -restrict -fp-model precise -fimf-arch-consistency=true -qopenmp') compiler['knl'] = ('mpiicc-17.0.4 -mt_mpi -xMIC-AVX512 -O3 -restrict -fp-model precise -fimf-arch-consistency=true -qopenmp') compiler['gtx1060'] = ('nvcc-7.0 --compiler-bindir mpic++ (gcc-5.4) -O3 -arch sm_35') compiler['p100'] = ('nvcc-8.0 --compiler-bindir mpic++ (gcc-5.4) -O3 -arch sm_60 -Xcompiler "-O3 -mavx -mfma"') compiler['v100'] = ('nvcc-8.0 --compiler-bindir mpic++ (gcc-5.4) -O3 -arch sm_60 -Xcompiler "-O3 -mavx -mfma"') df = pd.DataFrame(hardware) df = df.transpose() df.columns= ['device-name', 'cache-size-kB','single-node configuration'] com = pd.DataFrame(compiler, index=['compiler flags']) com = com.transpose() com df = df.join(com) mem = pd.DataFrame(memory) mem = mem.transpose() mem.columns = ['mem-description', 'clockrate-MHz', 'buswidth-bit', 'size-MB'] df=df.join(mem) #df ``` From the available data we can compute the theoretical memory bandwidth via $$bw = 2*clockrate*buswidth$$ where the '2' is for double data rate (DDR) ``` df['bandwidth'] = 2*df['clockrate-MHz']*1e6*df['buswidth-bit']/8/1e9 #df ``` Let us compare the theoretical bandwidth with our previously measured peak bandwidth from axpby ``` exp = pd.read_csv('performance.csv',delimiter=' ') exp.set_index('arch',inplace=True) exp.index.name = None df = df.join(exp['axpby_bw']) df['mem_efficiency']=df['axpby_bw']/df['bandwidth']*100 pd.set_option('display.float_format', lambda x: '%.2f' % x) df ``` Let us write a summarized LateX table to be used for publication ``` pd.set_option('precision',3) file = df.loc[:,['device-name','single-node 
configuration']]#,'bandwidth']] #file.loc['knl','bandwidth'] = '>400' file.columns = ['device description', 'single-node configuration']#, 'bandwidth [GB/s]'] filename='hardware.tex' df.loc['knl','bandwidth'] = '$>$400' pd.set_option('display.max_colwidth', 200) with open(filename, 'wb') as f: f.write(bytes(file.to_latex( column_format='@{}lp{6.5cm}p{5cm}@{}', bold_rows=True),'UTF-8')) file ```
github_jupyter
``` import tensorflow as tf from tensorflow import keras from keras.preprocessing.image import ImageDataGenerator import scipy import os import cv2 import random from skimage import io import seaborn as sns from matplotlib import pyplot import pandas as pd import tensorflow.keras.backend as K import numpy as np np.random.seed(13) from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt NasNet = tf.keras.models.load_model('my_firstModel') MobileNet = tf.keras.models.load_model('my_secondModel') MobileNetV2 = tf.keras.models.load_model('my_thirdModel') EfficientNet = tf.keras.models.load_model('my_forthModel') TrainedModel_0 = [NasNet,MobileNet,MobileNetV2,EfficientNet] TrainedModel_1 = [NasNet,MobileNet] TrainedModel_2 = [MobileNetV2,EfficientNet] TrainedModel_3 = [NasNet,EfficientNet] TrainedModel_4 = [MobileNet,MobileNetV2] TrainedModel_5 = [NasNet,MobileNet,MobileNetV2] TrainedModel_6 = [MobileNet,MobileNetV2,EfficientNet] TrainedModel_7 = [NasNet,MobileNet,EfficientNet] TrainedModel_8 = [NasNet,MobileNetV2,EfficientNet] #Changed to working directory to the location of the dataset and loaded the dataset description for local host. 
os.chdir("..\kaggle_3m") brain_df = pd.read_csv('data_frame.csv') #For google colab #brain_df = pd.read_csv('/content/drive/MyDrive/kaggle_3m/Gdrive.csv') # Here we decided to drop unncessary coloums from the dataset brain_df_train = brain_df.drop(columns=['Unnamed: 0', 'patient'], axis=1) brain_df_train['diagnosis'] = brain_df['diagnosis'].apply(lambda x: str(x)) #changes the type of the values of the column to sting brain_df_train.info() train, test = train_test_split(brain_df_train, test_size=0.15) #splits the data into training and testing sets datagen = ImageDataGenerator(rescale=1./255., validation_split=0.1) train_generator = datagen.flow_from_dataframe(train, directory='./', x_col='image_path', y_col='diagnosis', subset='training', class_mode='categorical', batch_size=16, shuffle=True, target_size=(224,224) ) valid_generator = datagen.flow_from_dataframe(train, directory='./', x_col='image_path', y_col='diagnosis', subset='validation', class_mode='categorical', batch_size=16, shuffle=True, target_size=(224,224) ) test_datagen = ImageDataGenerator(rescale=1./255.) 
test_generator = test_datagen.flow_from_dataframe(test, directory='./', x_col='image_path', y_col='diagnosis', class_mode='categorical', batch_size=16, shuffle=False, target_size=(224,224) ) #For TrainedModel_0 labels_0 = [] for m in TrainedModel_0: predicts = np.argmax(m.predict(test_generator), axis=1) labels_0.append(predicts) # Ensemble with voting labels_0 = np.array(labels_0) labels_0 = np.transpose(labels_0, (1, 0)) labls_0 = scipy.stats.mode(labels_0,axis=1)[0] labls_0 = np.squeeze(labls_0) #For TrainedModel_1 labels_1 = [] for m in TrainedModel_1: predicts = np.argmax(m.predict(test_generator), axis=1) labels_1.append(predicts) # Ensemble with voting labels_1 = np.array(labels_1) labels_1 = np.transpose(labels_1, (1, 0)) labls_1 = scipy.stats.mode(labels_1,axis=1)[0] labls_1 = np.squeeze(labls_1) #For TrainedModel_2 labels_2 = [] for m in TrainedModel_2: predicts = np.argmax(m.predict(test_generator), axis=1) labels_2.append(predicts) # Ensemble with voting labels_2 = np.array(labels_2) labels_2 = np.transpose(labels_2, (1, 0)) labls_2 = scipy.stats.mode(labels_2,axis=1)[0] labls_2 = np.squeeze(labls_2) #For TrainedModel_3 labels_3 = [] for m in TrainedModel_3: predicts = np.argmax(m.predict(test_generator), axis=1) labels_3.append(predicts) # Ensemble with voting labels_3 = np.array(labels_3) labels_3 = np.transpose(labels_3, (1, 0)) labls_3 = scipy.stats.mode(labels_3,axis=1)[0] labls_3 = np.squeeze(labls_3) #For TrainedModel_4 labels_4 = [] for m in TrainedModel_4: predicts = np.argmax(m.predict(test_generator), axis=1) labels_4.append(predicts) # Ensemble with voting labels_4 = np.array(labels_4) labels_4 = np.transpose(labels_4, (1, 0)) labls_4 = scipy.stats.mode(labels_4,axis=1)[0] labls_4 = np.squeeze(labls_4) #For TrainedModel_5 labels_5 = [] for m in TrainedModel_5: predicts = np.argmax(m.predict(test_generator), axis=1) labels_5.append(predicts) # Ensemble with voting labels_5 = np.array(labels_5) labels_5 = np.transpose(labels_5, (1, 0)) labls_5 
= scipy.stats.mode(labels_5,axis=1)[0] labls_5 = np.squeeze(labls_5) #For TrainedModel_6 labels_6 = [] for m in TrainedModel_6: predicts = np.argmax(m.predict(test_generator), axis=1) labels_6.append(predicts) # Ensemble with voting labels_6 = np.array(labels_6) labels_6 = np.transpose(labels_6, (1, 0)) labls_6 = scipy.stats.mode(labels_6,axis=1)[0] labls_6 = np.squeeze(labls_6) #For TrainedModel_7 labels_7 = [] for m in TrainedModel_7: predicts = np.argmax(m.predict(test_generator), axis=1) labels_7.append(predicts) # Ensemble with voting labels_7 = np.array(labels_7) labels_7 = np.transpose(labels_7, (1, 0)) labls_7 = scipy.stats.mode(labels_7,axis=1)[0] labls_7 = np.squeeze(labls_7) #For TrainedModel_8 labels_8 = [] for m in TrainedModel_8: predicts = np.argmax(m.predict(test_generator), axis=1) labels_8.append(predicts) # Ensemble with voting labels_8 = np.array(labels_8) labels_8 = np.transpose(labels_8, (1, 0)) labls_8 = scipy.stats.mode(labels_8,axis=1)[0] labls_8= np.squeeze(labls_8) original = np.asarray(test['diagnosis']).astype('int') from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import seaborn as sns cm_0 = confusion_matrix(original, labls_0) cm_1 = confusion_matrix(original, labls_1) cm_2 = confusion_matrix(original, labls_2) cm_3 = confusion_matrix(original, labls_3) cm_4 = confusion_matrix(original, labls_4) cm_5 = confusion_matrix(original, labls_5) cm_6 = confusion_matrix(original, labls_6) cm_7 = confusion_matrix(original, labls_7) cm_8 = confusion_matrix(original, labls_8) report_0 = classification_report(original, labls_0, labels = [0,1]) report_1 = classification_report(original, labls_1, labels = [0,1]) report_2 = classification_report(original, labls_2, labels = [0,1]) report_3 = classification_report(original, labls_3, labels = [0,1]) report_4 = classification_report(original, labls_4, labels = [0,1]) report_5 = classification_report(original, labls_5, labels = [0,1]) report_6 = 
classification_report(original, labls_6, labels = [0,1]) report_7 = classification_report(original, labls_7, labels = [0,1]) report_8 = classification_report(original, labls_8, labels = [0,1]) print(report_0) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_0, annot=True) print(report_1) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_1, annot=True) print(report_2) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_2, annot=True) print(report_3) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_3, annot=True) print(report_4) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_4, annot=True) print(report_5) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_5, annot=True) print(report_6) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_6, annot=True) print(report_7) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_7, annot=True) print(report_8) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_8, annot=True) # Here we create a figure instance, and two subplots fig = plt.figure(figsize = (20,20)) # width x height ax1 = fig.add_subplot(2, 2, 1) # row, column, position ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) # We use ax parameter to tell seaborn which subplot to use for this plot sns.heatmap(cm_0, ax=ax1, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) sns.heatmap(cm_1, ax=ax2, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) sns.heatmap(cm_2, ax=ax3, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) sns.heatmap(cm_3, ax=ax4, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) fig = plt.figure(figsize = (20,20)) ax1 = fig.add_subplot(2, 2, 1) ax2 = fig.add_subplot(2, 2, 2) ax3 = fig.add_subplot(2, 2, 3) ax4 = fig.add_subplot(2, 2, 4) sns.heatmap(cm_4, ax=ax1, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) sns.heatmap(cm_5, ax=ax2, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) 
sns.heatmap(cm_6, ax=ax3, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) sns.heatmap(cm_7, ax=ax4, square=True, cbar_kws={'shrink': .3}, annot=True, annot_kws={'fontsize': 12}) pyplot.figure(figsize = (5,5)) sns.heatmap(cm_8, annot=True) #plot graphs data = {'Combo 1 ':93, 'Combo 2':96, 'Combo 3':64, 'Combo 4':64, 'Combo 5':93, 'Combo 6':90, 'Combo 7':93, 'Combo 8':96,'Combo 9':87} names = list(data.keys()) values = list(data.values()) fig = plt.figure(figsize = (10, 5)) # creating the bar plot plt.bar(names, values, color ='blue', width = 0.4) plt.xlabel("The different Combinations") plt.ylabel("Accuracy of the models") plt.title("Graph of Accuracy vs several model combinations") plt.show() # set width of bar barWidth = 0.25 fig = plt.subplots(figsize =(12, 8)) # set height of bar Accuracy = [93, 96, 64, 64, 93, 90, 93, 96, 87] Precision = [91, 96, 64, 64, 91, 96, 91, 96, 90] Recall = [99, 97, 100, 100, 99, 88, 99, 97, 90] # Set position of bar on X axis br1 = np.arange(len(Recall)) br2 = [x + barWidth for x in br1] br3 = [x + barWidth for x in br2] br4 = [x + barWidth for x in br3] br5 = [x + barWidth for x in br4] br6 = [x + barWidth for x in br5] br7 = [x + barWidth for x in br6] br8 = [x + barWidth for x in br7] br9 = [x + barWidth for x in br8] # Make the plot plt.bar(br1, Accuracy, color ='black', width = barWidth, edgecolor ='grey', label ='Accuracy') plt.bar(br2, Recall, color ='yellow', width = barWidth, edgecolor ='grey', label ='Recall') plt.bar(br3, Precision, color ='grey', width = barWidth, edgecolor ='grey', label ='precision') # Adding Xticks plt.xlabel('Combinations', fontweight ='bold', fontsize = 15) plt.ylabel('Performance', fontweight ='bold', fontsize = 15) plt.xticks([r + barWidth for r in range(len(Recall))], ['Combo1', 'Combo2', 'Combo3', 'Combo4', 'Combo5','Combo6', 'Combo7', 'Combo8', 'Combo9']) plt.grid() plt.legend() plt.show() ```
github_jupyter
<div class="alert alert-block alert-info"><b></b> <h1><center> <font color='black'> Homework 01 </font></center></h1> <h2><center> <font color='black'> Introduction and first look at the data </font></center></h2> <h2><center> <font color='black'> BDA - University of Tartu - Spring 2020</font></center></h3> </div> # 1. Python and Pandas (4.5 points) * In this course we are going to use Python, and mainly Pandas data structures and data analysis tool. please make sure that you have went through the practise session carefully. After that please answer the following questions below about the abalone dataset. First, let's import pandas package as pd and read data from the CSV file into a pandas DataFrame. ``` # Remove future warning message import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import pandas as pd data = pd.read_csv("abalone.csv") data ``` `Question 0` is an example of how you should answer the following questions. First, in the `#TODO` cell you have to write the code representing your solution. And in the answer cell write the answer with your own words. It does not have to be a full sentence. **1.0. What is overall memory consumption of the data?** (EXAMPLE) ``` # TODO - here you have to write the code how did you find the answer. data.memory_usage(deep=True).sum() ``` **<font color='red'>Answer:</font>** The total memory usage is 102080 bytes (102,08 kb). **1.1. What are the column names of the dataset?** **(0.25 point)** ``` # TODO data.columns ``` **<font color='red'>Answer:</font>** 'Gender', 'Length', 'Diameter', 'Height', 'Weight', 'Rings' **1.2. How many observations (i.e. rows) are in this data frame?** **(0.25 point)** ``` # TODO data.index ``` **<font color='red'>Answer:</font>** 1000 **1.3. Print the first 5 lines from the dataset. What are the values of feature "Rings" of the printed observations?** **(0.25 point)** ``` # TODO data.head() ``` **<font color='red'>Answer:</font>** 12, 9, 8, 7, 10 **1.4. 
Extract the last 3 rows of the data frame. What is the "Weight" of these abalones?** **(0.25 point)** ``` # TODO data['Weight'].tail(3) ``` **<font color='red'>Answer:</font>** 1.3195, 0.6455, 1.0070 **1.5. What is the value of diameter in the row 577?** **(1 point)** ``` # TODO data.loc[576]['Diameter'] ``` **<font color='red'>Answer:</font>** 0.51 **1.6. What is the mean of the height column?** **(0.5 point)** <br> Hint 1: Use pandas describe() method ``` # TODO data['Height'].describe() ``` **<font color='red'>Answer:</font>** 0.141100 **1.7. Extract the subset of rows of the data frame where gender is M and weight values are below 0.75. What is the mean of diameter in this subset?** **(1 point)** ``` # TODO data[(data['Gender'] == "M") & (data['Weight'] < 0.75)].describe() ``` **<font color='red'>Answer:</font>** 0.342647 **1.8. What is the minimum of length when rings is equal to 18?** **(1 point)** ``` # TODO data[data['Rings'] == 18].describe() ``` **<font color='red'>Answer:</font>** 0.465000 # 2. Data understanding (2.5 point) Collecting, describing, exploring and verifying data quality are <b>very important</b> steps in any data science task. In this exercise the main goal is to understand the attributes in the 'adult' dataset.<br> **2.0. Load data from the `adult.csv` file.** <br> Hint 1: The extension of the filename may not always reflect the structure of data precisely. Are the values comma-separated (csv) or not? ``` import numpy as np import pandas as pd # TODO data = pd.read_table('adult.csv', index_col='X') data ``` **2.1. 
Name all the columns in this dataset and describe the meaning of each column with one sentence.** <br> ``` # TODO data.columns ``` **<font color='red'>Answer:</font>** - age = the current age of the adult person - workclass = the type of work including: private, self-emp-inc, state-gov - education = the latest education level - occupation = the name of the job - capital.gain = a rise in the value of a capital asset (investment or real estate) that gives it a higher worth than the purchase price [(reference)](https://www.investopedia.com/terms/c/capitalgain.asp) - capital.loss = the loss incurred when a capital asset, such as an investment or real estate, decreases in value [(reference)](https://www.investopedia.com/terms/c/capitalloss.asp) - native.country = the country someone is born in or native to - salaries = the annual salary of person in USD - jobsatisfication = a measure of workers' contentedness with their job, whether or not they like the job or individual aspects or facets of jobs, such as nature of work or supervision. [(reference)](https://en.wikipedia.org/wiki/Job_satisfaction) - male = describe the gender is male or not - female = describe the gender is female or not #### 2.1.1. In the following subtasks, you will have to visualise the distribution of the values for different features using appropriate diagrams. Visualisation is one of the easiest ways to get a good overview of the data and to notice any oddities, outliers etc. ``` # Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats # and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shells, # the Jupyter notebook, web application servers, and four graphical user interface toolkits. 
import matplotlib.pyplot as plt # allows to output plots in the notebook %matplotlib inline # Set the default style plt.style.use("ggplot") ``` As an example, here is the age frequency histogram, where age is an attribute of type **numeric**: ``` # Columns may contain non-numeric values, errors or missing values. Therefore, non-numeric values must be dealt with. if data is not ...: pd.to_numeric(data['age'], errors='coerce').hist(bins=30) # ‘coerce’ -> invalid parsing will be set as NaN else: print('Please define `data` in earlier subtasks') ``` **2.2. Attribute `Workclass`. First, specify the type of the attribute (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the distribution in the relative frequency scale.** **<font color='red'>Attribute type :</font>** Nominal ``` # TODO # data.workclass.value_counts().plot(kind="bar") fig = plt.figure(figsize=[16, 8]) ax = plt.subplot(111) ax.hist(data['workclass'], width = 0.5, weights = np.ones(len(data['workclass'])) / len(data['workclass']) ) ax.set_title('Workclass - Distribution in Relative Frequency') plt.text plt.xticks(rotation=90) plt.show() ``` **2.3. Attribute `Education`. First, specify the type of the attribute (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the distribution in the relative frequency scale.** **<font color='red'>Attribute type :</font>** Ordinal ``` # TODO fig = plt.figure(figsize=[16, 8]) ax2 = plt.subplot(111) ax2.hist(data['education'], width = 0.5, weights = np.ones(len(data['education'])) / len(data['education']) ) ax2.set_title('Education - Distribution in Relative Frequency') plt.xticks(rotation=90) plt.show() ``` **2.4. Attribute `Occupation`. 
First, specify the type of the attribute (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the distribution in the relative frequency scale.** **<font color='red'>Attribute type :</font>** Nominal ``` # TODO fig = plt.figure(figsize=[16, 8]) ax3 = plt.subplot(111) ax3.hist(data['occupation'], width = 0.5, weights = np.ones(len(data['occupation'])) / len(data['occupation']) ) ax3.set_title('Occupation - Distribution in Relative Frequency') plt.xticks(rotation=90) plt.show() ``` **2.5. Attribute `Native Country`. First, specify the type of the attribute (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the distribution in the relative frequency scale.** **<font color='red'>Attribute type :</font>** Nominal ``` # TODO fig = plt.figure(figsize=[16, 8]) ax4 = plt.subplot(111) ax4.hist(data['native.country'], width = 1, weights = np.ones(len(data['native.country'])) / len(data['native.country']) ) ax4.set_title('Native Country - Distribution in Relative Frequency') plt.xticks(rotation=90) plt.show() ``` **2.6. Attribute `Salaries`. First, specify the type of the attribute (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the distribution in the density scale.** **<font color='red'>Attribute type :</font>** numeric ``` # TODO import seaborn as sns sns.distplot(data['salaries'], hist=True, kde=True, bins=20, color = 'darkblue', hist_kws={'edgecolor':'black'}, kde_kws={'linewidth': 2}) ``` **2.7. Attributes `Male` and `Female`. 
First, specify the types of these attributes (nominal / ordinal / numeric) and second, choose an appropriate chart type and visualise the relative frequency of positive values in attribute `Male` and positive values in attribute `Female` (relative frequency of males and females).** **<font color='red'>Attribute type :</font>** Nominal ``` # TODO - Put Male and Female attributes on the same plot for comparison data['gender'] = data['male'].apply(lambda x: 'Male' if x == 1.0 else 'Female') # TODO fig = plt.figure(figsize=[8, 8]) ax4 = plt.subplot(111) ax4.hist(data['gender'], width = 0.5, weights = np.ones(len(data['gender'])) / len(data['gender']) ) ax4.set_title('Gender - Distribution in Relative Frequency') plt.xticks(rotation=90) plt.show() data ``` **2.8. Count the number of missing values in each attribute in the dataset and also report the percentage of missing values out of all rows (the original file uses '?' for missing values)** ``` #TODO total_rows = len(data.index) total_missing = 0 if data is not data.empty: for column in data.columns: total_missing = len(data[data[column] == '?']) if total_missing > 0: print (f"Percentage of missing values in attribute {column} = {round((total_missing/total_rows)*100,2)}%") else: print('Please define `data` in earlier subtasks') ``` # 3. Data preparation (1.5 points) Preparing the data for analysis is the most crucial phase in data analysis. In this exercise, the main goal is to clean up the dataset 'adult'. The dataset has been made unclean on purpose so that you can practice cleaning it up. Most of the introduced uncleanliness is of the same kinds as described in slides of Lecture 02. Additionally, some values have been introduced which are non-sensical due to the meaning of the attribute.<br> First, let's make a deep copy of the original DataFrame. Later we will compare it to the cleaned/modified DataFrame. 
``` if data is not data.empty: original_data = data.copy(deep=True) # Make a deep copy, including a copy of the data and the indices ``` **3.1. Sometimes values include leading and/or trailing spaces. Remove leading and trailing whitespace from strings. Count how many rows were changed in the dataset.** <br> ``` # TODO for column in data.columns: if data[column].dtype == 'O' : #dtype == 'O' stands for Python object, to check if data type is string or not data[column] = data[column].str.strip() else: print(f"column {column} is not string") ``` Use code below to count how many rows were affected. ``` # Count differences if data is not data.empty: data_all = pd.concat([original_data, data]).drop_duplicates() diff = data_all.shape[0] - data.shape[0] print ('Difference: ' + str(diff)) ``` **<font color='red'>Answer:</font>** 32561 **3.2. Replace all non-sensical values in `data` with np.nan from numpy package to denote missing values (Original file uses '?' for missing values). Fix the typos in the dataset. Justify all the changes that you do, explaining why you are sure that this is a correct change and what you think the cause for such error could be.** <br> ``` # TODO data_clean = data.replace(['?','privat','UnitedStates', 'Unitedstates', 'Hong'], [np.nan,'Private','United-States', 'United-States', 'Hong-Kong']) data_clean.head(10) ``` **<font color='red'>Answer:</font>** Beside replace `?` with `NaN` value, we also replace this string `['privat','UnitedStates', 'Unitedstates', 'Hong']` into `['Private','United-States', 'United-States', 'Hong-Kong']` to fix the typo and reduce the redudancy of the same value (for example: United-States and UnitedStates is the same value in `native.country`. **3.3. 
Count the rows which were changed in the subtask b** <br> ``` # TODO if data_clean is not data_clean.empty: data_all = pd.concat([data, data_clean]).drop_duplicates() diff = data_all.shape[0] - data_clean.shape[0] print ('Difference: ' + str(diff)) ``` **<font color='red'>Answer:</font>** 4186 **3.4. Report values that look suspicious whereas you are not sure whether they are definitely wrong** <br> **<font color='red'>Answer:</font>** - value `Hong` in column `native.country`, we're not sure is it truly the name of the country `Hong Kong` or another meaning - value in `capital.gain` and `capital.loss` which are containing too many `0` value - value `Assoc-acdm` in `education` column # 4. Gathering interesting facts about the adult dataset (1.5 points) Useful information about the data can be acquired through visualization even before modeling. **4.1. Convert the attribute `Education` into type `category` and introduce the order for this attribute based on the educational level (e.g. Masters is higher education than Bachelors; use your best guess in ordering those levels for which the order is not obvious). Create a bar chart visualising the relative frequencies of values.** ``` # TODO data['education'] = data['education'].astype('category') data['education'].value_counts(normalize = True).reindex(['Doctorate', 'Masters', 'Bachelors', 'Some-college', 'Prof-school', 'Assoc-voc', 'Assoc-acdm', 'HS-grad', '12th', '11th', '10th', '9th', '7th-8th', '5th-6th', '1st-4th', 'Preschool' ]).plot(kind="bar") ``` **4.2. Create and print a Pandas data frame where each row stands for an occupation, each column stands for a level of education, and the cells in the table contain the average salary of people with the corresponding occupation and education level.** <br> Hint 1: You can use crosstab, see also aggfunc from the pandas documentation. 
``` # TODO crosstab_df = pd.crosstab(data.occupation, data.education, values = data.salaries, aggfunc=np.mean).round(2) crosstab_df ``` **4.3. Make a separate plot for each row (occupation) of the table created in subtask b. Choose the type of the plot to convey the information as well as you can.** <br> ``` if crosstab_df is not crosstab_df.empty: for index, row in crosstab_df.iterrows(): row.plot(kind = 'bar') # TODO plt.show() ``` **4.4. List 3 interesting facts that you can read out of these plots from subtask c.** <br> **<font color='red'>Fact 1:</font>** For occupation `armed-forces` it shows that for people who have education `12th` grade have higher salaries than people with `bachelors`, `some-college`. and `HS-grade` background. **<font color='red'>Fact 2:</font>** For occupation `exec-managerial` it shows that for people with education `5th-6th` grade have the highest salaries compare to others. **<font color='red'>Fact 3:</font>** For occupation `farming-fishing` the highest salaries is owned by people with 'Doctorate' education ## How long did it take you to solve the homework? * Please answer as precisely as you can. It does not affect your points or grade in any way. It is okey, if it took 0.5 hours or 24 hours. The collected information will be used to improve future homeworks. <font color='red'> **Answer:**</font> **<font color='red'>(please change X in the next cell into your estimate)</font>** 10 hours ## How much the homework is difficult? you can put only number between $0:10$ ($0:$ easy, $10:$ difficult) <font color='red'> **Answer: 6**</font>
github_jupyter
``` import omnitool from omnitool.literature_values import * import matplotlib matplotlib.rcParams['text.usetex']=False import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ``` We'll use the asteroseismic data from Yu et al. 2018 ``` #Read in Jie Yu print('Reading in Yu+2018') sfile = '/home/oliver/PhD/Catalogues/RC_catalogues/Yu+18_table1.txt' yu18_1 = pd.read_csv(sfile, sep='|') sfile = '/home/oliver/PhD/Catalogues/RC_catalogues/Yu+18_table2.txt' yu18_2 = pd.read_csv(sfile, sep='|') yu18 = pd.merge(yu18_1, yu18_2, on='KICID',how='outer') yu18.rename(columns={'KICID':'KIC', 'EvoPhase':'stage', 'err_x':'numax_err', 'err.1_x':'dnu_err', 'err_y':'Teff_err', 'Fe/H':'[Fe/H]', 'err.2_y':'[Fe/H]_err', 'err.1_y':'logg_err', 'err.3_y':'M_err', 'err.4_y':'R_err'},inplace=True) #For consistency ``` First, lets get our asteroseismic values ``` #Calling the scaling relations class SC = omnitool.scalings(yu18.numax, yu18.dnu, yu18.Teff,\ _numax_err = yu18.numax_err,\ _dnu_err = yu18.dnu_err,\ _Teff_err = yu18.Teff_err) ``` Now lets pull out all the values we can calculate ``` yu18['aR'] = SC.get_radius()/Rsol yu18['aR_err'] = SC.get_radius_err()/Rsol yu18['aM'] = SC.get_mass()/Msol yu18['aM_err'] = SC.get_mass_err()/Msol yu18['alogg'] = SC.get_logg() yu18['alogg_err'] = SC.get_logg_err() yu18['L'] = SC.get_luminosity()/Lsol yu18['L_err'] = SC.get_luminosity_err()/Lsol yu18['Mbol'] = SC.get_bolmag() yu18['Mbol_err'] = SC.get_bolmag_err() ``` Lets plot a quick distribution of points on a HR diagram. ``` sns.jointplot(np.log10(yu18.Teff), np.log10(yu18.L),s=1) plt.show() ``` Now lets say we want to find the K and H band magnitudes of these stars asteroseismically. For this, we need an inverse bolometric correction. 
``` #For the bolomteric correction, we need the total metallicity Z yu18['Z'] = Zsol * 10**yu18['[Fe/H]'] #Now we initialise the class using these values get_bc = omnitool.bolometric_correction(yu18.Teff.values,\ yu18.logg.values,\ yu18.Z.values) ``` We now want to calculate the bolometric correction for these stars in the K, H and J bands ``` KsBC = get_bc(band='Ks') HBC = get_bc(band='H') JBC = get_bc(band='J') ``` Using this value & the bolometric magnitude we can make the inverse bolometric correction ``` MKs_ast = yu18.Mbol.values - KsBC MH_ast = yu18.Mbol.values - HBC MJ_ast = yu18.Mbol.values - JBC ``` If we want to be extra thorough, we propagate the assumed error on the bolometric correction For now this is just guesstimated at 0.02 per Huber et al. 2017, BUT it should be noted that they use a different method of obtaining the BC ``` M_ast_err = np.sqrt(yu18.Mbol_err.values**2 + err_bc**2) ``` Note that this error is the same regardless of passband ``` '''Lets plot a distribution of the results...''' sns.distplot(MKs_ast) plt.xlabel('Asteroseismic Absolute Magnitude (K)') plt.show() print('Median in K: '+str(np.median(MKs_ast))) sns.distplot(MH_ast) plt.xlabel('Asteroseismic Absolute Magnitude (H)') plt.show() print('Median in H: '+str(np.median(MH_ast))) sns.distplot(MJ_ast) plt.xlabel('Asteroseismic Absolute Magnitude (J)') plt.show() print('Median in J: '+str(np.median(MJ_ast))) ``` Lets do a quick plot to illustrate the errors on the data... We'll plot Core He Burning stars only for clarity. ``` sel = yu18.stage == 1 fig, ax = plt.subplots() ax.scatter(MKs_ast[sel],yu18.numax[sel],s=1,zorder=1001) ax.errorbar(MKs_ast[sel],yu18.numax[sel],xerr=M_ast_err[sel], alpha=.5, fmt='none', c='grey',zorder=1000) ax.set_xlabel('Absolute asteroseismic magnitude (K)') ax.set_ylabel(r"Asteroseismic $\nu_{max}}$") plt.show() ``` Finally, lets compare our results to those obtained in the Yu+18 work to ensure they're the same. 
``` sns.jointplot(yu18.aR, yu18.R_noCorrection, s=1) sns.jointplot(yu18.aR_err, yu18.R_err,s=1) plt.show() ``` Now imagine we want to recalculate luminosity, but using a given a correction to the delta nu scaling relations ``` #Calling the scaling relations class SC = omnitool.scalings(yu18.numax, yu18.dnu, yu18.Teff,\ _numax_err = yu18.numax_err,\ _dnu_err = yu18.dnu_err,\ _Teff_err = yu18.Teff_err) SC.give_corrections(fdnu=np.ones(len(yu18))*1.005, fdnu_err=np.ones(len(yu18))*1.005*0.02) yu18['corR'] = SC.get_radius()/Rsol yu18['corR_err'] = SC.get_radius_err()/Rsol sns.jointplot(yu18.R_noCorrection, yu18.corR,s=1) plt.show() sns.jointplot(yu18.R_err, yu18.corR_err,s=1) ``` The change is subtle, but its definitely there! And the uncertainties on the corrections have an effect also.
github_jupyter
## setup and notebook configuration ``` # scientific python stack import numpy as np import scipy as sp import sympy as sym import orthopy, quadpy # matplotlib, plotting setup import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.tri as mtri # delaunay triangulation from mpl_toolkits.mplot3d import Axes3D # surface plotting import seaborn as sns # nice plotting defaults import cmocean as cmo # ocean colormaps sym.init_printing(use_latex='mathjax') sns.set() %matplotlib inline %config InlineBackend.figure_format = 'svg' %load_ext autoreload %autoreload 2 # local imports import src.fem_base.master.mk_basis_nodal as mbn import src.fem_base.master.mk_master as mkm import src.fem_maps.fem_map as fem_map ``` # creation of 1D nodal bases We define the master 1D element as $I\in[-1, 1]$ ``` b = mbn.Basis_nodal(order=1, dim=1, element=0) b.plot_elm() fig, ax = plt.subplots(1, 5, figsize = (10, 1)) xx = np.linspace(-1, 1, 100) pts = [[x, 0, 0] for x in xx] for order in range(5): b = mbn.Basis_nodal(order=order, dim=1, element=0) yy = b.eval_at_pts(pts=pts) for basis_fn in range(b.nb): ax[order].plot(xx, yy[:, basis_fn]) ax[order].set_title(r'$p = {}$'.format(order)) ``` ## construction of vandermonde matrices ``` x = np.linspace(-1, 1, 100) n_polys = 4 vals = orthopy.line_segment.tree_jacobi(x, n=n_polys, alpha=0, beta=0, standardization='normal') for i in range(n_polys): plt.plot(x, vals[i], label='P_{}'.format(i)) plt.legend() plt.title('normalized Legendre polynomials') plt.show() ``` These polynomials agree with the explicitly listed polynomials in Hesthaven, so we know that they are orthonormalized correctly. 
``` def Jacobi_Poly(r, alpha, beta, N): """ wraps orthopy to return Jacobi polynomial """ return orthopy.line_segment.tree_jacobi(r, n=N-1, alpha=alpha, beta=beta, standardization='normal') def P_tilde(r, N): P = np.zeros((len(r), N)) polyvals = Jacobi_Poly(r, alpha=0, beta=0, N=N) for j in range(N): P[:, j] = polyvals[j] return P.T def Vandermonde1D(N, x): """ initialize 1D vandermonde Matrix Vij = phi_j(x_i)""" V1D = np.zeros((len(x), N)) JacobiP = Jacobi_Poly(x, alpha=0, beta=0, N=N) for j, polyvals in enumerate(JacobiP): V1D[:, j] = polyvals return V1D def LegendreGaussLobatto(N): GL = quadpy.line_segment.GaussLobatto(N, a=0., b=0.) return GL.points, GL.weights def GaussLegendre(N): GL = quadpy.line_segment.GaussLegendre(N) return GL.points, GL.weights ``` An important conceptual point is that the Vandermonde matrix here is NOT the shape function matrix, it's the Vandermonde matrix of the Orthonormal polynomial basis. We will see this later as we have to create the shape function matrices. ## properties / conditioning of vandermonde matrices ``` equi_det, LGL_det = [], [] for N in range(2, 35): nb = N + 1 equi_pts = np.linspace(-1, 1, nb) V = Vandermonde1D(nb, equi_pts) equi_det.append(np.linalg.det(V)) LGL_pts, _ = LegendreGaussLobatto(nb) V = Vandermonde1D(nb, LGL_pts) LGL_det.append(np.linalg.det(V)) plt.semilogy(list(range(2, 35)), equi_det, label='equidistant') plt.semilogy(list(range(2, 35)), LGL_det, label='LGL nodes') plt.legend() plt.show() ``` This result agrees with Hesthaven. 
``` # construct generic lagrange interpolant from scipy.interpolate import lagrange def lagrange_polys(pts): lagrange_polys = [] for i, pt in enumerate(pts): data = np.zeros_like(pts) data[i] = 1 lagrange_polys.append(lagrange(pts, data)) return lagrange_polys def lagrange_basis_at_pts(lagrange_polys, eval_pts): """ evaluates lagrange polynomials at eval_pts""" result = np.zeros((len(lagrange_polys) ,len(eval_pts))) for i, poly in enumerate(lagrange_polys): result[i, :] = lagrange_polys[i](eval_pts) return result ``` plot lagrange polys over equally spaced vs LGL points ``` N = 5 lp = np.linspace(-1, 1, N) lpolys = lagrange_polys(lp) vN = 100 view_pts = np.linspace(-1, 1, vN) li = lagrange_basis_at_pts(lpolys, view_pts) plt.plot(view_pts, li.T) plt.title('lagrange polynomials over equally spaced points') plt.show() N = 5 lp, _ = LegendreGaussLobatto(N) lpolys = lagrange_polys(lp) vN = 100 view_pts = np.linspace(-1, 1, vN) li = lagrange_basis_at_pts(lpolys, view_pts) plt.plot(view_pts, li.T) plt.title('lagrange polynomials over LGL points') plt.show() ``` So beautiful! By moving the lagrange data points to the nodal points, our basis functions don't exceed 1, unlike in the above plot, where we are already seeing a slight Runge phenomenon. ## vandermonde relations ### relationship between vandermonde $V$, basis polynomials $\tilde{\mathbf{P}}$, and lagrange basis functions (shape functions) $\ell$ Hesthaven makes the claim that $V^T \mathbf{\ell}(r) = \tilde{\mathbf{P}}(r)$ in (3.3). In Hesthaven's notation, $N$ denotes the polynomial order, $N_p$ denotes the number of nodal points (we would call $nb$), and let's call the number of "view points" `xx`, which are arbitrary. 
Then the shapes of the Hesthaven structures are: - $\mathbf{\ell}$, $\tilde{\mathbf{P}}$, $V$ are all (`nb`, `xx`) - $V^T \ell$ is (`xx`, `nb`) x (`nb`, `xx`) $\rightarrow$ (`xx`, `xx`) where rows contain the values of polynomials $\tilde{\mathbf{P}}$ This works for either equidistant points or the LGL points. ``` N = 5 lp, _ = LegendreGaussLobatto(N) #lp = np.linspace(-1, 1, N) view_pts = np.linspace(-1, 1, 50) l_polys = lagrange_polys(pts=lp) ℓ = lagrange_basis_at_pts(l_polys, eval_pts=view_pts) V = Vandermonde1D(N=len(view_pts), x=lp) P = np.dot(V.T, ℓ) # plot the result plt.plot(view_pts, ℓ.T, '--') plt.plot(view_pts, P[0:3,:].T) plt.show() ``` We see that indeed we recover the Legendre polynomials. More directly, we can invert the relation to find that $$\ell = (V^T)^{-1} \tilde{\mathbf{P}}$$ which allows us to create our nodal shape functions. ``` nb = 4 nodal_pts, _ = LegendreGaussLobatto(nb) view_pts = np.linspace(-1, 1, 50) # create the Vandermonde, P matrices V = Vandermonde1D(N=nb, x=nodal_pts) Vti = np.linalg.inv(V.T) P = P_tilde(r=view_pts, N=nb) print('shape of Vandermonde: {}'.format(V.shape)) print('shape of P: {}'.format(P.shape)) yy = np.dot(Vti, P) plt.plot(view_pts, yy.T) plt.title('nodal shape functions generated from orthogonal basis polynomials') plt.show() ``` ### relationship between vandermonde $V$ and mass matrix We can build on the relationship developed in the section above to form the mass matrix for a nodal basis. We note that $M_{ij} = \int_{-1}^{1}\ell_i(r)\, \ell_j(r) \,dr = (\ell_i, \ell_j)_I$, and if we expand out $\ell = (V^T)^{-1}\tilde{\mathbf{P}}$, it turns out (page 51) $$M = (V V^T)^{-1}$$ because of the orthogonal nature of our choice of basis function; the implication is that we can compute the integrals over the master element without the explicit need for quadrature points or weights. Note first that $\phi_i(\xi) = \sum_{n=1}^{nb} (V^T)_{in}^{-1} \tilde{P}_{n-1}(\xi)$. 
Then \begin{align} M_{ij} &= \int^{1}_{-1} \phi_i(\xi)\,\phi_j(\xi)\,d\xi = \int^{1}_{-1}\left[\sum_{k=1}^{nb} (V^T)_{ik}^{-1} \tilde{P}_{k-1}(\xi) \sum_{m=1}^{nb} (V^T)_{jm}^{-1} \tilde{P}_{m-1}(\xi) \right]\, d\xi \\ &= \sum_{k=1}^{nb} \sum_{m=1}^{nb} (V^T)_{ik}^{-1} (V^T)_{jm}^{-1} \int^{1}_{-1}\tilde{P}_{k-1}(\xi) \tilde{P}_{m-1}(\xi) =\sum_{k=1}^{nb} \sum_{m=1}^{nb} (V^T)_{ik}^{-1} (V^T)_{jm}^{-1} \delta_{km} \\ &=\sum_{k=1}^{nb} (V^T)_{im}^{-1} (V^T)_{jm}^{-1} = \sum_{k=1}^{nb} (V^T)_{mi}^{-1} (V)_{mj}^{-1} \\ &= (V^{T})^{-1} V^{-1} = (VV^T)^{-1} \end{align} Where note we've used the cute trick that $\int_{-1}^1 \tilde{P}_m \tilde{P}_n = \delta_{mn}$, since we chose an __orthonormal__ modal basis. Orthogonal wouldn't have done it, but an orthonormal modal basis has this property. We can check this relation against the more traditional way of constructing the mass matrix with quadrature. `master.shap` has dimensions of (`n_quad`, `nb`) ``` order = 3 m1d = mkm.Master_nodal(order=order, dim=1, element=0) xq, wq = m1d.cube_pts, m1d.cube_wghts shap = m1d.shap shapw = np.dot(np.diag(wq), m1d.shap) M_quadrature = np.dot(shap.T, shapw) Np = order + 1 nodal_points, _ = LegendreGaussLobatto(Np) V = Vandermonde1D(N=Np, x=nodal_points) M_vand = np.linalg.inv(np.dot(V, V.T)) # this will throw an error if not correct assert(np.allclose(M_quadrature, M_vand)) ``` ## efficient computation of derivatives of the basis functions ### derivatives of Legendre polynomials In order to compute the derivatives of the shape functions (which are expressed via the vandermonde matrix $V$), we must take the derivatives with respect to the orthogonal basis polynomials. There is an identity (Hesthaven, p. 
52) $$ \frac{d \tilde{P}_n}{d r} = \sqrt{n(n+1)}\,\tilde{P}^{(1,1)}_{n-1}$$ This is in contrast to directly differentiating either the coefficients of $\tilde{P}$ or more directly the nodal shape functions $\ell$ if the explicit polynomial form is known (like in `scipy`, but this becomes trickier in multiple dimensions). As it turns out, the first approach is a very efficient way to compute these operators. ``` def Jacobi_Poly_Derivative(r, alpha, beta, N): """ take a derivative of Jacobi Poly, more general than above copy the format of orthopy (list of arrays) """ dp = [np.zeros_like(r)] Jacobi_P = Jacobi_Poly(r, alpha + 1, beta + 1, N) for n in range(1, N+1): gamma = np.sqrt(n * (n + alpha + beta + 1)) dp.append(gamma * Jacobi_P[n-1]) return dp #def dP_tilde(r, N): # P = np.zeros((len(r), N)) # polyvals = Jacobi_Poly_Derivative(r, alpha=0, beta=0, N=N) # for j in range(N): # P[:, j] = polyvals[j] # return P ``` We can examine some of the derivatives of the Legendre polynomials. ``` # some unit testing # first jacobi poly is const, so der should be 0 xx = np.linspace(-1, 1, 50) jpd = Jacobi_Poly_Derivative(xx, alpha=0, beta=0, N=3) for i, polyder in enumerate(jpd): plt.plot(xx, polyder, label=r'$P_{}^\prime(x)$'.format(i)) plt.legend() plt.show() ``` These look good. The derivative of the first Legendre polynomial is analytically 0, $P_1^\prime = \sqrt{3/2}$, $P_2^\prime$ should be linear, $P_3^\prime$ should be quadratic. ### discrete derivative operators We can declare the derivative Vandermonde matrix, and invert it in the same manner to obtain the derivatives of the nodal shape functions. This works because $$V^T \ell = P \Rightarrow V^T \frac{d}{dx} \ell = \frac{d}{dx}P$$ Hence $$V_r\equiv V^T D_r^T, \qquad {V_r}_{(ij)} = \frac{d \tilde{P}_j(r_i)}{d x} $$ and finally $D_r = V_r V^{-1}$ (see Hesthaven, p. 53), as well as $S = M D_r $, where $S_{ij} = \left(\phi_i, \frac{d\phi_j}{dx}\right)_I$, and where $M$ is the mass matrix. 
``` def GradVandermonde1D(N, x): Vr = np.zeros((len(x), N)) dJacobi_P = Jacobi_Poly_Derivative(x, alpha=0, beta=0, N=N-1) for j, polyder in enumerate(dJacobi_P): Vr[:,j] = polyder return Vr p = 3 nb = p+1 nodal_pts, _ = LegendreGaussLobatto(nb) #nodal_pts = np.linspace(-1, 1, nb) view_pts = np.linspace(-1, 1, 50) # grad vandermonde V = Vandermonde1D(N=nb, x=nodal_pts) Vr = GradVandermonde1D(N=nb, x=view_pts) Vi = np.linalg.inv(V) Dr = np.dot(Vr, Vi) print('shape Vr: {}'.format(Vr.shape)) print('shape V inv: {}'.format(Vi.shape)) print('shape Dr: {}'.format(Dr.shape)) # shape functions V = Vandermonde1D(N=nb, x=nodal_pts) Vti = np.linalg.inv(V.T) P = P_tilde(r=view_pts, N=nb) shap = np.dot(Vti, P) # shape functions at view points fig, ax = plt.subplots(1, 2, figsize=(8, 4)) ax[0].plot(view_pts, shap.T, '--') ax[0].set_title(r'nodal shape functions $\phi$') ax[1].plot(view_pts, Dr) ax[1].set_title(r'derivatives of nodal shape functions $\frac{d \phi}{dx}$') plt.show() ``` As a remark, we can once again show the effect of using Legendre Gauss Lobatto points vs equally spaced nodal points. 
``` N = 8 nb = N+1 nodal_pts_LGL, _ = LegendreGaussLobatto(nb) nodal_pts_equi = np.linspace(-1, 1, nb) view_pts = np.linspace(-1, 1, 100) # shape functions at view points fig, ax = plt.subplots(figsize=(8, 8)) ax.set_yticks([]) ax.set_xticks([]) labels = ['LGL nodes', 'uniform nodes'] for idx, nodal_pts in enumerate([nodal_pts_LGL, nodal_pts_equi]): # grad vandermonde V = Vandermonde1D(N=nb, x=nodal_pts) Vr = GradVandermonde1D(N=nb, x=view_pts) Vi = np.linalg.inv(V) Dr = np.dot(Vr, Vi) # shape functions V = Vandermonde1D(N=nb, x=nodal_pts) Vti = np.linalg.inv(V.T) P = P_tilde(r=view_pts, N=nb) shap = np.dot(Vti, P) # plot ax = fig.add_subplot(2, 2, idx*2+1) ax.plot(view_pts, shap.T) ax.set_yticks([0, 1]) ax.set_title(r' $\phi$, {}'.format(labels[idx])) ax = fig.add_subplot(2, 2, idx*2+2) ax.plot(view_pts, Dr) ax.set_title(r'$\f{}$, {}'.format('rac{d \phi}{dx}',labels[idx])) plt.subplots_adjust(wspace=0.2, hspace=0.2) fig.suptitle(r'$\phi$ and $\frac{d\phi}{d x}$, LGL vs uniformly-spaced nodes') plt.show() ``` ### remarks on discrete derivative operators Suppose we compute the derivative matrix $D_r$ at the nodal points for some order $p$. There are some interesting properties to understand about these derivative matrices. #### annihilation of constant vectors Note that if we represent a function nodally, i.e., $u = c_i \phi_i(x)$, then $$ \frac{du}{dx} = \frac{d}{dx}(c_i \phi_i(x)) = c_i \frac{d\phi_i}{dx} $$ Therefore, if we want to discretely take a derivative of a function (we'll represent the function on the master element for now, but trivially, we could map it to some other region), it suffices to multiply the derivative operator with the nodal vector $D_r u$. It should be clear, then, that $D_r$ will annihilate any constant vector. 
``` p = 2 nb = p+1 nodal_pts, _ = LegendreGaussLobatto(nb) u = np.ones_like(nodal_pts) # grad vandermonde V = Vandermonde1D(N=nb, x=nodal_pts) Vr = GradVandermonde1D(N=nb, x=nodal_pts) Vi = np.linalg.inv(V) Dr = np.dot(Vr, Vi) duh = np.dot(Dr, u) print(np.max(np.abs(duh))) ``` #### exponential convergence in $p$ for smooth functions If the function of which we are attempting to take the discrete derivative is smooth (in the sense of infinitely differentiable), then we will see exponential convergence to the analytical solution w/r/t polynomial order of the nodal basis. ``` ps = [1, 2, 3, 4, 8, 12, 16, 18, 24, 32] errs = [] fig, ax = plt.subplots(1, 3, figsize=(8, 3)) for p in ps: nb = p+1 nodal_pts, _ = LegendreGaussLobatto(nb) view_pts = np.linspace(-1, 1, 100) # grad vandermonde V = Vandermonde1D(N=nb, x=nodal_pts) Vr = GradVandermonde1D(N=nb, x=nodal_pts) Vi = np.linalg.inv(V) Dr = np.dot(Vr, Vi) # nodal shap V = Vandermonde1D(N=nb, x=nodal_pts) Vti = np.linalg.inv(V.T) P = P_tilde(r=view_pts, N=nb) view_shap = np.dot(Vti, P) u = np.sin(nodal_pts-np.pi/4.) du = np.cos(view_pts-np.pi/4) duh = np.dot(Dr, u) view_duh = np.dot(duh, view_shap) err = np.max(np.abs(view_duh - du)) errs.append(err) # plot first few if p < 4: ax[p-1].plot(view_pts, np.sin(view_pts), label=r'$u$') ax[p-1].plot(view_pts, du, label=r'$u^\prime$') ax[p-1].plot(view_pts, view_duh, '--', label=r'$du_h$') ax[p-1].set_title(r'$p={}$'.format(p)) ax[p-1].legend() plt.show() fig, ax = plt.subplots() ax.semilogy(ps, errs) ax.set_xticks(ps) ax.set_ylabel(r'$||du - du_h||_{L_\infty}$') ax.set_xlabel('polynomial order p') ax.set_title('exponential convergence of discrete derivative') plt.show() ``` # creation of a 1D master element Define a simple nodal basis object for 1D problems -- no need to be fancy, pretty much the only thing we need this for is to get nodal shape functions and their derivatives efficiently. The underlying orthonormal Legendre polynomial basis is hidden to the user. 
``` class NodalBasis1D(object): """ minimalist nodal basis object: efficiently computes shape functions and their derivatives """ def __init__(self, p, node_spacing='GAUSS_LOBATTO'): self.nb = p + 1 if node_spacing == 'GAUSS_LOBATTO': self.nodal_pts, _ = LegendreGaussLobatto(self.nb) elif node_spacing == 'EQUIDISTANT': self.nodal_pts = np.linspace(-1, 1, self.nb) else: raise ValueError('node_spacing {} not recognized'.format(node_spacing)) def shape_functions_at_pts(self, pts): """ computes shape functions evaluated at pts on [-1, 1] @retval shap (len(pts), nb) phi_j(pts[i]) """ V = Vandermonde1D(N=self.nb, x=self.nodal_pts) VTinv = np.linalg.inv(V.T) P = P_tilde(r=pts, N=self.nb) shap = np.dot(VTinv, P) return shap.T def shape_function_derivatives_at_pts(self, pts): """ computes shape function derivatives w/r/t x on [-1, 1] @retval shap_der, (Dr in Hesthaven), (len(pts), nb) d/dx phi_j(pts[i]) """ V = Vandermonde1D(N=self.nb, x=self.nodal_pts) Vx = GradVandermonde1D(N=self.nb, x=pts) Vinv = np.linalg.inv(V) shap_der = np.dot(Vx, Vinv) return shap_der ``` Define a 1D master element, which is built on top of the 1D basis. - Precompute shape functions at the nodal points and Gauss Legendre quadrature points, both are useful for different types of schemes. We use Gauss Legendre points instead of Gauss Lobatto points because they can integrate degree $2n-1$ polynomials exactly instead of $2n - 3$, where $n$ is the number of integration points. We would like to integrate $(\phi_i, \phi_j)_{\hat{K}}$, which is order 2$p$, so to integrate the mass matrix exactly, we need $2p + 1$ points, and common practice is $2p+2$. Since quadrature in 1D is cheap, we opt for the latter. - Precompute mass matrix $M_{ij} = (\phi_i, \phi_j)$ and stiffness matrices $S_{ij} = \left(\phi_i, \frac{d\phi_j}{dx}\right)$, $K_{ij} = \left(\frac{d\phi_i}{dx}, \frac{d\phi_j}{dx}\right)$. Additionally, store $M^{-1}$, as it is commonly used. 
Although Hesthaven's method for mass and stiffness matrices are elegant, they rely on the underlying choice of an orthonormal modal basis. Since this class could be overloaded to work with other choices of basis, better to simply compute these matrices with quadrature.

```
class Master1D(object):
    """ minimalist 1D master object """
    def __init__(self, p, nquad_pts=None, *args, **kwargs):
        self.p, self.nb = p, p+1
        self.basis = NodalBasis1D(p=p, **kwargs)
        self.nodal_pts = self.basis.nodal_pts
        # default 2p+2 quadrature points: enough to integrate the order-2p
        # mass matrix integrand exactly (see discussion above)
        self.nq = 2*self.p + 2 if nquad_pts is None else nquad_pts
        self.quad_pts, self.wghts = GaussLegendre(self.nq)

        # shape functions at nodal and quadrature points
        self.shap_nodal, self.dshap_nodal = self.mk_shap_and_dshap_at_pts(self.nodal_pts)
        self.shap_quad, self.dshap_quad = self.mk_shap_and_dshap_at_pts(self.quad_pts)

        # mass, stiffness matrices (computed by quadrature, not modal identities)
        self.M, self.S, self.K = self.mk_M(), self.mk_S(), self.mk_K()
        self.Minv = np.linalg.inv(self.M)

        # lifting permutation matrix L (0s, 1s)
        self.L = self.mk_L()

    def mk_shap_and_dshap_at_pts(self, pts):
        """ shape functions and their derivatives evaluated at pts """
        shap = self.basis.shape_functions_at_pts(pts)
        dshap = self.basis.shape_function_derivatives_at_pts(pts)
        return shap, dshap

    def mk_M(self):
        """ mass matrix M_ij = (phi_i, phi_j) by quadrature """
        shapw = np.dot(np.diag(self.wghts), self.shap_quad)
        M = np.dot(self.shap_quad.T, shapw)
        return M

    def mk_S(self):
        """ stiffness matrix S_ij = (phi_i, dphi_j/dx) by quadrature """
        dshapw = np.dot(np.diag(self.wghts), self.dshap_quad)
        S = np.dot(self.shap_quad.T, dshapw)
        return S

    def mk_K(self):
        """ stiffness matrix K_ij = (dphi_i/dx, dphi_j/dx) by quadrature """
        dshapw = np.dot(np.diag(self.wghts), self.dshap_quad)
        K = np.dot(self.dshap_quad.T, dshapw)
        return K

    def mk_L(self):
        # selects the first and last nodal dof (the element endpoints)
        L = np.zeros((self.nb, 2))
        L[0, 0] = 1
        L[-1, 1] = 1
        return L

    @property
    def shap_der(self):
        """ return the shape derivatives for apps expecting 2, 3D"""
        return [self.dshap_quad]
```

# creation of 1D mesh and DOF handler

## 1D mappings

For 1D problems, the mapping from the master element to physical space elements is somewhat trivial, since there's no reason for the transformation to be anything except affine.
Note though, that when the 1D elements are embedded in 2D, then the transformations may be non-affine, in which case we must handle isoparametric mappings and the like. We defer this until later.

For an affine mapping, we have the simple mapping

$$x(\xi) = x_L^k + \frac{1 + \xi}{2}(x_R^k - x_L^k)$$

With which we can move the nodal master points to their physical space coordinates.

```
# build T and P arrays
P = np.linspace(2, 4, 5)

class Mesh1D(object):
    def __init__(self, P):
        """ @param P vertex points, sorted by x position """
        self.verts = P
        self.nElm, self.nEdges = len(self.verts) - 1, len(self.verts)
        self.connectivity = self.build_T()
        # a vertex appearing in exactly one element is a boundary vertex
        connected_one_side = np.bincount(self.connectivity.ravel()) == 1
        self.boundary_verts = np.where(connected_one_side)[0]

    def build_T(self):
        """ element connectivity array from 1D vertex list """
        T = np.zeros((self.nElm, 2), dtype=int)
        T[:,0] = np.arange(self.nElm)
        T[:,1] = np.arange(self.nElm) + 1
        return T

class dofh_1D(object):
    # base class for 1D degree-of-freedom handlers
    pass

class DG_dofh_1D(dofh_1D):
    def __init__(self, mesh, master):
        self.mesh, self.master = mesh, master
        # DG: every element owns its own copy of the nodal dof
        self.n_dof = self.master.nb * self.mesh.nElm
        self.dgnodes = self.mk_dgnodes()
        self.lg = self.mk_lg()
        self.lg_PM = self.mk_minus_plus_lg()
        self.nb, self.nElm = self.master.nb, self.mesh.nElm
        self.ed2elm = self.mk_ed2elm()

    def mk_dgnodes(self):
        """ map master nodal pts to element vertices def'd in self.mesh
        @retval dgn (nb, nElm) physical coordinates of every nodal dof
        """
        dgn = np.zeros((self.master.nb, self.mesh.nElm))
        master_nodal_pts = np.squeeze(self.master.nodal_pts)
        for elm, elm_verts in enumerate(self.mesh.connectivity):
            elm_vert_pts = self.mesh.verts[elm_verts]
            elm_width = elm_vert_pts[1] - elm_vert_pts[0]
            # affine map x(xi) = x_L + (1+xi)/2 * (x_R - x_L)
            mapped_pts = elm_vert_pts[0] + (1+master_nodal_pts)/2.*(elm_width)
            dgn[:, elm] = mapped_pts
        return dgn

    def mk_lg(self):
        """ number all dof sequentially by dgnodes """
        node_numbers = np.arange(np.size(self.dgnodes))
        lg = node_numbers.reshape(self.dgnodes.shape, order='F')
        return lg

    def mk_minus_plus_lg(self):
        """ (-) denotes element interior, (+) denotes exterior """
        lg_PM = dict()
        # endpoint dof of every element, in element order
        lg_PM['-'] = self.lg[[0, -1], :].ravel(order='F')
        # fancy indexing returns a copy, so the shifts below do not touch self.lg
        lgP = self.lg[[0, -1],:]
        lgP[0, 1: ] -= 1 # shift nodes to left of first
        lgP[1, :-1] += 1 # shift nodes to right of last
        # NOTE: the two domain-boundary entries are left unshifted, so there
        # '+' coincides with '-'; boundary fluxes must be overwritten later.
        lg_PM['+'] = lgP.ravel(order='F')
        return lg_PM

    def mk_ed2elm(self):
        """ internal map holding the indices to reshape vector of
        values on faces to element edge space (2, nElm), duplicating
        the values on either side of interior faces """
        f2elm = np.zeros((2, self.nElm))
        faces = np.arange(self.mesh.nEdges)
        # numpy magic is doing the following:
        #
        # [[0, 1, 2, 3]
        #  [0, 1, 2, 3]]  - ravel('F') -> [0, 0, 1, 1, 2, 2, 3, 3]
        #
        # close, but ends duplicated. => trim the ends and reshape to f2elm shape
        #
        # [[0, 1, 2]
        #  [1, 2, 3]]
        #
        f2elm = np.vstack((faces, faces)).ravel(
            order='F')[1:-1].reshape(f2elm.shape, order='F')
        return f2elm

    def edge2elm_ed(self, arr):
        """ internal method to move edge values (defined on the
        interfaces) to values on the "element edge space", the edge
        dof interior to each element
        @param arr array formatted on edge space (nFaces,)
        @retval elmEdArr array formatted on "element edge space" (2, nElm)
        """
        return arr[self.ed2elm]
```

# computation of fluxes

The 'back end' of an explicit DG computation is the unrolled vector of all the problem unknowns.
The front end that we'd like to interact with is the dgnodes data structure

```
def plot_solution(ax, u, dofh):
    """ u formatted like dgnodes """
    # one line segment per element (elements may be discontinuous at faces)
    for elm in range(dofh.nElm):
        nodal_pts = dofh.dgnodes[:, elm]
        nodal_values = u[:, elm]
        ax.plot(nodal_pts, nodal_values)
    return ax

# Low storage Runge-Kutta coefficients LSERK
rk4a = np.array([ 0.0,
                  -567301805773.0/1357537059087.0,
                  -2404267990393.0/2016746695238.0,
                  -3550918686646.0/2091501179385.0,
                  -1275806237668.0/842570457699.0])
rk4b = np.array([ 1432997174477.0/9575080441755.0,
                  5161836677717.0/13612068292357.0,
                  1720146321549.0/2090206949498.0,
                  3134564353537.0/4481467310338.0,
                  2277821191437.0/14882151754819.0])
rk4c = np.array([ 0.0,
                  1432997174477.0/9575080441755.0,
                  2526269341429.0/6820363962896.0,
                  2006345519317.0/3224310063776.0,
                  2802321613138.0/2924317926251.0])

# constants
π = np.pi

# geometry set up
P = np.linspace(0, 2*π, 10)
mesh1d = Mesh1D(P)
master = Master1D(p=2)
dofh = DG_dofh_1D(mesh1d, master)
mapdgn = np.zeros((dofh.dgnodes.shape[0], 1, dofh.dgnodes.shape[1]))
mapdgn[:,0,:] = dofh.dgnodes
_map = fem_map.Affine_Mapping(master=[master], dgnodes=[mapdgn])
```

We choose numerical fluxes of the form

$$\widehat{au} =\left\{\!\!\left\{au\right\}\!\!\right\} + (1-\alpha)\frac{|a|}{2} \left[\!\!\left[u\right]\!\!\right]$$

Where $\alpha = 0$ represents an upwinded flux and $\alpha=1$ represents a central flux. These are shown in Hesthaven to be stable for the equation we are interested in solving.

```
def compute_interior_flux(u, norm, dofh, α):
    """ computes the numerical flux at all of the element interfaces
    @param u the current solution u, unrolled to a vector
    @param norm interface normal signs
    @param dofh DG dof handler supplying the (-)/(+) trace index maps
    @param α 0 => upwind flux, 1 => central flux
    NOTE: boundary interfaces will be filled with garbage, and must be corrected

    NOTE(review): this reads the advection speed `a` from module scope rather
    than taking it as a parameter -- fragile; confirm `a` is set before use.
    NOTE(review): np.abs(norm*a) discards the sign of `norm`, so the `norm`
    argument has no effect on the result -- verify the intended sign
    convention of the jump term against Hesthaven's definition.
    """
    pm = dofh.lg_PM
    # equivalent to the flux
    # \hat{au} = {{au}} + (1-α) * |a|/2 * [[u]]
    # at element interfaces. First and last interface will have garbage.
    flux = a/2*(u[pm['-']] + u[pm['+']]) + (1-α)*np.abs(norm*a)/2.*(u[pm['+']] - u[pm['-']])
    return flux
```

# semi-discrete scheme

Considering the "weak" DG-FEM form, we have the semi-discrete element local equation

\begin{align}
\int_K \frac{\partial u_h}{\partial t} v \, dK +\int_{K} (au_h) \frac{\partial v}{\partial x} \, dK = -\int_{\partial K} \hat{n}\cdot \widehat{au} v \, d\partial K
\end{align}

Choosing a representation $u=u_i\phi_i$ piecewise polynomial over each element, and the same test space, we have, for a given choice of numerical flux $\widehat{au}$, and noting that in 1D, the normal vectors are simply (-1, +1):

\begin{align}
\int_K \frac{\partial}{\partial t} (u_i(t) \phi_i(x)) \phi_j(x) \, dx +\int_{K} a(u_i(t)\phi_i(x)) \frac{\partial \phi_j(x)}{\partial x} \, dx = -(\widehat{au}(x_R) - \widehat{au}(x_L))
\end{align}

transforming the integrals to the reference element:

\begin{align}
\int_{\hat{K}} \frac{\partial}{\partial t} (u_i(t) \phi_i(\xi)) \phi_j(\xi) \,|det(J)|\, d\xi +\int_{\hat{K}} a(u_i(t)\phi_i(\xi)) \frac{\partial \phi_j(\xi)}{\partial \xi} \, |det(J)|\, d\xi = -(\widehat{au}(x_R) - \widehat{au}(x_L))
\end{align}

This completes the description of the semi-discrete scheme, and we have a choice as to how to compute these integrals. The important part is that since the coefficients $u_i$ vary in time but are constants with respect to space, we can write

\begin{align}
&\frac{\partial u_i(t)}{\partial t} \int_{\hat{K}} \phi_i(\xi) \phi_j(\xi) \,|det(J)|\, d\xi +au_i\int_{\hat{K}} \phi_i(\xi) \left(\frac{d\xi}{dx}\right)\frac{\partial \phi_j(\xi)}{\partial \xi} \, |det(J)|\, d\xi = -(\widehat{au}(x_R) - \widehat{au}(x_L)) \\
&\Rightarrow M_K \vec{\frac{du_h}{dt}} + a S_K \vec{u_h} = - L\, (\widehat{au}(x_R) - \widehat{au}(x_L))
\end{align}

Where we have computed $M_K$ and $S_K$, the mass and stiffness matrices for element $K$.
Although we would normally do this with a quadrature rule, we can take advantage of the fact that in 1D (and indeed under any affine mapping from reference to physical element), $J^{-1}$ and $|\det(J)|$ will be constant over the entire element (also note that in 1D, $J^{-1}$ is a 1x1 matrix)<sup>1</sup>. In that case, we can treat both as constants, precompute $M_{\hat{K}}, S_{\hat{K}}$, and multiply the entire element-local equation by $M^{-1}$, giving \begin{align} \vec{\frac{du_h}{dt}} &= - a \frac{\det(J)_K}{\det(J)_K}\, J^{-1}_K M_{\hat{K}}^{-1}S^T_{\hat{K}} \vec{u_h} - \frac{1}{\det(J)_K} M^{-1}_K L\, (\widehat{au}(x_R) - \widehat{au}(x_L)) \\ &= - a \, J^{-1}_K M_{\hat{K}}^{-1}S^T_{\hat{K}} \vec{u_h} - \frac{1}{\det(J)_K} M^{-1}_K L\, (\widehat{au}(x_R) - \widehat{au}(x_L)) \end{align} Which is a good form for a black box integrator, since we have a "naked" $\frac{du_h}{dt}$, and because the scheme is explicit. note<sup>1</sup>: $J, J^{-1}$ are 1x1 matrices, and $\det{J}$ is simply $J_{11}$; $J^{-1} = 1/J_{11}$. It's important for the clarity of explicit schemes to understand where these cancellations occur. 
``` def advect_rhs_1D(u, t_local, a, dofh, _map, master, flux_fn, gD, norm): return u # final time T = 10 # compute time step size, irrelevant for backward euler CFL = 0.75 Δx = dofh.dgnodes[1,0] - dofh.dgnodes[0,0] Δt = CFL/(2*π)*Δx Δt = Δt / 2 # number of timesteps needed steps = int(np.ceil(T/Δt)) # initial condition, advection speed solution = np.zeros((steps, *dofh.dgnodes.shape)) a = 2 * np.pi solution[0,::] = np.sin(dofh.dgnodes) LSERK_stages = [0, 1, 2, 3, 4] t = 0 gD = lambda t: -np.sin(a*t) # normal vectors, all positive norm = np.ones((2, dofh.nElm)) norm[0,:] *= -1 # function pointer to something that can compute fluxes flux_fn = compute_interior_flux # time loop RK_resid = np.zeros_like(dofh.dgnodes) for tstep in range(3): u = solution[tstep, ::] for s in LSERK_stages: t_local = t + rk4c[s]*Δt rhsu = advect_rhs_1D(u, t_local, a, dofh, _map, master, flux_fn, gD, norm) RK_resid = rk4a[s]*RK_resid + Δt*rhsu u += rk4b[s]*RK_resid t += Δt u0 = solution[0,:,:] fix, ax = plt.subplots() ax = plot_solution(ax, u0, dofh) pm = dofh.lg_PM u = u0.ravel() # normal vectors on interfaces, all positive norm_faces = np.ones(pm['-'].shape[0]) α = 0 # compute interior fluxes flux = compute_interior_flux(u, norm_faces, dofh, α) # compute boundary fluxes flux[0] = gD(t_local) flux[-1] = flux[0] dofh.edge2elm_ed(flux) ```
github_jupyter
## Define the Convolutional Neural Network After you've looked at the data you're working with and, in this case, know the shapes of the images and of the keypoints, you are ready to define a convolutional neural network that can *learn* from this data. In this notebook and in `models.py`, you will: 1. Define a CNN with images as input and keypoints as output 2. Construct the transformed FaceKeypointsDataset, just as before 3. Train the CNN on the training data, tracking loss 4. See how the trained model performs on test data 5. If necessary, modify the CNN structure and model hyperparameters, so that it performs *well* **\*** **\*** What does *well* mean? "Well" means that the model's loss decreases during training **and**, when applied to test image data, the model produces keypoints that closely match the true keypoints of each face. And you'll see examples of this later in the notebook. --- ## CNN Architecture Recall that CNN's are defined by a few types of layers: * Convolutional layers * Maxpooling layers * Fully-connected layers You are required to use the above layers and encouraged to add multiple convolutional layers and things like dropout layers that may prevent overfitting. You are also encouraged to look at literature on keypoint detection, such as [this paper](https://arxiv.org/pdf/1710.00977.pdf), to help you determine the structure of your network. ### TODO: Define your model in the provided file `models.py` file This file is mostly empty but contains the expected name and some TODO's for creating your model. --- ## PyTorch Neural Nets To define a neural network in PyTorch, you define the layers of a model in the function `__init__` and define the feedforward behavior of a network that employs those initialized layers in the function `forward`, which takes in an input image tensor, `x`. The structure of this Net class is shown below and left for you to fill in. 
Note: During training, PyTorch will be able to perform backpropagation by keeping track of the network's feedforward behavior and using autograd to calculate the update to the weights in the network. #### Define the Layers in ` __init__` As a reminder, a conv/pool layer may be defined like this (in `__init__`): ``` # 1 input image channel (for grayscale images), 32 output channels/feature maps, 3x3 square convolution kernel self.conv1 = nn.Conv2d(1, 32, 3) # maxpool that uses a square window of kernel_size=2, stride=2 self.pool = nn.MaxPool2d(2, 2) ``` #### Refer to Layers in `forward` Then referred to in the `forward` function like this, in which the conv1 layer has a ReLu activation applied to it before maxpooling is applied: ``` x = self.pool(F.relu(self.conv1(x))) ``` Best practice is to place any layers whose weights will change during the training process in `__init__` and refer to them in the `forward` function; any layers or functions that always behave in the same way, such as a pre-defined activation function, should appear *only* in the `forward` function. #### Why models.py You are tasked with defining the network in the `models.py` file so that any models you define can be saved and loaded by name in different notebooks in this project directory. 
For example, by defining a CNN class called `Net` in `models.py`, you can then create that same architecture in this and other notebooks by simply importing the class and instantiating a model: ``` from models import Net net = Net() ``` ``` # import the usual resources import matplotlib.pyplot as plt import numpy as np # watch for any changes in model.py, if it changes, re-load it automatically %load_ext autoreload %autoreload 2 ## TODO: Define the Net in models.py import torch import torch.nn as nn import torch.nn.functional as F import torch device = "cpu" FloatTensor = torch.FloatTensor #if torch.cuda.is_available(): # device = "cuda:0" # FloatTensor = torch.cuda.FloatTensor ## TODO: Once you've define the network, you can instantiate it # one example conv layer has been provided for you from models import NaimishNet net = NaimishNet().to(device) print(net) ``` ## Transform the dataset To prepare for training, create a transformed dataset of images and keypoints. ### TODO: Define a data transform In PyTorch, a convolutional neural network expects a torch image of a consistent size as input. For efficient training, and so your model's loss does not blow up during training, it is also suggested that you normalize the input images and keypoints. The necessary transforms have been defined in `data_load.py` and you **do not** need to modify these; take a look at this file (you'll see the same transforms that were defined and applied in Notebook 1). To define the data transform below, use a [composition](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#compose-transforms) of: 1. Rescaling and/or cropping the data, such that you are left with a square image (the suggested size is 224x224px) 2. Normalizing the images and keypoints; turning each RGB image into a grayscale image with a color range of [0, 1] and transforming the given keypoints into a range of [-1, 1] 3. 
Turning these images and keypoints into Tensors

These transformations have been defined in `data_load.py`, but it's up to you to call them and create a `data_transform` below. **This transform will be applied to the training data and, later, the test data**. It will change how you go about displaying these images and keypoints, but these steps are essential for efficient training.

As a note, should you want to perform data augmentation (which is optional in this project), and randomly rotate or shift these images, a square image size will be useful; rotating a 224x224 image by 90 degrees will result in the same shape of output.

```
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils

# the dataset we created in Notebook 1 is copied in the helper file `data_load.py`
from data_load import FacialKeypointsDataset
# the transforms we defined in Notebook 1 are in the helper file `data_load.py`
from data_load import Rescale, RandomCrop, Normalize, ToTensor

## TODO: define the data_transform using transforms.Compose([all tx's, . , .])
# order matters! i.e. rescaling should come before a smaller crop
# rescale to 250x250, randomly crop to the 224x224 input size, then
# normalize pixel/keypoint ranges and convert to Tensors
data_transform = transforms.Compose([Rescale((250, 250)),
                                     RandomCrop((224, 224)),
                                     Normalize(),
                                     ToTensor()])

# testing that you've defined a transform
assert(data_transform is not None), 'Define a data_transform'

# create the transformed dataset
transformed_dataset = FacialKeypointsDataset(csv_file='data/training_frames_keypoints.csv',
                                             root_dir='data/training/',
                                             transform=data_transform)

print('Number of images: ', len(transformed_dataset))

# iterate through the transformed dataset and print some stats about the first few samples
for i in range(4):
    sample = transformed_dataset[i]
    print(i, sample['image'].size(), sample['keypoints'].size())
```

## Batching and loading data

Next, having defined the transformed dataset, we can use PyTorch's DataLoader class to load the training data in batches of whatever size as well as to shuffle the data for training the model. You can read more about the parameters of the DataLoader, in [this documentation](http://pytorch.org/docs/master/data.html).

#### Batch size

Decide on a good batch size for training your model. Try both small and large batch sizes and note how the loss decreases as the model trains.

**Note for Windows users**: Please change the `num_workers` to 0 or you may face some issues with your DataLoader failing.

```
# load training data in batches
batch_size = 8

train_loader = DataLoader(transformed_dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)
```

## Before training

Take a look at how this model performs before it trains. You should see that the keypoints it predicts start off in one spot and don't match the keypoints on a face at all! It's interesting to visualize this behavior so that you can compare it to the model after training and see how the model has improved.

#### Load in the test dataset

The test dataset is one that this model has *not* seen before, meaning it has not trained with these images.
We'll load in this test data and before and after training, see how your model performs on this set! To visualize this test data, we have to go through some un-transformation steps to turn our images into python images from tensors and to turn our keypoints back into a recognizable range. ``` # load in the test data, using the dataset class # AND apply the data_transform you defined above # create the test dataset test_data_transform = transforms.Compose([Rescale((224, 224)), Normalize(), ToTensor()]) test_dataset = FacialKeypointsDataset(csv_file='data/test_frames_keypoints.csv', root_dir='data/test/', transform=test_data_transform) # load test data in batches batch_size = 10 test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=4) ``` ## Apply the model on a test sample To test the model on a test sample of data, you have to follow these steps: 1. Extract the image and ground truth keypoints from a sample 2. Make sure the image is a FloatTensor, which the model expects. 3. Forward pass the image through the net to get the predicted, output keypoints. This function test how the network performs on the first batch of test data. It returns the images, the transformed images, the predicted keypoints (produced by the model), and the ground truth keypoints. 
``` # test the model on a batch of test images def net_sample_output(): # iterate through the test dataset for i, sample in enumerate(test_loader): # get sample data: images and ground truth keypoints images = sample['image'] key_pts = sample['keypoints'] # convert images to FloatTensors images = images.type(FloatTensor) # forward pass to get net output output_pts = net(images) # reshape to batch_size x 68 x 2 pts output_pts = output_pts.view(output_pts.size()[0], 68, -1) # break after first image is tested if i == 0: return images, output_pts, key_pts ``` #### Debugging tips If you get a size or dimension error here, make sure that your network outputs the expected number of keypoints! Or if you get a Tensor type error, look into changing the above code that casts the data into float types: `images = images.type(torch.FloatTensor)`. ``` # call the above function # returns: test images, test predicted keypoints, test ground truth keypoints test_images, test_outputs, gt_pts = net_sample_output() # print out the dimensions of the data to see if they make sense print(test_images.data.size()) print(test_outputs.data.size()) print(gt_pts.size()) ``` ## Visualize the predicted keypoints Once we've had the model produce some predicted output keypoints, we can visualize these points in a way that's similar to how we've displayed this data before, only this time, we have to "un-transform" the image/keypoint data to display it. Note that I've defined a *new* function, `show_all_keypoints` that displays a grayscale image, its predicted keypoints and its ground truth keypoints (if provided). 
``` def show_all_keypoints(image, predicted_key_pts, gt_pts=None): """Show image with predicted keypoints""" # image is grayscale plt.imshow(image, cmap='gray') plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='m') # plot ground truth points as green pts if gt_pts is not None: plt.scatter(gt_pts[:, 0], gt_pts[:, 1], s=20, marker='.', c='g') ``` #### Un-transformation Next, you'll see a helper function. `visualize_output` that takes in a batch of images, predicted keypoints, and ground truth keypoints and displays a set of those images and their true/predicted keypoints. This function's main role is to take batches of image and keypoint data (the input and output of your CNN), and transform them into numpy images and un-normalized keypoints (x, y) for normal display. The un-transformation process turns keypoints and images into numpy arrays from Tensors *and* it undoes the keypoint normalization done in the Normalize() transform; it's assumed that you applied these transformations when you loaded your test data. 
```
# visualize the output
# by default this shows a batch of 10 images
def visualize_output(test_images, test_outputs, gt_pts=None, batch_size=10):
    """Display each image in the batch with predicted (and optionally
    ground-truth) keypoints, after un-transforming tensors back to
    displayable numpy images and pixel-space keypoints."""

    for i in range(batch_size):
        # NOTE(review): a new figure is created on every iteration, so each
        # sample lands in its own figure at subplot position i+1 -- confirm
        # this per-image layout is intended rather than one shared figure.
        plt.figure(figsize=(20,10))
        ax = plt.subplot(1, batch_size, i+1)

        # un-transform the image data
        image = test_images[i].data   # get the image from it's wrapper
        image = image.numpy()   # convert to numpy array from a Tensor
        image = np.transpose(image, (1, 2, 0))   # transpose to go from torch to numpy image

        # un-transform the predicted key_pts data
        predicted_key_pts = test_outputs[i].data
        predicted_key_pts = predicted_key_pts.numpy()
        # undo normalization of keypoints
        # NOTE(review): assumes the Normalize() transform used std 50 and
        # mean 100 -- confirm these constants against data_load.py
        predicted_key_pts = predicted_key_pts*50.0+100

        # plot ground truth points for comparison, if they exist
        ground_truth_pts = None
        if gt_pts is not None:
            ground_truth_pts = gt_pts[i]
            ground_truth_pts = ground_truth_pts*50.0+100

        # call show_all_keypoints
        show_all_keypoints(np.squeeze(image), predicted_key_pts, ground_truth_pts)

        plt.axis('off')

    plt.show()

# call it
visualize_output(test_images, test_outputs, gt_pts)
```

## Training

#### Loss function

Training a network to predict keypoints is different than training a network to predict a class; instead of outputting a distribution of classes and using cross entropy loss, you may want to choose a loss function that is suited for regression, which directly compares a predicted value and target value. Read about the various kinds of loss functions (like MSE or L1/SmoothL1 loss) in [this documentation](http://pytorch.org/docs/master/_modules/torch/nn/modules/loss.html).

### TODO: Define the loss and optimization

Next, you'll define how the model will train by deciding on the loss function and optimizer.
--- ``` ## TODO: Define the loss and optimization import torch.optim as optim criterion = nn.SmoothL1Loss() #nn.MSELoss() optimizer = optim.Adam(net.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08) ``` ## Training and Initial Observation Now, you'll train on your batched training data from `train_loader` for a number of epochs. To quickly observe how your model is training and decide on whether or not you should modify it's structure or hyperparameters, you're encouraged to start off with just one or two epochs at first. As you train, note how your the model's loss behaves over time: does it decrease quickly at first and then slow down? Does it take a while to decrease in the first place? What happens if you change the batch size of your training data or modify your loss function? etc. Use these initial observations to make changes to your model and decide on the best architecture before you train for many epochs and create a final model. ``` import sys import math def process_data(data_loader, train=False): if train: net.train() else: net.eval() losses = [] average_loss = 0.0 #running_loss = 0.0 batch_count = len(data_loader) for batch_i, data in enumerate(data_loader): # get the input images and their corresponding labels images = data['image'] key_pts = data['keypoints'] # flatten pts key_pts = key_pts.view(key_pts.size(0), -1) # convert variables to floats for regression loss key_pts = key_pts.type(FloatTensor) images = images.type(FloatTensor) if train: # forward pass to get outputs output_pts = net(images) else: with torch.no_grad(): output_pts = net(images) # calculate the loss between predicted and target keypoints loss = criterion(output_pts, key_pts) if train: # zero the parameter (weight) gradients optimizer.zero_grad() # backward pass to calculate the weight gradients loss.backward() # update the weights optimizer.step() # print loss statistics # to convert loss into a scalar and add it to the running_loss, use .item() running_loss = loss.item() 
losses.append(running_loss) # average_training_loss = total_training_loss / (batch_i + 1) #if batch_i % 10 == 9: # print every 10 batches action = "training" if train else "validating" print('\r{} batch: {}/{}, current loss: {:.3f}'.format(action, batch_i + 1, batch_count, running_loss), end="") sys.stdout.flush() #running_loss = 0.0 average_loss = np.mean(losses) return average_loss def train_net(n_epochs, patience): last_train_loss = math.inf last_valid_loss = math.inf best_valid_loss = last_valid_loss epochs_without_improvement = 0 for epoch in range(n_epochs): print("\n--------------------------------------------------------------------------") print("Epoch: {}, last train loss: {:.3f}, last valid loss: {:.3f}, best valid loss: {:.3f}".format( epoch + 1, last_train_loss, last_valid_loss, best_valid_loss )) last_train_loss = process_data(train_loader, train=True) test_loss = process_data(test_loader, train=False) last_valid_loss = test_loss if last_valid_loss < best_valid_loss: best_valid_loss = last_valid_loss torch.save(net.state_dict(), "saved_models/keypoints_model.pth") else: epochs_without_improvement += 1 if epochs_without_improvement > patience: print("\nstopping early") break # train your network n_epochs = 50 patience = 3 train_net(n_epochs, patience) ``` ## Test data See how your model performs on previously unseen, test data. We've already loaded and transformed this data, similar to the training data. Next, run your trained model on these images to see what kind of keypoints are produced. You should be able to see if your model is fitting each new face it sees, if the points are distributed randomly, or if the points have actually overfitted the training data and do not generalize. ## Test data See how your model performs on previously unseen, test data. We've already loaded and transformed this data, similar to the training data. Next, run your trained model on these images to see what kind of keypoints are produced. 
You should be able to see if your model is fitting each new face it sees, if the points are distributed randomly, or if the points have actually overfitted the training data and do not generalize.
```
# reload the best (early-stopped) checkpoint onto the CPU
net.load_state_dict(torch.load("saved_models/keypoints_model.pth", map_location="cpu"))

# get a sample of test data again
test_images, test_outputs, gt_pts = net_sample_output()

print(test_images.data.size())
print(test_outputs.data.size())
print(gt_pts.size())

## TODO: visualize your test output
# you can use the same function as before, by un-commenting the line below:
visualize_output(test_images, test_outputs, gt_pts)
```

Once you've found a good model (or two), save your model so you can load it and use it later!

```
## TODO: change the name to something unique for each new model
model_dir = 'saved_models/'
model_name = 'keypoints_model_1.pt'

# after training, save your model parameters in the dir 'saved_models'
torch.save(net.state_dict(), model_dir+model_name)
```

After you've trained a well-performing model, answer the following questions so that we have some insight into your training and architecture selection process. Answering all questions is required to pass this project.

### Question 1: What optimization and loss functions did you choose and why?

**Answer**: I tried 2 loss functions: Mean Square Error and Smooth L1. As for the optimizer, the recommended by the original Paper Adam was selected. A few experiments were conducted with different hyper-parameter sets. Check report on that below.

### Question 2: What kind of network architecture did you start with and how did it change as you tried different architectures? Did you decide to add more convolutional layers or any layers to avoid overfitting the data?

**Answer**: Naimish architecture was selected as in Facial Key Points Detection using Deep Convolutional Neural Network paper.

### Question 3: How did you decide on the number of epochs and batch_size to train your model?

**Answer**: There were attempts to train the model on both CPU and GPU. In a GPU case the batch size was selected based on the GPU memory available.
As you can see above, a regularization technique Early Stopping was implemented with patience 3 which monitored test set loss. As soon training stopped improving the training loss for more than 3 epochs, the processed is stopped. Besides, the function saves the model with the lowest training loss. (check results.txt for more detailed info about training) ### CPU with MSE loss and 8 batch size This attempt reached test loss 0.698 on the 21st epoch and stopped on the 23rd. We consider this try as a baseline ### CPU with MSE loss and 128 batch size Increasing batch size significantly. Test loss reached 23.369 on 11th epoch and stopped shotly. We consider this test failed. ### GPU with MSE loss and 8 batch size Trying to repoduce CPU's result on GPU to see if it is any better. It reached 0.691 value on a test set on the 19th epoch and stopped. We can say that the performance is the same as on CPU. ### GPU with Smooth L1 loss and 8 batch size It reached test loss value 0.027 on 5th epoch! This is the fastest and the most effecient set of parameters. ## Feature Visualization Sometimes, neural networks are thought of as a black box, given some input, they learn to produce some output. CNN's are actually learning to recognize a variety of spatial patterns and you can visualize what each convolutional layer has been trained to recognize by looking at the weights that make up each convolutional kernel and applying those one at a time to a sample image. This technique is called feature visualization and it's useful for understanding the inner workings of a CNN. In the cell below, you can see how to extract a single filter (by index) from your first convolutional layer. The filter should appear as a grayscale grid. 
``` # Get the weights in the first conv layer, "conv1" # if necessary, change this to reflect the name of your first conv layer weights1 = net.conv_1.weight.data w = weights1.numpy() filter_index = 0 print(w[filter_index][0]) print(w[filter_index][0].shape) # display the filter weights plt.imshow(w[filter_index][0], cmap='gray') ``` ## Feature maps Each CNN has at least one convolutional layer that is composed of stacked filters (also known as convolutional kernels). As a CNN trains, it learns what weights to include in it's convolutional kernels and when these kernels are applied to some input image, they produce a set of **feature maps**. So, feature maps are just sets of filtered images; they are the images produced by applying a convolutional kernel to an input image. These maps show us the features that the different layers of the neural network learn to extract. For example, you might imagine a convolutional kernel that detects the vertical edges of a face or another one that detects the corners of eyes. You can see what kind of features each of these kernels detects by applying them to an image. One such example is shown below; from the way it brings out the lines in an the image, you might characterize this as an edge detection filter. <img src='images/feature_map_ex.png' width=50% height=50%/> Next, choose a test image and filter it with one of the convolutional kernels in your trained CNN; look at the filtered output to get an idea what that particular kernel detects. 
### TODO: Filter an image to see the effect of a convolutional kernel --- ``` import cv2 weights1 = net.conv_1.weight.data w = weights1.numpy() filter_index = 0 print(w[filter_index][0]) print(w[filter_index][0].shape) idx_img = 0 img = np.squeeze(test_images[idx_img].data.numpy()) plt.imshow(w[filter_index][0], cmap='gray') filtered_img = cv2.filter2D(img, -1, w[filter_index][0]) plt.imshow(filtered_img, cmap="gray") ``` ### Question 4: Choose one filter from your trained CNN and apply it to a test image; what purpose do you think it plays? What kind of feature do you think it detects? **Answer**: I seems like this filter blurs an image a bit. --- ## Moving on! Now that you've defined and trained your model (and saved the best model), you are ready to move on to the last notebook, which combines a face detector with your saved model to create a facial keypoint detection system that can predict the keypoints on *any* face in an image!
github_jupyter
# **Legal BERT-th (FineTuning)** ``` %cd bert_finetuning pwd ``` > Install and import libraries ``` !pip install tensorflow-gpu==1.15 import tensorflow print(tensorflow.__version__) # Install sentencepiece >> used for tokenizing Thai senetences !pip install sentencepiece # Install gdown for downloading files from google drive !pip install gdown import os import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt ``` # **Download required files** > Thai Pretrained BERT model - bert_base_th (ThAIKeras) ``` !gdown --id 1J3uuXZr_Se_XIFHj7zlTJ-C9wzI9W_ot # use only id in the link https://drive.google.com/uc?id=1J3uuXZr_Se_XIFHj7zlTJ-C9wzI9W_ot !unzip bert_base_th.zip ``` > th_wiki_bpe ``` !gdown --id 1F7pCgt3vPlarI9RxKtOZUrC_67KMNQ1W os.mkdir('th_wiki_bpe') !unzip th_wiki_bpe.zip -d th_wiki_bpe ``` > BERT classifier finetuner modified for Thai https://github.com/ThAIKeras/bert ``` !gdown https://github.com/ThAIKeras/bert.git !git clone https://github.com/ThAIKeras/bert.git !https://github.com/ThAIKeras/bert.git ``` Now you should have these folders in the directory |-- bert |-- bert_base_th |-- th_wiki_bpe |-- truevoice-intent # Finetune the model > SAVE MODEL AS .pb using > ## --do_eval=true ``` os.mkdir('model_finetuning') # Declare path to parse when finetuning os.environ['BPE_DIR'] = 'th_wiki_bpe' #'/content/th_wiki_bpe' os.environ['DATA_DIR'] = 'law_dataset' #'/content/law_data' os.environ['OUTPUT_DIR'] = 'model_finetuning' #'/content/model' os.environ['BERT_BASE_DIR'] = 'Legalbert_th' #'/content/bert_base_th' # Run finetuning(Classes) !python bert/law_classifier.py \--task_name=legaldoc \--do_train=true \--do_eval=true \--do_export=true \--data_dir=$DATA_DIR \--vocab_file=$BPE_DIR/th.wiki.bpe.op25000.vocab \--bert_config_file=$BERT_BASE_DIR/bert_config.json \--init_checkpoint=$BERT_BASE_DIR/model.ckpt-20 \--max_seq_length=128 \--train_batch_size=32 
\--learning_rate=5e-5 \--num_train_epochs=3.0 \--output_dir=$OUTPUT_DIR \--spm_file=$BPE_DIR/th.wiki.bpe.op25000.model ``` # BERT to Predict on Test Data ``` os.mkdir('output_predict2') os.environ['BPE_DIR'] = 'th_wiki_bpe' #'/content/th_wiki_bpe' os.environ['DATA_DIR'] = 'law_dataset' #'/content/law_data' os.environ['OUTPUT_DIR'] = 'output_predict2' #'/content/model' os.environ['BERT_FINE_DIR'] = 'model_finetuning2' #'/content/bert_base_th' # Run predict(Classes) !python bert/law_classifier.py \--task_name=legaldoc \--do_predict=true \--data_dir=$DATA_DIR \--vocab_file=$BPE_DIR/th.wiki.bpe.op25000.vocab \--bert_config_file=$BERT_BASE_DIR/bert_config.json \--init_checkpoint=$BERT_FINE_DIR/model.ckpt-456 \--max_seq_length=128 \--output_dir=$OUTPUT_DIR \--spm_file=$BPE_DIR/th.wiki.bpe.op25000.model ``` # Accuracy and Result **could test its accuracy by setting do_eval=true when finetuning. Here, I added a multi-class confusion matrix to give more information about the prediction in order to tune the model or come up with improvement strategi** ``` def plot_confusion_matrix(y_true, y_pred, classes, destination=None, normalize=False, title=None, cmap=plt.cm.Blues): """ This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """ if not title: if normalize: title = 'Normalized confusion matrix' else: title = 'Confusion matrix, without normalization' # Compute confusion matrix #cm = np.array([[ 11, 2 , 40 , 2], [ 0 , 0 , 0 , 0], [ 37 , 8 ,114 , 4], [ 11 , 0 ,33 , 1]]) cm = confusion_matrix(y_true, y_pred) # Only use the labels that appear in the data #classes = classes[unique_labels(y_true, y_pred)] if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print("Normalized confusion matrix") else: print('Confusion matrix, without normalization') print(cm) fig, ax = plt.subplots() im = ax.imshow(cm, interpolation='nearest', cmap=cmap) ax.figure.colorbar(im, ax=ax) # We want to show all ticks... 
ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), # ... and label them with the respective list entries xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label') # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=30, ha="right", rotation_mode="anchor") # Loop over data dimensions and create text annotations. fmt = '.2f' if normalize else 'd' thresh = cm.max() / 2. for i in range(cm.shape[0]): for j in range(cm.shape[1]): ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black") fig.tight_layout() #plt.show() try: plt.savefig(destination) except: pass return ax def compare_answer(row): ''' Compares 2 columns of actual and predicted class. Returns: -- 1 if they are the same -- 0 otherwise. ''' if row['prediction'] == row['actual']: return 1 else: return 0 def benchmark(actual_path, pred_path, confusion_matrix_path): ''' Calculates model accuracy and confusion matrix Arguments: actual_path -- path of test file which contains actual labels. pred_path -- path of prediction file which contains predicted labels. 
Returns: accuracy -- accuracy of the prediction confusion matrix -- save as image file ''' # Read files to dataframes actual = pd.read_csv(actual_path, sep='\t') #actual = pd.read_csv(actual_path, error_bad_lines=False) pred = pd.read_csv(pred_path, sep='\t') # Create result dataframe result = pred.copy() result['prediction'] = result.idxmax(axis=1) result['actual'] = actual['labels'] result['correct'] = result.apply(lambda row: compare_answer(row), axis=1) # Calculate accuracy accuracy = sum(result['correct'])/len(result) # Confusion Matrix confusion_matrix = plot_confusion_matrix(result['actual'], result['prediction'], classes=['Violation', 'family', 'labor', 'contract', 'criminal'], destination=confusion_matrix_path, normalize=False, title='Confusion Matrix') return accuracy, result # Benchmark accuracy actual_path = 'law_dataset/test_labels.tsv' pred_path = 'output_predict2/test_results.tsv' confusion_matrix_path = 'output_predict2/confusion_matrix.png' accuracy, result = benchmark(actual_path, pred_path, confusion_matrix_path) print(accuracy) print(result) ```
github_jupyter
## Spatial data visualization with `tidycensus` **Location! Location! Location!** The location people live in tells us a lot about the space itself as well as the people who live in there. This demo is about spatial data visualization with `tidycensus` R package with two variables of interest -- population and race distribution. First we will get the big picture at the Virginia state scale, then will zoom in on northern Virginia in Washington DC metro area. ### Tools We do not have to start from scratch. As always the case, someone has already done the hard work for us so we can stand on their shoulders. [Kyle Walker](https://walkerke.github.io/) in this case has developed an `rstat` package called [`tidycensus`](https://github.com/walkerke/tidycensus). This package allows for easy access, analysis and visualization of Census Beureau data on hundreds of variables. Not that in Python you can not do spatial analysis/visualization of census data, but certainly not as easily as in R because of some excellent rstats packages available and tailored for this purpose. To be able to use `tidycensus` you'll need your own Census API Key. If you do not have one, [get one](https://www.census.gov/developers/). The only other library you'll need is `tidyverse`; and this is it! If you like interactive visialization of maps (i.e. zoom in zoom out etc.) you will need additional libraries and codes. --- Okay, so here we go ... 
``` # import libraries library(tidycensus) library(tidyverse) options(tigris_use_cache = TRUE) # get your Census Bureau API key census_api_key("xxxx") # geting VA population data of counties vapop <- get_acs(state = "VA", geography = "county", variables = "B19013_001", geometry = TRUE) # plot VA population vapop %>% ggplot(aes(fill = estimate)) + geom_sf(color = NA) + coord_sf(crs = "+init=epsg:4326") + # original 26911 scale_fill_viridis_c(option = "viridis", direction=-1) # original: scale_fill_viridis_c(option = "magma") # specify races races <- c(White = "P005003", Black = "P005004", Asian = "P005006", Hispanic = "P004003") # get decennial data on races varace <- get_decennial(geography = "county", variables = races, state = "VA", geometry = TRUE, summary_var = "P001001") # plot race variables as a percent of total population varace %>% mutate(percent = 100 * (value / summary_value)) %>% # create a calculated column of percent value ggplot(aes(fill = percent)) + facet_wrap(~variable) + geom_sf(color = NA) + coord_sf(crs = "+init=epsg:4326") + scale_fill_viridis_c(option = "magma", direction=-1) ``` ## Zooming in on Northern Virginia ``` # specify counties that constitute Northern Virginia NOVA = c("Fairfax County", "Fairfax City", "Manassas Park City", "Arlington County", "Loudoun County", "Alexandria City", "Falls Church City", "Prince William County", "Manassas City") # get population data of NOVA cunties novapop <- get_acs(state = "VA", county = NOVA, geography = "tract", variables = "B19013_001", geometry = TRUE) # plot NOVA population novapop %>% ggplot(aes(fill = estimate)) + geom_sf(color = NA) + coord_sf(crs = "+init=epsg:4326") + scale_fill_viridis_c(option = "viridis", direction=-1) # original: scale_fill_viridis_c(option = "magma") # get decennial data on races novarace <- get_decennial(geography = "tract", variables = races, state = "VA", county = NOVA, geometry = TRUE, summary_var = "P001001") # plot race variables as a percent of total population 
novarace %>% mutate(percent = 100 * (value / summary_value)) %>% # create a calculated column of percent value ggplot(aes(fill = percent)) + facet_wrap(~variable) + geom_sf(color = NA) + coord_sf(crs = "+init=epsg:4326") + scale_fill_viridis_c(option = "magma", direction=-1) ```
github_jupyter
``` import os import numpy as np from tqdm import tqdm from src.data.loaders.ascad import ASCADData from src.dlla.berg import make_mlp from src.dlla.hw import prepare_traces_dl, dlla_known_p from src.pollution.gaussian_noise import gaussian_noise from src.tools.cache import cache_np from src.trace_set.database import Database from src.trace_set.pollution import Pollution, PollutionType from src.trace_set.set_hw import TraceSetHW from src.trace_set.window import get_windows, extract_traces # Source [EDIT] DB = Database.ascad_none # Limit number of traces [EDIT] LIMIT_PROF = None LIMIT_ATT = 1000 # Select targets and noise parameters RAW_TRACES, WINDOW_JITTER_PARAMS, GAUSS_PARAMS, LIMIT_RAW = [None] * 4 if DB is Database.ascad_none or DB is Database.ascad: TARGET_ROUND = 0 TARGET_BYTE = 0 WINDOW_JITTER_PARAMS = np.arange(0, 205, 5) GAUSS_PARAMS = np.arange(0, 205, 5) RAW_TRACES = ASCADData.raw()['traces'] LIMIT_RAW = -1 if DB is Database.ascad: TARGET_BYTE = 2 WINDOW_JITTER_PARAMS = np.arange(0, 2.05, .05) GAUSS_PARAMS = np.arange(0, 5.1, .1) if DB is Database.aisy: TARGET_ROUND = 4 TARGET_BYTE = 0 WINDOW_JITTER_PARAMS = np.arange(0, 460, 10) GAUSS_PARAMS = np.arange(0, 4100, 100) RAW_TRACES = cache_np("aisy_traces") # Select targets TRACE_SET = TraceSetHW(DB) SAMPLE_TRACE = TRACE_SET.profile()[0][0] WINDOW, WINDOW_CXT = get_windows(RAW_TRACES, SAMPLE_TRACE) # Isolate context trace for window jitter. 
# Gets cached, as this procedure takes some time (depending on disk read speed) X_CXT = cache_np(f"{DB.name}_x_cxt", extract_traces, RAW_TRACES, WINDOW_CXT)[:LIMIT_RAW] PROFILING_MASK = np.ones(len(X_CXT), dtype=bool) PROFILING_MASK[2::3] = 0 X_PROF, Y_PROF = TRACE_SET.profile_states() X_ATT, Y_ATT = TRACE_SET.attack_states() X_PROF_CXT = X_CXT[PROFILING_MASK] X_ATT_CXT = X_CXT[~PROFILING_MASK] def verify(db: Database, pollution: Pollution): """ Assess leakage from database by """ trace_set = TraceSetHW(db, pollution, (LIMIT_PROF, LIMIT_ATT)) x9, y9, x9_att, y9_att = prepare_traces_dl(*trace_set.profile(), *trace_set.attack()) mdl9 = make_mlp(x9, y9, progress=False) dlla9_p = dlla_known_p(mdl9, x9_att, y9_att) print(f"Pollution {pollution.type} ({pollution.parameter}): p-value ({dlla9_p}).") def desync(traces: np.ndarray, window: (int, int), sigma: float): start, end = window num_traces = len(traces) num_sample_points = end - start permutations = np.round(np.random.normal(scale=sigma, size=num_traces)).astype(int) if np.max(np.abs(permutations)) >= num_sample_points: raise Exception(f"Window jitter parameter ({sigma}) too high. 
PoI is not always within the resulting traces.") permutations += start res = np.ones((num_traces, num_sample_points), dtype=traces.dtype) for ix in tqdm(range(num_traces), f"Trace desynchronization, sigma={sigma}"): permutation = permutations[ix] res[ix] = traces[ix, permutation:permutation + num_sample_points] return res def apply_desync(db, x_prof_cxt, y_prof, x_att_cxt, y_att, window: (int, int), params: list): for param in params: pollution = Pollution(PollutionType.desync, param) out = TraceSetHW(db, pollution, (LIMIT_PROF, LIMIT_ATT)) if not os.path.exists(out.path): xn = desync(x_prof_cxt, window, param) xn_att = desync(x_att_cxt, window, param) out.create(xn, y_prof, xn_att, y_att) verify(db, pollution) apply_desync(DB, X_PROF_CXT, Y_PROF, X_ATT_CXT, Y_ATT, WINDOW, WINDOW_JITTER_PARAMS) def apply_gauss(db, params: list): for param in params: pollution = Pollution(PollutionType.gauss, param) default = TraceSetHW(db) out = TraceSetHW(db, pollution, (LIMIT_PROF, LIMIT_ATT)) x_prof, y_prof = default.profile_states() x_att, y_att = default.attack_states() if not os.path.exists(out.path): xn = gaussian_noise(x_prof, param) xn_att = gaussian_noise(x_att, param) out.create(xn, y_prof, xn_att, y_att) verify(db, pollution) apply_gauss(DB, GAUSS_PARAMS) ```
github_jupyter
# Machine Vision<br>Assignment 8 - Triangulation ## Personal details * **Name(s):** `` * **Student ID(s):** `` ## 1. Introduction In this assignment we will use a pair of stereo images to triangulate points in 3D. Let us first display the test images and 2D point correspondences. We also load 3D points mainly for testing purposes. The data is from __http://www.robots.ox.ac.uk/~vgg/data/data-mview.html__. ``` import cv2 import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D %matplotlib inline # Load image pair and convert to RGB left = cv2.imread('left.jpg') right = cv2.imread('right.jpg') left = cv2.cvtColor(left,cv2.COLOR_BGR2RGB) right = cv2.cvtColor(right,cv2.COLOR_BGR2RGB) # Load 2D points (2xN matrices) pts1 = np.load('pts1.npy') pts2 = np.load('pts2.npy') # Load 3D points (3xN matrix) pts3D = np.load('pts3D.npy') # Display 2D point correspondences plt.figure(figsize=(14,8)) plt.subplot(121) plt.imshow(left) plt.plot(pts1[0,:],pts1[1,:], '.r') plt.title('Left image') plt.axis('off') plt.subplot(122) plt.imshow(right) plt.plot(pts2[0,:],pts2[1,:], '.r') plt.title('Right image') plt.axis('off') ``` The camera projection matrices $\mathbf{C}$ and $\mathbf{C}'$ are also provided for both views. ``` # Load 3x4 camera projection matrices C1 = np.load('C1.npy') C2 = np.load('C2.npy') ``` ## 2. From 3D points to 2D points With the camera matrix $\mathbf{C} = \mathbf{K}[\mathbf{R} | \mathbf{t}]$ we can project 3D points to the 2D points on the image plane: $$ s \begin{pmatrix}u \\ v \\ 1 \end{pmatrix} = \mathbf{C} \begin{pmatrix} X \\ Y \\ Z \\ 1 \end{pmatrix} \qquad \qquad (1) $$ Note that we are using homogeneous coordinates. **2.1. Project 3D points** $\quad$ <font color=red>(0.50 points)</font> Complete the function `projectPts()` by following the instructions below. The function should project 3D points `pts3D` to 2D image points. The result should look like the previous figure. 
``` # INPUT # pts3D : 3D points (X,Y,Z) (3xN matrix) # C : Camera projection matrix (3x4 matrix) # # OUTPUT # pts2D : 2D points (x,y) (2xN matrix) # def projectPts(pts3D,C): N = pts3D.shape[1] # Number of points # ---------- YOUR CODE STARTS HERE ----------- # This line can be removed pts2D = np.zeros((2,N), dtype=np.float_) # 1. Convert 3D points from Euclidean to # homogeneous coordinates (4xN matrix) newarray = np.append(pts3D,np.ones((1,N), dtype=np.float_)) hom3D = newarray.reshape(pts3D.shape[0]+1,N) # 2. Project points using Equation 1 projectpoints = np.matmul(C,hom3D) for i in range(N): projectpoints[0,i] = np.divide(projectpoints[0,i],projectpoints[2,i]) projectpoints[1,i] = np.divide(projectpoints[1,i],projectpoints[2,i]) projectpoints[2,i] = 1 # 3. Covert 2D points from homogeneous to # Euclidean coordinates (2xN matrix) hom2D = np.append(projectpoints[0,:],projectpoints[1,:]) pts2D = hom2D.reshape(2,N) # ----------- YOUR CODE ENDS HERE ------------ return pts2D # Project 3D points and visualize the result points1 = projectPts(pts3D,C1) points2 = projectPts(pts3D,C2) plt.figure(figsize=(14,8)) plt.subplot(121) plt.imshow(left) plt.plot(points1[0,:],points1[1,:], '.r') plt.title('Left image') plt.axis('off') plt.subplot(122) plt.imshow(right) plt.plot(points2[0,:],points2[1,:], '.r') plt.title('Right image') plt.axis('off') ``` ## 3. From 2D points to 3D points The lecture notes describe a linear method to triangulate a point observed in two cameras (slide 196). 
Given a point $\mathbf{X} = (X,Y,Z)^{\top}$ the projection equations are: $$ \begin{pmatrix} s u \\ s v \\ s \end{pmatrix} = \begin{pmatrix} c_{11} & c_{12} & c_{13} & c_{14} \\ c_{21} & c_{22} & c_{23} & c_{24} \\ c_{31} & c_{32} & c_{33} & c_{34} \end{pmatrix} \begin{pmatrix} X \\ Y \\ Z \\ 1 \end{pmatrix} $$ $$ \begin{pmatrix} t u' \\ t v' \\ t \end{pmatrix} = \begin{pmatrix} c_{11}' & c_{12}' & c_{13}' & c_{14}' \\ c_{21}' & c_{22}' & c_{23}' & c_{24}' \\ c_{31}' & c_{32}' & c_{33}' & c_{34}' \end{pmatrix} \begin{pmatrix} X \\ Y \\ Z \\ 1 \end{pmatrix} $$ Here we have used the same notation as in the exercises. Eliminating $s$ and $t$ we obtain the system of equations: $$ (c_{31} u - c_{11}) X + (c_{32} u - c_{12}) Y + (c_{33} u - c_{13}) Z = c_{14} - c_{34} u \\ (c_{31} v - c_{21}) X + (c_{32} v - c_{22}) Y + (c_{33} v - c_{23}) Z = c_{24} - c_{34} v \\ (c_{31}' u' - c_{11}') X + (c_{32}' u' - c_{12}') Y + (c_{33}' u' - c_{13}') Z = c_{14}' - c_{34}' u' \\ (c_{31}' v' - c_{21}') X + (c_{32}' v' - c_{22}') Y + (c_{33}' v' - c_{23}') Z = c_{24}' - c_{34}' v' $$ which can be expressed in a linear system of the form: $\mathbf{Ax} = \mathbf{b}$ and solved using the least squares method. *See the Exercise 8 (Q2)*. <br>Notice also that equations are different from the ones presented in the lecture slides. **3.1. Triangulate** $\quad$ <font color=red>(1.50 points)</font> Complete the function `triangulatePts()`. Estimate 3D points given 2D points `pts1` and `pts2` and projection matrices `C1` and `C2`. For each point, form a linear system $\mathbf{Ax} = \mathbf{b}$, where $\mathbf{A}$ is a $4 \times 3$ matrix and $\mathbf{b}$ is a $4 \times 1$ vector. The least-squares solution can be obtained by $\hat{\mathbf{x}} = (\mathbf{A}^{\top} \mathbf{A})^{-1} \mathbf{A}^{\top} \mathbf{b}$, where $\hat{\mathbf{x}}$ contains the coordinates of the 3D point $(X,Y,Z)$. Once you have completed the function, execute the following code cell. 
The implementation is correct if the estimated 3D points `points3D` overlap with given 3D points `pts3D`. ``` # INPUT # pts1 : 2D points from the first image (2xN matrix) # pts2 : 2D points from the second image (2xN matrix) # C1 : Camera matrix for the first image (3x4 matrix) # C2 : Camera matrix for the second image (3x4 matrix) # # OUTPUT # pts3D : Triangulated 3D points (X,Y,Z) (3xN matrix) # from numpy.linalg import inv def triangulatePts(pts1,pts2,C1,C2): N = pts1.shape[1] # Number of points pts3D = np.zeros((3,N),dtype=np.float_) # ---------- YOUR CODE STARTS HERE ----------- # 1. For each point i, form A and b A = np.zeros((4,3),dtype=np.float_) b = np.zeros((4,1),dtype=np.float_) for i in range(N): A[0,0]=C1[2][0]*pts1[0,i]-C1[0][0] A[0,1]=C1[2][1]*pts1[0,i]-C1[0][1] A[0,2]=C1[2][2]*pts1[0,i]-C1[0][2] A[1,0]=C1[2][0] * pts1[1,i]-C1[1][0] A[1,1]=C1[2][1] * pts1[1,i]-C1[1][1] A[1,2]=C1[2][2] * pts1[1,i]-C1[1][2] A[2,0]=C2[2][0]*pts2[0,i]-C2[0][0] A[2,1]=C2[2][1]*pts2[0,i]-C2[0][1] A[2,2]=C2[2][2]*pts2[0,i]-C2[0][2] A[3,0]=C2[2][0] * pts2[1,i]-C2[1][0] A[3,1]=C2[2][1] * pts2[1,i]-C2[1][1] A[3,2]=C2[2][2] * pts2[1,i]-C2[1][2] b[0,0]=C1[0][3]-C1[2][3]*pts1[0,i] b[1,0]=C1[1][3]-C1[2][3]*pts1[1,i] b[2,0]=C2[0][3]-C2[2][3]*pts2[0,i] b[3,0]=C2[1][3]-C2[2][3]*pts2[1,i] # 2. Find solution to linear system Ax=b points = np.zeros((3,1),dtype=np.float_) points = np.matmul( np.matmul(inv(np.matmul(np.transpose(A) , A )), np.transpose(A)), b) # 3. 
Save triangulated point to pts3D[i,:] pts3D[0,i]=points[0] pts3D[1,i]=points[1] pts3D[2,i]=points[2] # ----------- YOUR CODE ENDS HERE ------------ return pts3D # Triangulate points and compare to given 3D points points3D = triangulatePts(pts1,pts2,C1,C2) fig = plt.figure(figsize=(12,12)) ax = fig.add_subplot(111, projection='3d') ax.scatter(-pts3D[1,:],pts3D[2,:],-pts3D[0,:], color='blue', label='Given 3D points') ax.scatter(-points3D[1,:],points3D[2,:],-points3D[0,:], color='red', label='Estimated 3D points') ax.set_xlabel('Y') ax.set_ylabel('Z') ax.set_zlabel('X') ax.set_title('Comparison of 3D points') ax.legend() ``` # Aftermath Finally, fill your answers to the following questions: **How much time did you need to complete this exercise?** `2 hours.` **Did you experience any problems with the exercise? Was there enough help available? Should this notebook be more (or less) detailed?** `None.` # References `None!` # Submission 1. Click on the menu `Kernel -> Restart & Clear Output` to clear all outputs and variables, etc. 2. Compress the resulting Jupyter notebook (`MV_A8.ipynb` file) into **`MV_A8_[student number(s)].zip`** (e.g. `MV_A8_1234567.zip` if solo work or `MV_A8_1234567-7654321.zip` if pair work). 3. Send an email to janne.mustaniemi@oulu.fi with the subject line `MV_A8_[student number(s)]`. Remember to attach the .zip file. **Deadline of the assignment 10.3.2019**
github_jupyter
# Import necessary depencencies ``` import pandas as pd import numpy as np import text_normalizer as tn import model_evaluation_utils as meu np.set_printoptions(precision=2, linewidth=80) ``` # Load and normalize data ``` dataset = pd.read_csv(r'movie_reviews.csv') reviews = np.array(dataset['review']) sentiments = np.array(dataset['sentiment']) # extract data for model evaluation test_reviews = reviews[35000:] test_sentiments = sentiments[35000:] sample_review_ids = [7626, 3533, 13010] # normalize dataset norm_test_reviews = tn.normalize_corpus(test_reviews) ``` # Sentiment Analysis with AFINN ``` from afinn import Afinn afn = Afinn(emoticons=True) ``` ## Predict sentiment for sample reviews ``` for review, sentiment in zip(test_reviews[sample_review_ids], test_sentiments[sample_review_ids]): print('REVIEW:', review) print('Actual Sentiment:', sentiment) print('Predicted Sentiment polarity:', afn.score(review)) print('-'*60) ``` ## Predict sentiment for test dataset ``` sentiment_polarity = [afn.score(review) for review in test_reviews] predicted_sentiments = ['positive' if score >= 1.0 else 'negative' for score in sentiment_polarity] ``` ## Evaluate model performance ``` meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=predicted_sentiments, classes=['positive', 'negative']) ``` # Sentiment Analysis with SentiWordNet ``` from nltk.corpus import sentiwordnet as swn awesome = list(swn.senti_synsets('awesome', 'a'))[0] print('Positive Polarity Score:', awesome.pos_score()) print('Negative Polarity Score:', awesome.neg_score()) print('Objective Score:', awesome.obj_score()) ``` ## Build model ``` def analyze_sentiment_sentiwordnet_lexicon(review, verbose=False): # tokenize and POS tag text tokens tagged_text = [(token.text, token.tag_) for token in tn.nlp(review)] pos_score = neg_score = token_count = obj_score = 0 # get wordnet synsets based on POS tags # get sentiment scores if synsets are found for word, tag in tagged_text: ss_set 
= None if 'NN' in tag and list(swn.senti_synsets(word, 'n')): ss_set = list(swn.senti_synsets(word, 'n'))[0] elif 'VB' in tag and list(swn.senti_synsets(word, 'v')): ss_set = list(swn.senti_synsets(word, 'v'))[0] elif 'JJ' in tag and list(swn.senti_synsets(word, 'a')): ss_set = list(swn.senti_synsets(word, 'a'))[0] elif 'RB' in tag and list(swn.senti_synsets(word, 'r')): ss_set = list(swn.senti_synsets(word, 'r'))[0] # if senti-synset is found if ss_set: # add scores for all found synsets pos_score += ss_set.pos_score() neg_score += ss_set.neg_score() obj_score += ss_set.obj_score() token_count += 1 # aggregate final scores final_score = pos_score - neg_score norm_final_score = round(float(final_score) / token_count, 2) final_sentiment = 'positive' if norm_final_score >= 0 else 'negative' if verbose: norm_obj_score = round(float(obj_score) / token_count, 2) norm_pos_score = round(float(pos_score) / token_count, 2) norm_neg_score = round(float(neg_score) / token_count, 2) # to display results in a nice table sentiment_frame = pd.DataFrame([[final_sentiment, norm_obj_score, norm_pos_score, norm_neg_score, norm_final_score]], columns=pd.MultiIndex(levels=[['SENTIMENT STATS:'], ['Predicted Sentiment', 'Objectivity', 'Positive', 'Negative', 'Overall']], labels=[[0,0,0,0,0],[0,1,2,3,4]])) print(sentiment_frame) return final_sentiment ``` ## Predict sentiment for sample reviews ``` for review, sentiment in zip(test_reviews[sample_review_ids], test_sentiments[sample_review_ids]): print('REVIEW:', review) print('Actual Sentiment:', sentiment) pred = analyze_sentiment_sentiwordnet_lexicon(review, verbose=True) print('-'*60) ``` ## Predict sentiment for test dataset ``` predicted_sentiments = [analyze_sentiment_sentiwordnet_lexicon(review, verbose=False) for review in norm_test_reviews] ``` ## Evaluate model performance ``` meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=predicted_sentiments, classes=['positive', 'negative']) ``` # 
Sentiment Analysis with VADER ``` from nltk.sentiment.vader import SentimentIntensityAnalyzer ``` ## Build model ``` def analyze_sentiment_vader_lexicon(review, threshold=0.1, verbose=False): # pre-process text review = tn.strip_html_tags(review) review = tn.remove_accented_chars(review) review = tn.expand_contractions(review) # analyze the sentiment for review analyzer = SentimentIntensityAnalyzer() scores = analyzer.polarity_scores(review) # get aggregate scores and final sentiment agg_score = scores['compound'] final_sentiment = 'positive' if agg_score >= threshold\ else 'negative' if verbose: # display detailed sentiment statistics positive = str(round(scores['pos'], 2)*100)+'%' final = round(agg_score, 2) negative = str(round(scores['neg'], 2)*100)+'%' neutral = str(round(scores['neu'], 2)*100)+'%' sentiment_frame = pd.DataFrame([[final_sentiment, final, positive, negative, neutral]], columns=pd.MultiIndex(levels=[['SENTIMENT STATS:'], ['Predicted Sentiment', 'Polarity Score', 'Positive', 'Negative', 'Neutral']], labels=[[0,0,0,0,0],[0,1,2,3,4]])) print(sentiment_frame) return final_sentiment ``` ## Predict sentiment for sample reviews ``` for review, sentiment in zip(test_reviews[sample_review_ids], test_sentiments[sample_review_ids]): print('REVIEW:', review) print('Actual Sentiment:', sentiment) pred = analyze_sentiment_vader_lexicon(review, threshold=0.4, verbose=True) print('-'*60) ``` ## Predict sentiment for test dataset ``` predicted_sentiments = [analyze_sentiment_vader_lexicon(review, threshold=0.4, verbose=False) for review in test_reviews] ``` ## Evaluate model performance ``` meu.display_model_performance_metrics(true_labels=test_sentiments, predicted_labels=predicted_sentiments, classes=['positive', 'negative']) ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns sns.set_style("whitegrid") # Applying gradient descent algorithm theta= 3 alpha = 0.1 data = [] for i in range(0,10): res = alpha * 2 * theta # update rule print("{0:.4f} {1:.4f}".format(theta, res)) data.append([theta, theta**2]) theta = theta - res tmp = pd.DataFrame(data) tmp # plotting plt.figure(figsize=(8,6)) plt.plot(np.linspace(-2, 4, 100), np.linspace(-2, 4, 100) **2); plt.scatter(tmp.iloc[:,0], tmp.iloc[:,1], marker= 'X'); plt.xlabel('theta'); plt.ylabel('J(theta)'); ``` #### Second Example $$J(\theta) = \theta^4 + \theta^2$$ $$\frac{d}{d\theta}.J(\theta) = 4\theta^3 + 2\theta$$ $$\theta:= \theta-\alpha . (4\theta^3 + 2\theta)$$ ``` theta = 3 alpha = 0.01 data = [] for i in range(0,10): res = alpha * (4* theta **3 + 2*theta) # update function print("{0:.4f} {1:.4f}".format(theta, res)) data.append([theta, theta**4 + theta**2]) theta = theta - res tmp = pd.DataFrame(data) tmp # plotting plt.figure(figsize=(8,6)) x_grid = np.linspace(-2, 4, 100) plt.plot(x_grid, x_grid**4 + x_grid**2); plt.scatter(tmp.iloc[:,0], tmp.iloc[:,1], marker='X'); plt.xlabel('theta') plt.ylabel('J(theta)') ``` # Applying Gradient Descent Algorithm to Boston House Data ``` from sklearn.datasets import load_boston # loading boston house data boston_data = load_boston() # making a dataframae for boston house data df = pd.DataFrame(boston_data.data, columns= boston_data.feature_names) # visualize dataset df.head() # taking one feature X = df[['LSTAT']].values # targets y = boston_data.target from sklearn.preprocessing import StandardScaler sc_x = StandardScaler() sc_y = StandardScaler() X_std = sc_x.fit_transform(X) y_std = sc_y.fit_transform(y.reshape(-1, 1)).flatten() print("For X: ") for i in range(5): print(X[i]) print('=================================================') print("For X_std: ") for i in range(5): print(X_std[i]) 
print('=================================================') print("For y: ") for i in range(5): print(y[i]) print('=================================================') print("For y_std: ") for i in range(5): print(y_std[i]) print('=================================================') print("Shape of X_std: ", X_std.shape) print("Shape of y_std: ", y_std.shape) alpha = 0.0001 w_ = np.zeros(1+ X_std.shape[1]) cost_ = [] n_ = 100 for i in range(n_): y_pred = np.dot(X_std, w_[1:]) + w_[0] errors = (y_std - y_pred) # updatig weights w_[1:] += alpha * X_std.T.dot(errors) w_[0] += alpha * errors.sum() cost = (errors**2).sum() / 2.0 cost_.append(cost) # plot plt.figure(figsize=(10,8)) plt.plot(range(1, n_ + 1), cost_); plt.ylabel('SSE') plt.xlabel('Epoch') ```
github_jupyter
# RadarCOVID-Report ## Data Extraction ``` import datetime import json import logging import os import shutil import tempfile import textwrap import uuid import matplotlib.pyplot as plt import matplotlib.ticker import numpy as np import pandas as pd import retry import seaborn as sns %matplotlib inline current_working_directory = os.environ.get("PWD") if current_working_directory: os.chdir(current_working_directory) sns.set() matplotlib.rcParams["figure.figsize"] = (15, 6) extraction_datetime = datetime.datetime.utcnow() extraction_date = extraction_datetime.strftime("%Y-%m-%d") extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1) extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d") extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H") current_hour = datetime.datetime.utcnow().hour are_today_results_partial = current_hour != 23 ``` ### Constants ``` from Modules.ExposureNotification import exposure_notification_io spain_region_country_code = "ES" germany_region_country_code = "DE" default_backend_identifier = spain_region_country_code backend_generation_days = 7 * 2 daily_summary_days = 7 * 4 * 3 daily_plot_days = 7 * 4 tek_dumps_load_limit = daily_summary_days + 1 ``` ### Parameters ``` environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER") if environment_backend_identifier: report_backend_identifier = environment_backend_identifier else: report_backend_identifier = default_backend_identifier report_backend_identifier environment_enable_multi_backend_download = \ os.environ.get("RADARCOVID_REPORT__ENABLE_MULTI_BACKEND_DOWNLOAD") if environment_enable_multi_backend_download: report_backend_identifiers = None else: report_backend_identifiers = [report_backend_identifier] report_backend_identifiers environment_invalid_shared_diagnoses_dates = \ os.environ.get("RADARCOVID_REPORT__INVALID_SHARED_DIAGNOSES_DATES") if environment_invalid_shared_diagnoses_dates: 
invalid_shared_diagnoses_dates = environment_invalid_shared_diagnoses_dates.split(",") else: invalid_shared_diagnoses_dates = [] invalid_shared_diagnoses_dates ``` ### COVID-19 Cases ``` report_backend_client = \ exposure_notification_io.get_backend_client_with_identifier( backend_identifier=report_backend_identifier) @retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10)) def download_cases_dataframe_from_ecdc(): return pd.read_csv( "https://opendata.ecdc.europa.eu/covid19/casedistribution/csv/data.csv") confirmed_df_ = download_cases_dataframe_from_ecdc() confirmed_df = confirmed_df_.copy() confirmed_df = confirmed_df[["dateRep", "cases", "geoId"]] confirmed_df.rename( columns={ "dateRep":"sample_date", "cases": "new_cases", "geoId": "country_code", }, inplace=True) confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True) confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_df.sort_values("sample_date", inplace=True) confirmed_df.tail() def sort_source_regions_for_display(source_regions: list) -> list: if report_backend_identifier in source_regions: source_regions = [report_backend_identifier] + \ list(sorted(set(source_regions).difference([report_backend_identifier]))) else: source_regions = list(sorted(source_regions)) return source_regions report_source_regions = report_backend_client.source_regions_for_date( date=extraction_datetime.date()) report_source_regions = sort_source_regions_for_display( source_regions=report_source_regions) report_source_regions confirmed_days = pd.date_range( start=confirmed_df.iloc[0].sample_date, end=extraction_datetime) confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"]) confirmed_days_df["sample_date_string"] = \ confirmed_days_df.sample_date.dt.strftime("%Y-%m-%d") confirmed_days_df.tail() source_regions_at_date_df = confirmed_days_df.copy() source_regions_at_date_df["source_regions_at_date"] = \ 
source_regions_at_date_df.sample_date.apply( lambda x: report_backend_client.source_regions_for_date(date=x)) source_regions_at_date_df.sort_values("sample_date", inplace=True) source_regions_at_date_df["_source_regions_group"] = source_regions_at_date_df. \ source_regions_at_date.apply(lambda x: ",".join(sort_source_regions_for_display(x))) source_regions_at_date_df.tail() source_regions_for_summary_df = \ source_regions_at_date_df[["sample_date", "_source_regions_group"]].copy() source_regions_for_summary_df.rename(columns={"_source_regions_group": "source_regions"}, inplace=True) source_regions_for_summary_df.tail() confirmed_output_columns = ["sample_date", "new_cases", "covid_cases"] confirmed_output_df = pd.DataFrame(columns=confirmed_output_columns) for source_regions_group, source_regions_group_series in \ source_regions_at_date_df.groupby("_source_regions_group"): source_regions_set = set(source_regions_group.split(",")) confirmed_source_regions_set_df = \ confirmed_df[confirmed_df.country_code.isin(source_regions_set)].copy() confirmed_source_regions_group_df = \ confirmed_source_regions_set_df.groupby("sample_date").new_cases.sum() \ .reset_index().sort_values("sample_date") confirmed_source_regions_group_df["covid_cases"] = \ confirmed_source_regions_group_df.new_cases.rolling(7, min_periods=0).mean().round() confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[confirmed_output_columns] confirmed_source_regions_group_df.fillna(method="ffill", inplace=True) confirmed_source_regions_group_df = \ confirmed_source_regions_group_df[ confirmed_source_regions_group_df.sample_date.isin( source_regions_group_series.sample_date_string)] confirmed_output_df = confirmed_output_df.append(confirmed_source_regions_group_df) confirmed_df = confirmed_output_df.copy() confirmed_df.tail() confirmed_df.rename(columns={"sample_date": "sample_date_string"}, inplace=True) confirmed_df = confirmed_days_df[["sample_date_string"]].merge(confirmed_df, 
how="left") confirmed_df.sort_values("sample_date_string", inplace=True) confirmed_df.fillna(method="ffill", inplace=True) confirmed_df.tail() confirmed_df[["new_cases", "covid_cases"]].plot() ``` ### Extract API TEKs ``` raw_zip_path_prefix = "Data/TEKs/Raw/" fail_on_error_backend_identifiers = [report_backend_identifier] multi_backend_exposure_keys_df = \ exposure_notification_io.download_exposure_keys_from_backends( backend_identifiers=report_backend_identifiers, generation_days=backend_generation_days, fail_on_error_backend_identifiers=fail_on_error_backend_identifiers, save_raw_zip_path_prefix=raw_zip_path_prefix) multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"] multi_backend_exposure_keys_df.rename( columns={ "generation_datetime": "sample_datetime", "generation_date_string": "sample_date_string", }, inplace=True) multi_backend_exposure_keys_df.head() early_teks_df = multi_backend_exposure_keys_df[ multi_backend_exposure_keys_df.rolling_period < 144].copy() early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6 early_teks_df[early_teks_df.sample_date_string != extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) early_teks_df[early_teks_df.sample_date_string == extraction_date] \ .rolling_period_in_hours.hist(bins=list(range(24))) multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[ "sample_date_string", "region", "key_data"]] multi_backend_exposure_keys_df.head() active_regions = \ multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist() active_regions multi_backend_summary_df = multi_backend_exposure_keys_df.groupby( ["sample_date_string", "region"]).key_data.nunique().reset_index() \ .pivot(index="sample_date_string", columns="region") \ .sort_index(ascending=False) multi_backend_summary_df.rename( columns={"key_data": "shared_teks_by_generation_date"}, inplace=True) 
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()


def compute_keys_cross_sharing(x):
    """Return common TEKs (and their fraction) between two backends.

    *x* is a one-row group with ``key_data_x`` / ``key_data_y`` arrays of
    TEKs from backends A and B; the fraction is relative to backend A.
    """
    teks_x = x.key_data_x.item()
    common_teks = set(teks_x).intersection(x.key_data_y.item())
    common_teks_fraction = len(common_teks) / len(teks_x)
    return pd.Series(dict(
        common_teks=common_teks,
        common_teks_fraction=common_teks_fraction,
    ))


# Cartesian product of regions (via a constant "_merge" key), minus the
# diagonal when more than one region is present.
multi_backend_exposure_keys_by_region_df = \
    multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
    multi_backend_exposure_keys_by_region_df.merge(
        multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
    columns=["_merge"], inplace=True)
if multi_backend_exposure_keys_by_region_combination_df.region_x.nunique() > 1:
    multi_backend_exposure_keys_by_region_combination_df = \
        multi_backend_exposure_keys_by_region_combination_df[
            multi_backend_exposure_keys_by_region_combination_df.region_x !=
            multi_backend_exposure_keys_by_region_combination_df.region_y]

multi_backend_exposure_keys_cross_sharing_df = \
    multi_backend_exposure_keys_by_region_combination_df \
        .groupby(["region_x", "region_y"]) \
        .apply(compute_keys_cross_sharing) \
        .reset_index()

multi_backend_cross_sharing_summary_df = \
    multi_backend_exposure_keys_cross_sharing_df.pivot_table(
        values=["common_teks_fraction"],
        columns="region_x",
        index="region_y",
        aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df

multi_backend_without_active_region_exposure_keys_df = \
    multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
    multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region

# Per-day unique TEK counts for the report backend only.
exposure_keys_summary_df = multi_backend_exposure_keys_df[
    multi_backend_exposure_keys_df.region == report_backend_identifier]
# NOTE(review): in-place drop on a boolean-mask slice can raise
# SettingWithCopyWarning; behavior kept as-is.
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
    exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
    exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()

# Dump API TEKs
tek_list_df = multi_backend_exposure_keys_df[
    ["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
    "sample_date_string": "sample_date",
    "key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
    ["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour

tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + f"/Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"

for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
    os.makedirs(os.path.dirname(path), exist_ok=True)

tek_list_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
    tek_list_current_path,
    lines=True, orient="records")
tek_list_df.drop(columns=["extraction_date_with_hour"]).to_json(
    tek_list_daily_path,
    lines=True, orient="records")
tek_list_df.to_json(
    tek_list_hourly_path,
    lines=True, orient="records")
tek_list_df.head()

# Load TEK Dumps
import glob


def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
    """Load historical TEK dumps ("Daily" or "Hourly"), newest first.

    Parameters
    ----------
    mode : str
        Dump folder to read ("Daily" or "Hourly").
    region : str, optional
        If given, keep only rows for that backend region.
    limit : int, optional
        Maximum number of dump files to load (newest first).
    """
    extracted_teks_df = pd.DataFrame(columns=["region"])
    file_paths = list(reversed(sorted(glob.glob(
        tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
    if limit:
        file_paths = file_paths[:limit]
    for file_path in file_paths:
        logging.info(f"Loading TEKs from '{file_path}'...")
        iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; pd.concat is the backward-compatible equivalent.
        extracted_teks_df = pd.concat(
            [extracted_teks_df, iteration_extracted_teks_df], sort=False)
    # Early dumps predate the "region" column; default them to Spain.
    extracted_teks_df["region"] = \
        extracted_teks_df.region.fillna(spain_region_country_code).copy()
    if region:
        extracted_teks_df = \
            extracted_teks_df[extracted_teks_df.region == region]
    return extracted_teks_df


daily_extracted_teks_df = load_extracted_teks(
    mode="Daily",
    region=report_backend_identifier,
    limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()

# Rebuild the per-generation-date summary from the historical dumps
# (covers days beyond the live backend's retention window).
exposure_keys_summary_df_ = daily_extracted_teks_df \
    .sort_values("extraction_date", ascending=False) \
    .groupby("sample_date").tek_list.first() \
    .to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
    exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
    .rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
    .sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()

# Daily New TEKs
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
    lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()


def compute_teks_by_generation_and_upload_date(date):
    """For one upload *date*, break the newly seen TEKs down by generation date.

    Returns a frame with upload_date, generation_date, shared_teks and the
    generation-to-upload delay in days; rows with zero shared TEKs dropped.
    """
    day_new_teks_set_df = tek_list_df.copy().diff()
    try:
        day_new_teks_set = day_new_teks_set_df[
            day_new_teks_set_df.index == date].tek_list.item()
    except ValueError:
        # No row for this date (e.g. first dump has no diff predecessor).
        day_new_teks_set = None
    if pd.isna(day_new_teks_set):
        day_new_teks_set = set()
    day_new_teks_df = daily_extracted_teks_df[
        daily_extracted_teks_df.extraction_date == date].copy()
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
    day_new_teks_df["shared_teks"] = \
        day_new_teks_df.shared_teks.apply(len)
    day_new_teks_df["upload_date"] = date
    day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
    day_new_teks_df = day_new_teks_df[
        ["upload_date", "generation_date", "shared_teks"]]
    day_new_teks_df["generation_to_upload_days"] = \
        (pd.to_datetime(day_new_teks_df.upload_date) -
         pd.to_datetime(day_new_teks_df.generation_date)).dt.days
    day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
    return day_new_teks_df


shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
    # FIX: DataFrame.append removed in pandas 2.0 → pd.concat.
    shared_teks_generation_to_upload_df = pd.concat([
        shared_teks_generation_to_upload_df,
        compute_teks_by_generation_and_upload_date(date=upload_date)])
shared_teks_generation_to_upload_df \
    .sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()

today_new_teks_df = \
    shared_teks_generation_to_upload_df[
        shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()

if not today_new_teks_df.empty:
    today_new_teks_df.set_index("generation_to_upload_days") \
        .sort_index().shared_teks.plot.bar()

generation_to_upload_period_pivot_df = \
    shared_teks_generation_to_upload_df[
        ["upload_date", "generation_to_upload_days", "shared_teks"]] \
        .pivot(index="upload_date", columns="generation_to_upload_days") \
        .sort_index(ascending=False).fillna(0).astype(int) \
        .droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()

# New TEKs per extraction day = size of the diff against the previous day.
new_tek_df = tek_list_df.diff().tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
    "tek_list": "shared_teks_by_upload_date",
    "extraction_date": "sample_date_string",
}, inplace=True)
new_tek_df.tail()

shared_teks_uploaded_on_generation_date_df = shared_teks_generation_to_upload_df[
    shared_teks_generation_to_upload_df.generation_to_upload_days == 0] \
    [["upload_date", "shared_teks"]].rename(
        columns={
            "upload_date": "sample_date_string",
            "shared_teks": "shared_teks_uploaded_on_generation_date",
        })
shared_teks_uploaded_on_generation_date_df.head()

# A diagnosis shares at most one TEK per generation day, so the per-day
# maximum is an upper-bound estimate of shared diagnoses.
estimated_shared_diagnoses_df = shared_teks_generation_to_upload_df \
    .groupby(["upload_date"]).shared_teks.max().reset_index() \
    .sort_values(["upload_date"], ascending=False) \
    .rename(columns={
        "upload_date": "sample_date_string",
        "shared_teks": "shared_diagnoses",
    })
invalid_shared_diagnoses_dates_mask = \
    estimated_shared_diagnoses_df.sample_date_string.isin(invalid_shared_diagnoses_dates)
# BUG FIX: the original `estimated_shared_diagnoses_df[mask] = 0` zeroed the
# WHOLE row — including sample_date_string — so invalidated dates lost their
# join key and silently dropped out of the later merge. Only the diagnosis
# count should be zeroed.
estimated_shared_diagnoses_df.loc[
    invalid_shared_diagnoses_dates_mask, "shared_diagnoses"] = 0
estimated_shared_diagnoses_df.head()

# Hourly New TEKs
hourly_extracted_teks_df = load_extracted_teks(
    mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()

hourly_new_tek_count_df = hourly_extracted_teks_df \
    .groupby("extraction_date_with_hour").tek_list. \
    apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
    .sort_index(ascending=True)
# New TEKs in an hour = set difference against the previous hourly dump.
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
    lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
    "new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
    "extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()

hourly_summary_df = hourly_new_tek_count_df.copy()
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
    hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
# Drop the oldest hour: its diff against a missing predecessor is unreliable.
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()

# Data Merge
result_summary_df = exposure_keys_summary_df.merge(
    new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()

result_summary_df = result_summary_df.merge(
    shared_teks_uploaded_on_generation_date_df,
    on=["sample_date_string"], how="outer")
result_summary_df.head()

result_summary_df = result_summary_df.merge(
    estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()

result_summary_df = confirmed_df.tail(daily_summary_days).merge(
    result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()

result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df = result_summary_df.merge(source_regions_for_summary_df, how="left")
result_summary_df.set_index(["sample_date", "source_regions"], inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()

# Derived ratios; inf (division by zero) is treated as NA and filled with 0.
with pd.option_context("mode.use_inf_as_na", True):
    result_summary_df = result_summary_df.fillna(0).astype(int)
    result_summary_df["teks_per_shared_diagnosis"] = \
        (result_summary_df.shared_teks_by_upload_date /
         result_summary_df.shared_diagnoses).fillna(0)
    result_summary_df["shared_diagnoses_per_covid_case"] = \
        (result_summary_df.shared_diagnoses /
         result_summary_df.covid_cases).fillna(0)

result_summary_df.head(daily_plot_days)

weekly_result_summary_df = result_summary_df \
    .sort_index(ascending=True).fillna(0).rolling(7).agg({
        "covid_cases": "sum",
        "shared_teks_by_generation_date": "sum",
        "shared_teks_by_upload_date": "sum",
        "shared_diagnoses": "sum"}) \
    .sort_index(ascending=False)

with pd.option_context("mode.use_inf_as_na", True):
    weekly_result_summary_df = weekly_result_summary_df.fillna(0).astype(int)
    weekly_result_summary_df["teks_per_shared_diagnosis"] = \
        (weekly_result_summary_df.shared_teks_by_upload_date /
         weekly_result_summary_df.shared_diagnoses).fillna(0)
    weekly_result_summary_df["shared_diagnoses_per_covid_case"] = \
        (weekly_result_summary_df.shared_diagnoses /
         weekly_result_summary_df.covid_cases).fillna(0)

weekly_result_summary_df.head()

# Index 0 is today (partial); index 1 is the last complete 7-day window.
last_7_days_summary = weekly_result_summary_df.to_dict(orient="records")[1]
last_7_days_summary

# Report Results
display_column_name_mapping = {
    "sample_date": "Sample\u00A0Date\u00A0(UTC)",
    "source_regions": "Source Countries",
    "datetime_utc": "Timestamp (UTC)",
    "upload_date": "Upload Date (UTC)",
    "generation_to_upload_days": "Generation to Upload Period in Days",
    "region": "Backend",
    "region_x": "Backend\u00A0(A)",
    "region_y": "Backend\u00A0(B)",
    "common_teks": "Common TEKs Shared Between Backends",
    "common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
    "covid_cases": "COVID-19 Cases in Source Countries (7-day Rolling Average)",
    "shared_teks_by_generation_date": "Shared TEKs by Generation Date",
    "shared_teks_by_upload_date": "Shared TEKs by Upload Date",
    "shared_diagnoses": "Shared Diagnoses (Estimation)",
    "teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis",
    "shared_diagnoses_per_covid_case": "Usage Ratio (Fraction of Cases in Source Countries Which Shared Diagnosis)",
    "shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date",
}

summary_columns = [
    "covid_cases",
    "shared_teks_by_generation_date",
    "shared_teks_by_upload_date",
    "shared_teks_uploaded_on_generation_date",
    "shared_diagnoses",
    "teks_per_shared_diagnosis",
    "shared_diagnoses_per_covid_case",
]

# Daily Summary Table
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
result_summary_with_display_names_df

# Daily Summary Plots
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
    .droplevel(level=["source_regions"]) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
    title=f"Daily Summary",
    rot=45, subplots=True, figsize=(15, 22), legend=False)
ax_ = summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
# Last subplot is the usage ratio — format its axis as percentages.
ax_.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))

# Daily Generation to Upload Period Table
display_generation_to_upload_period_pivot_df = \
    generation_to_upload_period_pivot_df \
        .head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
    .head(backend_generation_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping)

fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
    figsize=(12, 1 + 0.6 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
    "Shared TEKs Generation to Upload Period Table")
sns.heatmap(
    data=display_generation_to_upload_period_pivot_df
        .rename_axis(columns=display_column_name_mapping)
        .rename_axis(index=display_column_name_mapping),
    fmt=".0f", annot=True, ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()

# Hourly Summary Plots
hourly_summary_ax_list = hourly_summary_df \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .plot.bar(
        title=f"Last 24h Summary",
        rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))

# Publish Results
def get_temporary_image_path() -> str:
    """Return a unique PNG path inside the system temp directory."""
    return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")

def save_temporary_plot_image(ax):
    """Save the figure owning *ax* (or the first ax of an array) to a temp PNG."""
    if isinstance(ax, np.ndarray):
        ax = ax[0]
    media_path = get_temporary_image_path()
    ax.get_figure().savefig(media_path)
    return media_path

def save_temporary_dataframe_image(df):
    """Render *df* as a PNG table (via dataframe_image) and return its path."""
    import dataframe_image as dfi
    media_path = get_temporary_image_path()
    dfi.export(df, media_path)
    return media_path

github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
    github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository

display_formatters = {
    display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}",
    display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}",
}
daily_summary_table_html = result_summary_with_display_names_df \
    .head(daily_plot_days) \
    .rename_axis(index=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
    .head(daily_plot_days) \
    .rename_axis(columns=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping) \
    .to_html(formatters=display_formatters)

def format_multi_backend_cross_sharing_fraction(x):
    """Format a cross-sharing fraction: '-' for NA, blank for ~0, else percent."""
    if pd.isna(x):
        return "-"
    elif round(x * 100, 1) == 0:
        return ""
    else:
        return f"{x:.1%}"

multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
    .rename_axis(columns=display_column_name_mapping) \
    .rename(columns=display_column_name_mapping) \
    .rename_axis(index=display_column_name_mapping) \
    .to_html(
        classes="table-center",
        formatters=display_formatters,
        float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
    multi_backend_cross_sharing_summary_table_html \
        .replace("<tr>", "<tr style=\"text-align: center;\">")

extraction_date_result_summary_df = \
    result_summary_df[result_summary_df.index.get_level_values("sample_date") == extraction_date]
extraction_date_result_hourly_summary_df = \
    hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]

covid_cases = \
    extraction_date_result_summary_df.covid_cases.item()
shared_teks_by_generation_date = \
    extraction_date_result_summary_df.shared_teks_by_generation_date.item()
shared_teks_by_upload_date = \
    extraction_date_result_summary_df.shared_teks_by_upload_date.item()
shared_diagnoses = \
    extraction_date_result_summary_df.shared_diagnoses.item()
teks_per_shared_diagnosis = \
    extraction_date_result_summary_df.teks_per_shared_diagnosis.item()
shared_diagnoses_per_covid_case = \
    extraction_date_result_summary_df.shared_diagnoses_per_covid_case.item()

shared_teks_by_upload_date_last_hour = \
    extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)

display_source_regions = ", ".join(report_source_regions)
if len(report_source_regions) == 1:
    display_brief_source_regions = report_source_regions[0]
else:
    display_brief_source_regions = f"{len(report_source_regions)} 🇪🇺"

summary_plots_image_path = save_temporary_plot_image(
    ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
    df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
    ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
    df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
    ax=generation_to_upload_period_pivot_table_ax)

# Save Results
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
    report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
    report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
    report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
    report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
    report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
    report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
    summary_plots_image_path,
    report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
    summary_table_image_path,
    report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
    hourly_summary_plots_image_path,
    report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
    multi_backend_summary_table_image_path,
    report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
    generation_to_upload_period_pivot_table_image_path,
    report_resources_path_prefix + "Generation-Upload-Period-Table.png")

# Publish Results as JSON
def generate_summary_api_results(df: pd.DataFrame) -> list:
    """Convert a summary frame to JSON-friendly records (dates and regions split)."""
    api_df = df.reset_index().copy()
    api_df["sample_date_string"] = \
        api_df["sample_date"].dt.strftime("%Y-%m-%d")
    api_df["source_regions"] = \
        api_df["source_regions"].apply(lambda x: x.split(","))
    return api_df.to_dict(orient="records")

summary_api_results = \
    generate_summary_api_results(df=result_summary_df)
today_summary_api_results = \
    generate_summary_api_results(df=extraction_date_result_summary_df)[0]

summary_results = dict(
    backend_identifier=report_backend_identifier,
    source_regions=report_source_regions,
    extraction_datetime=extraction_datetime,
    extraction_date=extraction_date,
    extraction_date_with_hour=extraction_date_with_hour,
    last_hour=dict(
        shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
        shared_diagnoses=0,
    ),
    today=today_summary_api_results,
    last_7_days=last_7_days_summary,
    daily_results=summary_api_results)
# Round-trip through pandas JSON to serialize timestamps consistently.
summary_results = \
    json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
    json.dump(summary_results, f, indent=4)

# Publish on README
with open("Data/Templates/README.md", "r") as f:
    readme_contents = f.read()
readme_contents = readme_contents.format(
    extraction_date_with_hour=extraction_date_with_hour,
    github_project_base_url=github_project_base_url,
    daily_summary_table_html=daily_summary_table_html,
    multi_backend_summary_table_html=multi_backend_summary_table_html,
    multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
    display_source_regions=display_source_regions)
with open("README.md", "w") as f:
    f.write(readme_contents)

# Publish on Twitter — only on scheduled CI runs, and only when there is
# something to report (new TEKs this hour, or the day is complete).
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")

if enable_share_to_twitter and github_event_name == "schedule" and \
        (shared_teks_by_upload_date_last_hour or not are_today_results_partial):
    import tweepy

    twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
    twitter_api_auth_keys = twitter_api_auth_keys.split(":")
    auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
    auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])

    api = tweepy.API(auth)

    summary_plots_media = api.media_upload(summary_plots_image_path)
    summary_table_media = api.media_upload(summary_table_image_path)
    generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
    media_ids = [
        summary_plots_media.media_id,
        summary_table_media.media_id,
        generation_to_upload_period_pivot_table_image_media.media_id,
    ]

    if are_today_results_partial:
        today_addendum = " (Partial)"
    else:
        today_addendum = ""

    status = textwrap.dedent(f"""
        #RadarCOVID – {extraction_date_with_hour}

        Source Countries: {display_brief_source_regions}

        Today{today_addendum}:
        - Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
        - Shared Diagnoses: ≤{shared_diagnoses:.0f}
        - Usage Ratio: ≤{shared_diagnoses_per_covid_case:.2%}

        Last 7 Days:
        - Shared Diagnoses: ≤{last_7_days_summary["shared_diagnoses"]:.0f}
        - Usage Ratio: ≤{last_7_days_summary["shared_diagnoses_per_covid_case"]:.2%}

        Info: {github_project_base_url}#documentation
        """)
    status = status.encode(encoding="utf-8")
    api.update_status(status=status, media_ids=media_ids)
github_jupyter
# Plotting the Correlation between Air Quality and Weather ``` # If done right, this program should # Shoutout to my bois at StackOverflow - you da real MVPs # Shoutout to my bois over at StackOverflow - couldn't've done it without you import pandas as pd import numpy as np from bokeh.plotting import figure from bokeh.io import show from bokeh.models import HoverTool, Label import scipy.stats weatherfile = input("Which weather file would you like to use? ") df = pd.read_csv(weatherfile) temp = df.as_matrix(columns=df.columns[3:4]) temp = temp.ravel() humidity = df.as_matrix(columns=df.columns[4:5]) humidity = humidity.ravel() pressure = df.as_matrix(columns=df.columns[5:]) pressure = pressure.ravel() unix_timeweather = df.as_matrix(columns=df.columns[2:3]) i = 0 w_used = eval(raw_input("Which data set do you want? temp, humidity, or pressure? ")) ###################################################################################### aqfile = input("Which air quality file would you like to use? ") df2 = pd.read_csv(aqfile) PM25 = df2.as_matrix(columns=df2.columns[4:5]) PM1 = df2.as_matrix(columns=df2.columns[3:4]) PM10 = df2.as_matrix(columns=df2.columns[5:]) unix_timeaq = df2.as_matrix(columns=df2.columns[2:3]) aq_used = eval(raw_input("Which data set do you want? PM1, PM25, or PM10? 
")) ###################################################################################### def find_nearest(array, value): array = np.asarray(array) idx = (np.abs(array - value)).argmin() if np.abs(array[idx]-value) <= 30: # print str(value) + "Vs" + str(array[idx]) return idx else: return None ####################################################################################### def make_usable(array1, array): i = len(array1) - 1 while i > 0: if np.isnan(array[i]) or np.isnan(array1[i]): del array[i] del array1[i] i = i - 1 ####################################################################################### weatherarr = [] aqarr = [] i = 0 while i < len(aq_used): aqarr.append(float(aq_used[i])) nearest_time = find_nearest(unix_timeweather, unix_timeaq[i]) if nearest_time is None: weatherarr.append(np.nan) else: weatherarr.append(float(w_used[nearest_time])) i = i+1 # Plot the arrays ##################################################################### make_usable(weatherarr,aqarr) hoverp = HoverTool(tooltips=[("(x,y)", "($x, $y)")]) p = figure(tools = [hoverp]) correlation = Label(x=50, y=50, x_units='screen', y_units='screen', text="Pearson r and p: "+ str(scipy.stats.pearsonr(weatherarr, aqarr)),render_mode='css', border_line_color='black', border_line_alpha=1.0, background_fill_color='white', background_fill_alpha=1.0) p.add_layout(correlation) p.circle(x = weatherarr, y = aqarr, color = "firebrick") show(p) ```
github_jupyter
# Identifying special matrices ## Instructions In this assignment, you shall write a function that will test if a 4×4 matrix is singular, i.e. to determine if an inverse exists, before calculating it. You shall use the method of converting a matrix to echelon form, and testing if this fails by leaving zeros that can’t be removed on the leading diagonal. Don't worry if you've not coded before, a framework for the function has already been written. Look through the code, and you'll be instructed where to make changes. We'll do the first two rows, and you can use this as a guide to do the last two. ### Matrices in Python In the *numpy* package in Python, matrices are indexed using zero for the top-most column and left-most row. I.e., the matrix structure looks like this: ```python A[0, 0] A[0, 1] A[0, 2] A[0, 3] A[1, 0] A[1, 1] A[1, 2] A[1, 3] A[2, 0] A[2, 1] A[2, 2] A[2, 3] A[3, 0] A[3, 1] A[3, 2] A[3, 3] ``` You can access the value of each element individually using, ```python A[n, m] ``` which will give the n'th row and m'th column (starting with zero). You can also access a whole row at a time using, ```python A[n] ``` Which you will see will be useful when calculating linear combinations of rows. A final note - Python is sensitive to indentation. All the code you should complete will be at the same level of indentation as the instruction comment. ### How to submit Edit the code in the cell below to complete the assignment. Once you are finished and happy with it, press the *Submit Assignment* button at the top of this notebook. Please don't change any of the function names, as these will be checked by the grading script. If you have further questions about submissions or programming assignments, here is a [list](https://www.coursera.org/learn/linear-algebra-machine-learning/discussions/weeks/1/threads/jB4klkn5EeibtBIQyzFmQg) of Q&A. You can also raise an issue on the discussion forum. Good luck! 
```
# GRADED FUNCTION
import numpy as np

# Our function will go through the matrix replacing each row in order turning it into echelon form.
# If at any point it fails because it can't put a 1 in the leading diagonal,
# we will return the value True, otherwise, we will return False.
# There is no need to edit this function.
def isSingular(A) :
    """Return True if the 4x4 matrix A is singular, False otherwise.

    Works on a float copy of A, converting it to echelon form row by row.
    Singularity is signalled by the MatrixIsSingular exception raised from
    any of the fixRow* helpers when a leading 1 cannot be produced.
    """
    B = np.array(A, dtype=np.float_) # Make B as a copy of A, since we're going to alter its values.
    try:
        fixRowZero(B)
        fixRowOne(B)
        fixRowTwo(B)
        fixRowThree(B)
    except MatrixIsSingular:
        return True
    return False

# This next line defines our error flag. For when things go wrong if the matrix is singular.
# There is no need to edit this line.
class MatrixIsSingular(Exception): pass

# For Row Zero, all we require is the first element is equal to 1.
# We'll divide the row by the value of A[0, 0].
# This will get us in trouble though if A[0, 0] equals 0, so first we'll test for that,
# and if this is true, we'll add one of the lower rows to the first one before the division.
# We'll repeat the test going down each lower row until we can do the division.
# There is no need to edit this function.
def fixRowZero(A) :
    """Scale row zero of A (in place) so its leading element is 1.

    Adds lower rows into row zero while A[0, 0] is zero; raises
    MatrixIsSingular if no combination yields a non-zero pivot.
    """
    if A[0,0] == 0 :
        A[0] = A[0] + A[1]
    if A[0,0] == 0 :
        A[0] = A[0] + A[2]
    if A[0,0] == 0 :
        A[0] = A[0] + A[3]
    if A[0,0] == 0 :
        raise MatrixIsSingular()
    # otherwise, normalise the pivot to one
    A[0] = A[0] / A[0,0]
    return A

# First we'll set the sub-diagonal elements to zero, i.e. A[1,0].
# Next we want the diagonal element to be equal to one.
# We'll divide the row by the value of A[1, 1].
# Again, we need to test if this is zero.
# If so, we'll add a lower row and repeat setting the sub-diagonal elements to zero.
# There is no need to edit this function.
def fixRowOne(A) :
    """Put row one of A into echelon form (in place).

    Zeros the sub-diagonal entry A[1, 0] and scales the pivot A[1, 1]
    to 1, folding in lower rows whenever the pivot is zero; raises
    MatrixIsSingular if the pivot cannot be made non-zero.
    """
    A[1] = A[1] - A[1,0] * A[0]
    if A[1,1] == 0 :
        A[1] = A[1] + A[2]
        A[1] = A[1] - A[1,0] * A[0]
    if A[1,1] == 0 :
        A[1] = A[1] + A[3]
        A[1] = A[1] - A[1,0] * A[0]
    if A[1,1] == 0 :
        raise MatrixIsSingular()
    A[1] = A[1] / A[1,1]
    return A

# This is the first function that you should complete.
# Follow the instructions inside the function at each comment. def fixRowTwo(A) : # Insert code below to set the sub-diagonal elements of row two to zero (there are two of them). A[2] = A[2] - A[2,0] * A[0] A[2] = A[2] - A[2,1] * A[1] # Next we'll test that the diagonal element is not zero. if A[2,2] == 0 : # Insert code below that adds a lower row to row 2. A[2] = A[2] + A[3] # Now repeat your code which sets the sub-diagonal elements to zero. A[2] = A[2] - A[2,0] * A[0] A[2] = A[2] - A[2,1] * A[1] if A[2,2] == 0 : raise MatrixIsSingular() # Finally set the diagonal element to one by dividing the whole row by that element. A[2] = A[2] / A[2,2] return A # You should also complete this function # Follow the instructions inside the function at each comment. def fixRowThree(A) : # Insert code below to set the sub-diagonal elements of row three to zero. A[3] = A[3] - A[3,0] * A[0] A[3] = A[3] - A[3,1] * A[1] A[3] = A[3] - A[3,2] * A[2] # Complete the if statement to test if the diagonal element is zero. if A[3,3] == 0: raise MatrixIsSingular() # Transform the row to set the diagonal element to one. A[3] = A[3] / A[3,3] return A ``` ## Test your code before submission To test the code you've written above, run the cell (select the cell above, then press the play button [ ▶| ] or press shift-enter). You can then use the code below to test out your function. You don't need to submit this cell; you can edit and run it as much as you like. Try out your code on tricky test cases! ``` A = np.array([ [2, 0, 0, 0], [0, 3, 0, 0], [0, 0, 4, 4], [0, 0, 5, 5] ], dtype=np.float_) isSingular(A) A = np.array([ [0, 7, -5, 3], [2, 8, 0, 4], [3, 12, 0, 5], [1, 3, 1, 3] ], dtype=np.float_) fixRowZero(A) fixRowOne(A) fixRowTwo(A) fixRowThree(A) ```
github_jupyter
# Prosper Loan Data Exploration ## By Abhishek Tiwari # Preliminary Wrangling This data set contains information on peer to peer loans facilitated by credit company Prosper ``` # import all packages import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline df = pd.read_csv('prosperLoanData.csv') df.head() df.info() df.describe() df.sample(10) ``` Note that this data set contains 81 columns. For the purpose of this analysis I’ve took the following columns (variables): ``` target_columns = [ 'Term', 'LoanStatus', 'BorrowerRate', 'ProsperRating (Alpha)', 'ListingCategory (numeric)', 'EmploymentStatus', 'DelinquenciesLast7Years', 'StatedMonthlyIncome', 'TotalProsperLoans', 'LoanOriginalAmount', 'LoanOriginationDate', 'Recommendations', 'Investors' ] target_df = df[target_columns] target_df.sample(10) ``` Since Prosper use their own proprietary Prosper Rating only since 2009, we have a lot of missing values in ProsperRating column. Let's drop these missing values: ``` target_df.info() target_df.describe() ``` Since Prosper use their own proprietary Prosper Rating only since 2009, we have a lot of missing values in ProsperRating column. Let's drop these missing values: ``` target_df = target_df.dropna(subset=['ProsperRating (Alpha)']).reset_index() ``` Convert LoanOriginationDate to datetime datatype: ``` target_df['LoanOriginationDate'] = pd.to_datetime(target_df['LoanOriginationDate']) target_df['TotalProsperLoans'] = target_df['TotalProsperLoans'].fillna(0) target_df.info() ``` ### What is/are the main feature(s) of interest in your dataset? > Trying to figure out what features can be used to predict default on credit. Also i would like to check what are major factors connected with prosper credit rating. ### What features in the dataset do you think will help support your investigation into your feature(s) of interest? > I think that the borrowers Prosper rating will have the highest impact on chances of default. 
Also I expect that loan amount will play a major role and maybe the category of credit. Prosper rating will depend on stated income and employment status. ## Univariate Exploration ### Loan status ``` # setting color base_color = sns.color_palette()[0] plt.xticks(rotation=90) sns.countplot(data = target_df, x = 'LoanStatus', color = base_color); ``` Observation 1: * Most of the loans in the data set are actually current loans. * Past due loans are split in several groups based on the length of payment delay. * Other big part is completed loans, defaulted loans compromise a minority, however chargedoff loans also comporomise a substanial amount. ### Employment Status ``` sns.countplot(data = target_df, x = 'EmploymentStatus', color = base_color); plt.xticks(rotation = 90); ``` Observation 2: * The majority of borrowers are employed and all other categories as small part of borrowers. * In small Group full time has highest, after that self empolyed are there and so on. ### Stated Monthly Income ``` plt.hist(data=target_df, x='StatedMonthlyIncome', bins=1000); ``` (**Note**: Distribution of stated monthly income is highly skewed to the right. so, we have to check how many outliers are there) ``` income_std = target_df['StatedMonthlyIncome'].std() income_mean = target_df['StatedMonthlyIncome'].mean() boundary = income_mean + income_std * 3 len(target_df[target_df['StatedMonthlyIncome'] >= boundary]) ``` **After Zooming the Graph We Get This** ``` plt.hist(data=target_df, x='StatedMonthlyIncome', bins=1000); plt.xlim(0, boundary); ``` Observation 3: * With a boundary of mean and 3 times standard deviations distribution of monthly income still has noticeable right skew but now we can see that mode is about 5000. ### Discuss the distribution(s) of your variable(s) of interest. Were there any unusual points? Did you need to perform any transformations? 
> Distribution of monthly stated income is very awkward: with a lot of outliers and very large range but still it was right skew. The majority of borrowers are employed and all other categories as small part of borrowers and most of the loans in the data set are actually current loans. ### Of the features you investigated, were there any unusual distributions? Did you perform any operations on the data to tidy, adjust, or change the form of the data? If so, why did you do this? > The majority of loans are actually current loans. Since our main goal is to define driving factors of outcome of loan we are not interested in any current loans. ## Bivariate Exploration ``` #I'm just adjusting the form of data condition = (target_df['LoanStatus'] == 'Completed') | (target_df['LoanStatus'] == 'Defaulted') |\ (target_df['LoanStatus'] == 'Chargedoff') target_df = target_df[condition] def change_to_defaulted(row): if row['LoanStatus'] == 'Chargedoff': return 'Defaulted' else: return row['LoanStatus'] target_df['LoanStatus'] = target_df.apply(change_to_defaulted, axis=1) target_df['LoanStatus'].value_counts() ``` **After transforming dataset we have 19664 completed loans and 6341 defaulted.** ``` categories = {1: 'Debt Consolidation', 2: 'Home Improvement', 3: 'Business', 6: 'Auto', 7: 'Other'} def reduce_categorie(row): loan_category = row['ListingCategory (numeric)'] if loan_category in categories: return categories[loan_category] else: return categories[7] target_df['ListingCategory (numeric)'] = target_df.apply(reduce_categorie, axis=1) target_df['ListingCategory (numeric)'].value_counts() ``` Variable Listing Category is set up as numeric and most of the values have very `low frequency`, for the easier visualization so we have change it to `categorical and reduce the number of categories`. 
### Status and Prosper Rating: ``` sns.countplot(data = target_df, x = 'LoanStatus', hue = 'ProsperRating (Alpha)', palette = 'Blues') ``` Observation 1: * The `most frequent` rating among defaulted loans is actually `D`. * And the `most frequent` rating among Completed is also` D `and second highest is A and so on. ### Credit Start with Listing Category: ``` sns.countplot(data = target_df, x = 'LoanStatus', hue = 'ListingCategory (numeric)', palette = 'Blues'); ``` Observation 2: * In both of the Graphs the `debt Consolidation` have `most frequency among all of them`. ## Loan Status and Loan Amount ``` sns.boxplot(data = target_df, x = 'LoanStatus', y = 'LoanOriginalAmount', color = base_color); ``` Observation 3: * As from Above Graph we can state that `defaulted credits` tend to be `smaller` than `completed credits` onces. ## Prosper Rating and Employment Status ``` plt.figure(figsize = [12, 10]) sns.countplot(data = target_df, x = 'ProsperRating (Alpha)', hue = 'EmploymentStatus', palette = 'Blues'); ``` Observation 4: * Lower ratings seem to have greater proportions of individuals with employment status Not Employed, Self-employed, Retired and Part-Time. ## Talk about some of the relationships you observed in this part of the investigation. How did the feature(s) of interest vary with other features in the dataset? > In Loan status vs Loan amount defaulted credits tend to be smaller than completed credits onces. Employment status of individuals with lower ratings tends to be 'Not employed', 'Self-employed', 'Retired' or 'Part-time'. ## Did you observe any interesting relationships between the other features (not the main feature(s) of interest)? > Prosper rating D is the most frequent rating among defaulted credits. 
## Multivariate Exploration ## Rating, Loan Amount and Loan Status ``` plt.figure(figsize = [12, 8]) sns.boxplot(data=target_df, x='ProsperRating (Alpha)', y='LoanOriginalAmount', hue='LoanStatus'); ``` Observation 1: * Except for the lowest ratings defaulted credits tend to be larger than completed. * Most of the defaulted credits comes from individuals with low Prosper rating. ## Relationships between Credit category, Credit rating and outcome of Credit. ``` sns.catplot(x = 'ProsperRating (Alpha)', hue = 'LoanStatus', col = 'ListingCategory (numeric)', data = target_df, kind = 'count', palette = 'Blues', col_wrap = 3); ``` Observation 2: * There are 5 graphs in the second one has much up and downs in it other than all of them. * There is no substantial difference for default rates in different categories broken up by ratings. ## Amount, Listing Category Loan and Loan Status Interact ``` plt.figure(figsize = [12, 8]) sns.violinplot(data=target_df, x='ListingCategory (numeric)', y='LoanOriginalAmount', hue='LoanStatus'); ``` Observation 3: * Except for Auto, Business and Home Improvemrnt dont have nearly equal mean amoong all of them. * Business category tend to have larger amount. ## Talk about some of the relationships you observed in this part of the investigation. Were there features that strengthened each other in terms of looking at your feature(s) of interest? > Our initial assumptions were strengthened. Most of the defaulted credits comes from individuals with low Prosper rating and Business category tend to have larger amount. ## Were there any interesting or surprising interactions between features? > Interesting find was that defaulted credits for individuals with high Prosper ratings tend to be larger than completed credits.
github_jupyter
# Setup ``` %matplotlib inline import numpy as np import scipy.signal as sig import scipy.stats as stat import matplotlib.pyplot as plt import seaborn as sns import os import h5py import datetime import pandas as pd from pandas import DataFrame,Series,read_table ``` General info ``` savePlots = True # whether or not to save plots saveData = True # whether or not to save csv files saveAsPath = './Fig 03/' if not os.path.exists(saveAsPath): os.mkdir(saveAsPath) saveAsName = 'Fig3' #path = '/Users/svcanavan/Dropbox/Coding in progress/00_BudgieSleep/Data_copies/' birdPaths = ['../data_copies/01_PreprocessedData/01_BudgieFemale_green1/00_Baseline_night/', '../data_copies/01_PreprocessedData/02_BudgieMale_yellow1/00_Baseline_night/', '../data_copies/01_PreprocessedData/03_BudgieFemale_white1/00_Baseline_night/', '../data_copies/01_PreprocessedData/04_BudgieMale_yellow2/00_Baseline_night/', '../data_copies/01_PreprocessedData/05_BudgieFemale_green2/00_Baseline_night/'] arfFilePaths = ['EEG 2 scored/', 'EEG 3 scored/', 'EEG 3 scored/', 'EEG 4 scored/', 'EEG 4 scored/'] ### load BEST EEG channels - as determined during manual scoring #### channelsToLoadEEG_best = [['6 LEEGm-LEEGp', '5 LEEGf-LEEGp'], #, '9 REEGp-LEEGp'], # extra channel to represent R hemisphere ['5 LEEGf-LEEGm', '4 LEEGf-Fgr'], #, '9 REEGf-REEGm'], # extra channel to represent R hemisphere ['9REEGm-REEGp', '4LEEGf-LEEGp'], ['6LEEGm-LEEGf', '9REEGf-REEGp'], ['7REEGf-REEGp', '4LEEGf-LEEGp']] ### load ALL of EEG channels #### channelsToLoadEEG = [['4 LEEGf-Fgr', '5 LEEGf-LEEGp', '6 LEEGm-LEEGp', '7 LEEGp-Fgr', '8 REEGp-Fgr','9 REEGp-LEEGp'], ['4 LEEGf-Fgr','5 LEEGf-LEEGm', '6 LEEGm-LEEGp', '7 REEGf-Fgr', '8 REEGm-Fgr', '9 REEGf-REEGm'], ['4LEEGf-LEEGp', '5LEEGf-LEEGm', '6LEEGm-LEEGp', '7REEGf-REEGp', '8REEGf-REEGm', '9REEGm-REEGp'], ['4LEEGf-LEEGp', '5LEEGm-LEEGp', '6LEEGm-LEEGf', '7REEGf-Fgr', '8REEGf-REEGm','9REEGf-REEGp',], ['4LEEGf-LEEGp', '5LEEGf-LEEGm', '6LEEGm-LEEGp', '7REEGf-REEGp', '8REEGf-REEGm', 
'9REEGm-REEGp']] channelsToLoadEOG = [['1 LEOG-Fgr', '2 REOG-Fgr'], ['2 LEOG-Fgr', '3 REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr'], ['2LEOG-Fgr', '3REOG-Fgr']] birds_LL = [1,2,3] nBirds_LL = len(birds_LL) birdPaths_LL = ['../data_copies/01_PreprocessedData/02_BudgieMale_yellow1/01_Constant_light/', '../data_copies/01_PreprocessedData/03_BudgieFemale_white1/01_Constant_light/', '../data_copies/01_PreprocessedData/04_BudgieMale_yellow2/01_Constant_light/',] arfFilePaths_LL = ['EEG 2 preprocessed/', 'EEG 2 preprocessed/', 'EEG 2 preprocessed/'] lightsOffSec = np.array([7947, 9675, 9861 + 8*3600, 9873, 13467]) # lights off times in seconds from beginning of file lightsOnSec = np.array([46449, 48168, 48375+ 8*3600, 48381, 52005]) # Bird 3 gets 8 hours added b/c file starts at 8:00 instead of 16:00 epochLength = 3 sr = 200 scalingFactor = (2**15)*0.195 # scaling/conversion factor from amplitude to uV (when recording arf from jrecord) stages = ['w','d','u','i','s','r'] # wake, drowsy, unihem sleep, intermediate sleep, SWS, REM stagesSleep = ['u','i','s','r'] stagesVideo = ['m','q','d','s','u'] # moving wake, quiet wake, drowsy, sleep, unclear ## Path to scores formatted as CSVs formatted_scores_path = '../formatted_scores/' ## Path to detect SW ands EM events: use folder w/ EMs and EM artifacts detected during non-sleep events_path = '../data_copies/SWs_EMs_and_EMartifacts/' colors = sns.color_palette(np.array([[234,103,99], [218,142,60], [174,174,62], [97,188,101], [140,133,232], [225,113,190]]) /255) sns.palplot(colors) # colorpalette from iWantHue colors_birds = [np.repeat(.4, 3), np.repeat(.5, 3), np.repeat(.6, 3), np.repeat(.7, 3), np.repeat(.8, 3)] sns.palplot(colors_birds) ``` Plot-specific info ``` sns.set_context("notebook", font_scale=1.5) sns.set_style("white") # Markers for legends of EEG scoring colors legendMarkersEEG = [] for stage in range(len(stages)): legendMarkersEEG.append(plt.Line2D([0],[0], color=colors[stage], marker='o', 
linestyle='', alpha=0.7)) ``` Calculate general variables ``` lightsOffEp = lightsOffSec / epochLength lightsOnEp = lightsOnSec / epochLength nBirds = len(birdPaths) epochLengthPts = epochLength*sr nStages = len(stagesSleep) ``` ## LEGEND: bird colors ``` # Markers for bird colors legendMarkers_birds = [] for b in range(nBirds): legendMarkers_birds.append(plt.Line2D([0],[0], marker='o', color=colors_birds[b], linestyle='', alpha=0.7)) plt.legend(legendMarkers_birds, ['Bird 1', 'Bird 2', 'Bird 3', 'Bird 4', 'Bird 5']) sns.despine(bottom=True, left=True) plt.yticks([]) plt.xticks([]) if savePlots: plt.savefig(saveAsPath + saveAsName + "a-d_bird_color_legend.pdf") ``` ## Load formatted scores ``` AllScores = {} for b in range(nBirds): bird_name = 'Bird ' + str(b+1) file = formatted_scores_path + 'All_scores_' + bird_name + '.csv' data = pd.read_csv(file, index_col=0) AllScores[bird_name] = data ``` ## Calculate lights off in Zeitgeber time (s and hrs) Lights on is 0 ``` lightsOffDatetime = np.array([], dtype='datetime64') lightsOnDatetime = np.array([], dtype='datetime64') for b_num in range(nBirds): b_name = 'Bird ' + str(b_num+1) Scores = AllScores[b_name] startDatetime = np.datetime64(Scores.index.values[0]) # Calc lights off & on using datetime formats lightsOffTimedelta = lightsOffSec[b_num].astype('timedelta64[s]') lightsOffDatetime = np.append(lightsOffDatetime, startDatetime + lightsOffTimedelta) lightsOnTimedelta = lightsOnSec[b_num].astype('timedelta64[s]') lightsOnDatetime = np.append(lightsOnDatetime, startDatetime + lightsOnTimedelta) lightsOffZeit_s = lightsOffSec - lightsOnSec lightsOffZeit_hr = lightsOffZeit_s / 3600 ``` # With all sleep stages separately ## Merge continuous epochs of the same stage, same length & start time ``` ScoresMerged = {} for key in AllScores.keys(): scores = AllScores[key] # add a "stop" column scores['Stop (s)'] = scores['Time (s)'] + epochLength original_end_s = scores['Stop (s)'].iloc[-1] # add a dummy row wherever there is 
a gap between epochs gaps = np.where(scores['Stop (s)'] != scores['Time (s)'].shift(-1))[0] gaps = gaps[0:-1] # don't include the last row of the file as a gap gap_datetimes = scores.iloc[gaps].index.astype('datetime64') + np.timedelta64(3, 's') lines = pd.DataFrame({'Time (s)': scores['Stop (s)'].iloc[gaps].values, 'Stop (s)': scores['Time (s)'].iloc[gaps+1].values, 'Label': 'gap'}, index=gap_datetimes.astype('str')) scores = scores.append(lines, ignore_index=False) scores = scores.sort_index() # add a datetime column scores['datetime'] = scores.index # add a column to keep track of consecutive epochs of the same stage scores['episode #'] = (scores['Label'] != scores['Label'].shift(1)).astype(int).cumsum() # don't worry about the gaps where the video recording restarted # if the behavior is the same before and after the gap, count it as a continuous episode # if you want to split episodes where the gap occurs, add: # (scores['Stop (s)'] == scores['Time (s)'].shift(-1)) # combine all epochs of the same episode # and use the values from the first epoch of that episode merged_scores = scores.groupby(scores['episode #'], sort=False).aggregate('first') # calculate length of each episode lengths = merged_scores['Time (s)'].shift(-1) - merged_scores['Time (s)'] lengths.iloc[-1] = original_end_s - merged_scores['Time (s)'].iloc[-1] merged_scores['Length (s)'] = lengths # set index back to datetime merged_scores.index = merged_scores['datetime'] gap_rows =merged_scores[merged_scores['Label']=='gap'].index merged_scores = merged_scores.drop(gap_rows) ScoresMerged[key] = merged_scores # Check lengths print(len(AllScores['Bird 5']) * epochLength/3600) # original print(ScoresMerged['Bird 5']['Length (s)'].sum()/3600) # merged ``` ### Save to csv ``` if saveData: for b in range(nBirds): b_name = 'Bird ' + str(b+1) scores = ScoresMerged[b_name] scores.to_csv(saveAsPath + saveAsName + '_scores_merged_' + b_name + '.csv') ``` ## Frequency of episodes of each stage per bin ``` 
binSize_min = 60 binSize_s = np.timedelta64(int(binSize_min*60), 's') stageProportions_whole_night_all = {} for b in range(nBirds): nBins = int(np.ceil(np.min(lightsOnSec - lightsOffSec)/(60*binSize_min))) stageProportions = DataFrame([], columns=range(len(stages))) b_name = 'Bird ' + str(b+1) Scores = ScoresMerged[b_name] for bn in range(nBins): start_time = str(lightsOffDatetime[b] + bn*binSize_s).replace('T', ' ') end_time = str(lightsOffDatetime[b] + (bn+1)*binSize_s).replace('T', ' ') bn_scores = Scores[str(start_time):str(end_time)] bn_stage_frequencies = bn_scores['Label (#)'].value_counts(sort=False) stageProportions = stageProportions.append(bn_stage_frequencies, ignore_index=True) # Replace NaNs with 0 stageProportions = stageProportions.fillna(0) # Add to dictionary stageProportions_whole_night_all[b] = stageProportions ``` ### Save to csv ``` if saveData: for b in range(nBirds): b_name = 'Bird ' + str(b+1) stageProportions = stageProportions_whole_night_all[b] stageProportions.to_csv(saveAsPath + saveAsName + 'c_stage_frequencies_' + b_name + '.csv') ``` ### FIGURE 3C: Plot ``` figsize = (8,6) axis_label_fontsize = 24 # Line formatting linewidth = 5 linealpha = .7 marker = 'o' markersize = 10 err_capsize = 3 err_capthick = 3 elinewidth = 3 # Bar formatting bar_linewidth = 4 plt.figure(figsize=figsize) bird_means_by_stage = pd.DataFrame([]) All_per_bin_means = pd.DataFrame([]) for st in range(len(stages[3:6])): st_freqs = np.zeros((nBins, nBirds)) bird_means = np.zeros(nBirds) for b in range(nBirds): stageProportions = stageProportions_whole_night_all[b] st_freqs[:,b] = stageProportions[st+3] bird_means[b] = np.mean(stageProportions[st+3]) nighttime_mean = np.mean(st_freqs) per_bin_mean = np.mean(st_freqs, axis=1) per_bin_sd = np.std(st_freqs, axis=1) per_bin_sem = per_bin_sd / np.sqrt(nBirds) # save to dataframe All_per_bin_means[st+3] = per_bin_mean plt.errorbar(range(nBins), per_bin_mean, yerr=per_bin_sem, color=colors[3:6][st], linewidth=linewidth, 
alpha=linealpha, marker=marker, markersize=markersize, capsize=err_capsize, capthick=err_capthick, elinewidth=elinewidth); # Dots marking nighttime mean of each bird plt.scatter(np.ones(nBirds)*(nBins+2+(st*2)), bird_means, 50, color=colors[3:6][st]); # Bar graph of mean across all birds plt.bar(nBins+2+(st*2), np.mean(bird_means), width=2, color='none',edgecolor=colors[3:6][st], linewidth=bar_linewidth); print(stages[3:6][st] + ' : mean ' + str(np.mean(bird_means)) + ', SD ' + str(np.std(bird_means))) bird_means_by_stage[stages[3:6][st]] = bird_means # Dots color coded by bird for b in range(nBirds): plt.scatter(nBins+(np.arange(0,3)*2)+2, bird_means_by_stage.loc[b], 50, color=colors_birds[b], alpha=.5) plt.ylim(0,225) plt.xlim(-.5,19) # x tick labels: label each bar of the bar graph separately c = (0,0,0) plt.xticks([0,2,4,6,8,10,13,15,17], [0,2,4,6,8,10, 'IS', 'SWS','REM']); ax = plt.gca() [t.set_color(i) for (i,t) in zip([c,c,c,c,c,c,colors[3],colors[4],colors[5]],ax.xaxis.get_ticklabels())] plt.ylabel('Number of episodes / hour', fontsize=axis_label_fontsize) plt.xlabel('Hour of night Total', fontsize=axis_label_fontsize) sns.despine() if savePlots: plt.savefig(saveAsPath + saveAsName + "c_frequencies.pdf") ``` ### FIGURE 3C: STATISTICS ``` # One-way ANOVA: mean frequencies stat.f_oneway(bird_means_by_stage['i'],bird_means_by_stage['s'], bird_means_by_stage['r']) # Mean frequencies: IS vs REM stat.ttest_rel(bird_means_by_stage['i'], bird_means_by_stage['r']) # Mean frequencies: SWS vs REM stat.ttest_rel(bird_means_by_stage['s'], bird_means_by_stage['r']) # Mean frequencies: IS vs SWS stat.ttest_rel(bird_means_by_stage['i'], bird_means_by_stage['s']) # IS: regression with hour of night test = All_per_bin_means[3] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # IS: regression with hour of night test = All_per_bin_means[3][1:] # FIRST HOUR OMITTED 
slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # SWS: regression with hour of night test = All_per_bin_means[4] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # SWS: regression with hour of night test = All_per_bin_means[4][1:] # FIRST HOUR OMITTED slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # REM: regression with hour of night test = All_per_bin_means[5] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # REM: regression with hour of night test = All_per_bin_means[5][1:] # FIRST HOUR OMITTED slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) ``` ## Duration of episodes of each stage per bin ``` binSize_min = 60 binSize_s = np.timedelta64(int(binSize_min*60), 's') stageProportions_whole_night_all = {} for b in range(nBirds): nBins = int(np.ceil(np.min(lightsOnSec - lightsOffSec)/(60*binSize_min))) stageProportions = DataFrame([], columns=range(len(stages))) b_name = 'Bird ' + str(b+1) Scores = ScoresMerged[b_name] for bn in range(nBins): start_time = str(lightsOffDatetime[b] + bn*binSize_s).replace('T', ' ') end_time = str(lightsOffDatetime[b] + (bn+1)*binSize_s).replace('T', ' ') bn_scores = Scores[start_time:end_time] bn_stage_lengths = np.array([]) for st in range(len(stages)): bn_st_episodes = bn_scores[bn_scores['Label (#)'] == st] if len(bn_st_episodes) > 0: bn_avg_length = bn_st_episodes['Length (s)'].mean(0) else: bn_avg_length = np.nan bn_stage_lengths = np.append(bn_stage_lengths, bn_avg_length) 
stageProportions.loc[bn] = bn_stage_lengths # Add to dictionary stageProportions_whole_night_all[b] = stageProportions ``` ### FIGURE 3A: Plot ``` plt.figure(figsize=figsize) bird_means_by_stage = pd.DataFrame([]) # init All_per_bin_means = pd.DataFrame([]) for st in range(len(stages[3:6])): st_lengths = np.zeros((nBins, nBirds)) bird_means = np.zeros(nBirds) for b in range(nBirds): stageProportions = stageProportions_whole_night_all[b] st_lengths[:,b] = stageProportions[st+3] bird_means[b] = np.mean(stageProportions[st+3]) nighttime_mean = np.mean(st_lengths) per_bin_mean = np.mean(st_lengths, axis=1) per_bin_sd = np.std(st_lengths, axis=1) per_bin_sem = per_bin_sd / np.sqrt(nBirds) # save to dataframe All_per_bin_means[st+3] = per_bin_mean plt.errorbar(range(nBins), per_bin_mean, yerr=per_bin_sem, color=colors[3:6][st], linewidth=linewidth, alpha=linealpha, marker=marker, markersize=markersize, capsize=err_capsize, capthick=err_capthick, elinewidth=elinewidth); # Dots marking nighttime mean of each bird plt.scatter(np.ones(nBirds)*(nBins+(st*2)+2), bird_means, 50, color=colors[3:6][st]); # Bar graph of mean across all birds plt.bar(nBins+(st*2)+2, np.mean(bird_means), width=2, color='none',edgecolor=colors[3:6][st], linewidth=bar_linewidth); print(stages[3:6][st] + ' : mean ' + str(np.mean(bird_means)) + ', SD ' + str(np.std(bird_means))) bird_means_by_stage[stages[3:6][st]] = bird_means # Dots color coded by bird for b in range(nBirds): plt.scatter(nBins+(np.arange(0,3)*2)+2, bird_means_by_stage.loc[b], 50, color=colors_birds[b], alpha=.5) plt.ylim(0,25) plt.xlim(-.5,19) # x tick labels: label each bar of the bar graph separately c = (0,0,0) plt.xticks([0,2,4,6,8,10,13,15,17], [0,2,4,6,8,10,'IS', 'SWS','REM']); ax = plt.gca() [t.set_color(i) for (i,t) in zip([c,c,c,c,c,c,colors[3],colors[4],colors[5]],ax.xaxis.get_ticklabels())] plt.ylabel('Mean duration of episodes (s)', fontsize=axis_label_fontsize) plt.xlabel('Hour of night Total', 
fontsize=axis_label_fontsize) sns.despine() if savePlots: plt.savefig(saveAsPath + saveAsName + "a_durations.pdf") ``` ### FIGURE 3A STATISTICS ``` # One-way ANOVA: mean durations stat.f_oneway(bird_means_by_stage['i'],bird_means_by_stage['s'], bird_means_by_stage['r']) # Mean durations: IS vs REM stat.ttest_rel(bird_means_by_stage['i'], bird_means_by_stage['r']) # Mean durations: SWS vs REM stat.ttest_rel(bird_means_by_stage['s'], bird_means_by_stage['r']) # Mean durations: SWS vs IS stat.ttest_rel(bird_means_by_stage['s'], bird_means_by_stage['i']) # IS: regression with hour of night test = All_per_bin_means[3] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # SWS: regression with hour of night test = All_per_bin_means[4] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # REM: regression with hour of night test = All_per_bin_means[5] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) ``` ## Save as csv ``` if saveData: for b in range(nBirds): b_name = 'Bird ' + str(b+1) stageProportions = stageProportions_whole_night_all[b] stageProportions.to_csv(saveAsPath + saveAsName + 'a_stage_durations_' + b_name + '.csv') ``` # With NREM stages lumped ``` # THIS NOTEBOOK ONLY: stages = ['w','d','u', 'n', 'n','r'] # wake, drowsy, unihem sleep, NREM, REM stagesSleep = ['u','n', 'n','r'] # THIS NOTEBOOK ONLY - LUMP NREM: NREM_color = np.median(np.array([[97,188,101], [140,133,232]]),axis=0) colors = sns.color_palette(np.array([[234,103,99], [218,142,60], [174,174,62], NREM_color, NREM_color, [225,113,190]]) /255) sns.palplot(colors) # colorpalette from iWantHue ``` ## Reload formatted scores and replace IS and SWS with just "NREM" ``` AllScores = {} for b 
in range(nBirds): bird_name = 'Bird ' + str(b+1) file = formatted_scores_path + 'All_scores_' + bird_name + '.csv' data = pd.read_csv(file, index_col=0) labels = data['Label'].replace(to_replace=np.nan, value='u').values label_nums = data['Label (#)'].values indsNREM = [x for x in range(int(len(labels))) if ('i' in labels[x])|('s' in labels[x])] for ind in indsNREM: labels[ind] = 'n' label_nums[ind] = 4 data['Label'] = labels data['Label (#)'] = label_nums AllScores[bird_name] = data ``` ## Calculate lights off in Zeitgeber time (s and hrs) Lights on is 0 ``` lightsOffDatetime = np.array([], dtype='datetime64') lightsOnDatetime = np.array([], dtype='datetime64') for b_num in range(nBirds): b_name = 'Bird ' + str(b_num+1) Scores = AllScores[b_name] startDatetime = np.datetime64(Scores.index.values[0]) # Calc lights off & on using datetime formats lightsOffTimedelta = lightsOffSec[b_num].astype('timedelta64[s]') lightsOffDatetime = np.append(lightsOffDatetime, startDatetime + lightsOffTimedelta) lightsOnTimedelta = lightsOnSec[b_num].astype('timedelta64[s]') lightsOnDatetime = np.append(lightsOnDatetime, startDatetime + lightsOnTimedelta) lightsOffZeit_s = lightsOffSec - lightsOnSec lightsOffZeit_hr = lightsOffZeit_s / 3600 ``` ## Merge continuous epochs of the same stage, same length & start time ``` ScoresMerged = {} for key in AllScores.keys(): scores = AllScores[key] # add a "stop" column scores['Stop (s)'] = scores['Time (s)'] + epochLength original_end_s = scores['Stop (s)'].iloc[-1] # add a dummy row wherever there is a gap between epochs gaps = np.where(scores['Stop (s)'] != scores['Time (s)'].shift(-1))[0] gaps = gaps[0:-1] # don't include the last row of the file as a gap gap_datetimes = scores.iloc[gaps].index.astype('datetime64') + np.timedelta64(3, 's') lines = pd.DataFrame({'Time (s)': scores['Stop (s)'].iloc[gaps].values, 'Stop (s)': scores['Time (s)'].iloc[gaps+1].values, 'Label': 'gap'}, index=gap_datetimes.astype('str')) scores = 
scores.append(lines, ignore_index=False) scores = scores.sort_index() # add a datetime column scores['datetime'] = scores.index # add a column to keep track of consecutive epochs of the same stage scores['episode #'] = (scores['Label'] != scores['Label'].shift(1)).astype(int).cumsum() # don't worry about the gaps where the video recording restarted # if the behavior is the same before and after the gap, count it as a continuous episode # if you want to split episodes where the gap occurs, add: # (scores['Stop (s)'] == scores['Time (s)'].shift(-1)) # combine all epochs of the same episode # and use the values from the first epoch of that episode merged_scores = scores.groupby(scores['episode #'], sort=False).aggregate('first') # calculate length of each episode lengths = merged_scores['Time (s)'].shift(-1) - merged_scores['Time (s)'] lengths.iloc[-1] = original_end_s - merged_scores['Time (s)'].iloc[-1] merged_scores['Length (s)'] = lengths # set index back to datetime merged_scores.index = merged_scores['datetime'] gap_rows =merged_scores[merged_scores['Label']=='gap'].index merged_scores = merged_scores.drop(gap_rows) ScoresMerged[key] = merged_scores # Check lengths print(len(AllScores['Bird 5']) * epochLength/3600) # original print(ScoresMerged['Bird 5']['Length (s)'].sum()/3600) # merged ``` ## Frequency of episodes of each stage per bin ``` binSize_min = 60 binSize_s = np.timedelta64(int(binSize_min*60), 's') stageProportions_whole_night_all = {} for b in range(nBirds): nBins = int(np.ceil(np.min(lightsOnSec - lightsOffSec)/(60*binSize_min))) stageProportions = DataFrame([], columns=range(len(stages))) b_name = 'Bird ' + str(b+1) Scores = ScoresMerged[b_name] for bn in range(nBins): start_time = str(lightsOffDatetime[b] + bn*binSize_s).replace('T', ' ') end_time = str(lightsOffDatetime[b] + (bn+1)*binSize_s).replace('T', ' ') bn_scores = Scores[str(start_time):str(end_time)] bn_stage_frequencies = bn_scores['Label (#)'].value_counts(sort=False) 
stageProportions = stageProportions.append(bn_stage_frequencies, ignore_index=True) # Replace NaNs with 0 stageProportions = stageProportions.fillna(0) # Add to dictionary stageProportions_whole_night_all[b] = stageProportions ``` ### save to csv ``` if saveData: for b in range(nBirds): b_name = 'Bird ' + str(b+1) stageProportions = stageProportions_whole_night_all[b] stageProportions.to_csv(saveAsPath + saveAsName + 'd_NREM_lumped_stage_frequencies_' + b_name + '.csv') ``` ### FIGURE 3D Plot ``` figsize = (8,6) axis_label_fontsize = 24 # Line formatting linewidth = 5 linealpha = .7 marker = 'o' markersize = 10 err_capsize = 3 err_capthick = 3 elinewidth = 3 # Bar formatting bar_linewidth = 4 plt.figure(figsize=figsize) bird_means_by_stage = pd.DataFrame([]) All_per_bin_means = pd.DataFrame([]) for st in range(len(stages[4:6])): st_freqs = np.zeros((nBins, nBirds)) bird_means = np.zeros(nBirds) for b in range(nBirds): stageProportions = stageProportions_whole_night_all[b] st_freqs[:,b] = stageProportions[st+4] bird_means[b] = np.mean(stageProportions[st+4]) nighttime_mean = np.mean(st_freqs) per_bin_mean = np.mean(st_freqs, axis=1) per_bin_sd = np.std(st_freqs, axis=1) per_bin_sem = per_bin_sd / np.sqrt(nBirds) # save to dataframe All_per_bin_means[st+4] = per_bin_mean plt.errorbar(range(nBins), per_bin_mean, yerr=per_bin_sem, color=colors[4:6][st], linewidth=linewidth, alpha=linealpha, marker=marker, markersize=markersize, capsize=err_capsize, capthick=err_capthick, elinewidth=elinewidth); # Dots marking nighttime mean of each bird plt.scatter(np.ones(nBirds)*(nBins+2+(st*2)), bird_means, 50, color=colors[4:6][st]); # Bar graph of mean across all birds plt.bar(nBins+2+(st*2), np.mean(bird_means), width=2, color='none',edgecolor=colors[4:6][st], linewidth=bar_linewidth); print(stages[4:6][st] + ' : mean ' + str(np.mean(bird_means)) + ', SD ' + str(np.std(bird_means))) bird_means_by_stage[stages[4:6][st]] = bird_means # Dots color coded by bird for b in 
range(nBirds): plt.scatter(nBins+(np.arange(0,2)*2)+2, bird_means_by_stage.loc[b], 50, color=colors_birds[b], alpha=.5) plt.ylim(0,150) plt.xlim(-.5,19) # x tick labels: label each bar of the bar graph separately c = (0,0,0) plt.xticks([0,2,4,6,8,10,13,15], [0,2,4,6,8,10,'nonREM','REM']); ax = plt.gca() [t.set_color(i) for (i,t) in zip([c,c,c,c,c,c,colors[4],colors[5]],ax.xaxis.get_ticklabels())] plt.ylabel('Number of episodes / hour', fontsize=axis_label_fontsize) plt.xlabel('Hour of night Total', fontsize=axis_label_fontsize) sns.despine() if savePlots: plt.savefig(saveAsPath + saveAsName + "d_NREM_lumped_frequencies.pdf") ``` ### FIGURE 3D: STATISTICS ``` # Mean frequencies: NREM vs REM stat.ttest_rel(bird_means_by_stage['n'], bird_means_by_stage['r']) # NREM: regression with hour of night test = All_per_bin_means[4] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) ``` ## Duration of episodes of each stage per bin ``` binSize_min = 60 binSize_s = np.timedelta64(int(binSize_min*60), 's') stageProportions_whole_night_all = {} for b in range(nBirds): nBins = int(np.ceil(np.min(lightsOnSec - lightsOffSec)/(60*binSize_min))) stageProportions = DataFrame([], columns=range(len(stages))) b_name = 'Bird ' + str(b+1) Scores = ScoresMerged[b_name] for bn in range(nBins): start_time = str(lightsOffDatetime[b] + bn*binSize_s).replace('T', ' ') end_time = str(lightsOffDatetime[b] + (bn+1)*binSize_s).replace('T', ' ') bn_scores = Scores[start_time:end_time] bn_stage_lengths = np.array([]) for st in range(len(stages)): bn_st_episodes = bn_scores[bn_scores['Label (#)'] == st] if len(bn_st_episodes) > 0: bn_avg_length = bn_st_episodes['Length (s)'].mean(0) else: bn_avg_length = np.nan bn_stage_lengths = np.append(bn_stage_lengths, bn_avg_length) stageProportions.loc[bn] = bn_stage_lengths # Add to dictionary stageProportions_whole_night_all[b] = stageProportions ``` ### 
FIGURE 3B: Plot ``` plt.figure(figsize=figsize) bird_means_by_stage = pd.DataFrame([]) All_per_bin_means = pd.DataFrame([]) for st in range(len(stages[4:6])): st_lengths = np.zeros((nBins, nBirds)) bird_means = np.zeros(nBirds) for b in range(nBirds): stageProportions = stageProportions_whole_night_all[b] st_lengths[:,b] = stageProportions[st+4] bird_means[b] = np.mean(stageProportions[st+4]) nighttime_mean = np.mean(st_lengths) per_bin_mean = np.mean(st_lengths, axis=1) per_bin_sd = np.std(st_lengths, axis=1) per_bin_sem = per_bin_sd / np.sqrt(nBirds) # save to dataframe All_per_bin_means[st+4] = per_bin_mean plt.errorbar(range(nBins), per_bin_mean, yerr=per_bin_sem, color=colors[4:6][st], linewidth=linewidth, alpha=linealpha, marker=marker, markersize=markersize, capsize=err_capsize, capthick=err_capthick, elinewidth=elinewidth); # Dots marking nighttime mean of each bird plt.scatter(np.ones(nBirds)*(nBins+(st*2)+2), bird_means, 50, color=colors[4:6][st]); # Bar graph of mean across all birds plt.bar(nBins+(st*2)+2, np.mean(bird_means), width=2, color='none',edgecolor=colors[4:6][st], linewidth=bar_linewidth); print(stages[4:6][st] + ' : mean ' + str(np.mean(bird_means)) + ', SD ' + str(np.std(bird_means))) bird_means_by_stage[stages[4:6][st]] = bird_means # Dots color coded by bird for b in range(nBirds): plt.scatter(nBins+(np.arange(0,2)*2)+2, bird_means_by_stage.loc[b], 50, color=colors_birds[b], alpha=.5) plt.ylim(0,60) plt.xlim(-.5,19) # x tick labels: label each bar of the bar graph separately c = (0,0,0) plt.xticks([0,2,4,6,8,10,13,15], [0,2,4,6,8,10,'nonREM','REM']); ax = plt.gca() [t.set_color(i) for (i,t) in zip([c,c,c,c,c,c,colors[4],colors[5]],ax.xaxis.get_ticklabels())] plt.ylabel('Mean duration of episodes (s)', fontsize=axis_label_fontsize) plt.xlabel('Hour of night Total', fontsize=axis_label_fontsize) sns.despine() if savePlots: plt.savefig(saveAsPath + saveAsName + "b_NREM_lumped_durations.pdf") ``` ### FIGURE 3B STATISTICS ``` # Mean durations: 
NREM vs REM stat.ttest_rel(bird_means_by_stage['n'], bird_means_by_stage['r']) # NREM: regression with hour of night test = All_per_bin_means[4] slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) # NREM: regression with hour of night test = All_per_bin_means[4][1:] # ONLY SIGNIFICANT IF FIRST HOUR OMITTED slope, intercept, r_value, p_value, std_err = stat.linregress(test.index.values, test.values) print('slope =', slope, ', r2 =', r_value**2, ', p =', p_value) ``` ## Save as csv ``` if saveData: for b in range(nBirds): b_name = 'Bird ' + str(b+1) stageProportions = stageProportions_whole_night_all[b] stageProportions.to_csv(saveAsPath + saveAsName + 'b_NREM_lumped_stage_durations_' + b_name + '.csv') ```
github_jupyter
```
client_id = ''
client_secret = ''

import base64
import requests
import datetime
from urllib.parse import urlencode


class SpotifyAPI(object):
    """Minimal Spotify Web API client using the Client Credentials flow."""

    access_token = None
    access_token_expires = datetime.datetime.now()
    access_token_did_expire = True
    client_id = None
    client_secret = None
    token_url = "https://accounts.spotify.com/api/token"

    def __init__(self, client_id, client_secret, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client_id = client_id
        self.client_secret = client_secret

    def get_client_credentials(self):
        """
        RETURNS A BASE 64 ENCODE STRING
        """
        client_id = self.client_id
        client_secret = self.client_secret
        # use identity checks for None, and raise before building the header
        if client_id is None or client_secret is None:
            raise Exception("YOU MUST SET client_id AND client_secret")
        client_creds = f"{client_id}:{client_secret}"
        client_creds_b64 = base64.b64encode(client_creds.encode())
        return client_creds_b64.decode()

    def get_token_headers(self):
        """Return the Basic-auth header required by the token endpoint."""
        client_creds_b64 = self.get_client_credentials()
        return {
            "Authorization": f"Basic {client_creds_b64}"
        }

    def get_token_data(self):
        """Return the form body for the client-credentials grant."""
        return {
            "grant_type": "client_credentials"
        }

    def perform_auth(self):
        """Request an access token; returns True on success, False otherwise."""
        token_url = self.token_url
        token_data = self.get_token_data()
        token_headers = self.get_token_headers()
        r = requests.post(token_url, data=token_data, headers=token_headers)
        # print(r.json())
        if r.status_code not in range(200, 299):
            return False
        data = r.json()
        now = datetime.datetime.now()
        access_token = data['access_token']
        expires_in = data['expires_in']
        expires = now + datetime.timedelta(seconds=expires_in)
        self.access_token = access_token
        self.access_token_expires = expires
        self.access_token_did_expire = expires < now
        # print(f"access_token -> {access_token}")
        # print(f"now -> {now}")
        # print(f"expires_in -> {expires_in}")
        # print(f"expires -> {expires}")
        return True


spotify = SpotifyAPI(client_id, client_secret)
spotify.perform_auth()

# spotify.search
headers = {
    "Authorization": f"Bearer {spotify.access_token}"
}
endpoint_search = "https://api.spotify.com/v1/search"
data = urlencode({"q": "Time", "type": "track"})
print(data)

lookup_url = f"{endpoint_search}?{data}"
print(lookup_url)
r = requests.get(lookup_url, headers=headers)
print(r.status_code)
r.json()
```
github_jupyter
## Exercise 04: Plotting the Movement of an Aircraft with a Custom Layer

In this exercise, we will take a look at how to create custom layers that allow you to not only display geo-spatial data but also animate your datapoints over time. We'll get a deeper understanding of how geoplotlib works and how layers are created and drawn. Our dataset does not only contain spatial but also temporal information which enables us to plot flights over time on our map. There is an example on how to do this with taxis in the examples folder of geoplotlib.
https://github.com/andrea-cuttone/geoplotlib/blob/master/examples/taxi.py

**Note:** The dataset can be found here: https://datamillnorth.org/dataset/flight-tracking

#### Loading the dataset

This time our dataset contains flight data recorded from different machines. Each entry is assigned to a unique plane through a `hex_ident`. Each location is related to a specific timestamp that consists of a `date` and a `time`.

```
# importing the necessary dependencies
import pandas as pd

# loading the dataset from the csv file
dataset = pd.read_csv('../../Datasets/flight_tracking.csv')

# displaying the first 5 rows of the dataset
dataset.head()
```

Rename the latitude and longitude columns to lat and lon by using the rename method provided by pandas.

```
# renaming columns latitude to lat and longitude to lon
dataset = dataset.rename(index=str, columns={"latitude": "lat", "longitude": "lon"})
```

**Note:** Remember that geoplotlib needs columns that are named `lat` and `lon`. You will encounter an error if that is not the case.

```
# displaying the first 5 rows of the dataset
dataset.head()
```

---

#### Adding a unix timestamp

The easiest way to work with and handle time is to use a unix timestamp. In previous activities, we've already seen how to create a new column in our dataset by applying a function to it. We are using the datetime library to parse the date and time columns of our dataset and use it to create a unix timestamp.

Combine date and time into a timestamp, using the, already provided, to_epoch method.

```
# method to convert date and time to a unix timestamp
from datetime import datetime

def to_epoch(date, time):
    """Convert a 'date' and 'time' string pair into a rounded unix timestamp."""
    try:
        timestamp = round(datetime.strptime('{} {}'.format(date, time), '%Y/%m/%d %H:%M:%S.%f').timestamp())
        return timestamp
    except ValueError:
        # fall back to a fixed timestamp for rows that fail to parse
        return round(datetime.strptime('2017/09/11 17:02:06.418', '%Y/%m/%d %H:%M:%S.%f').timestamp())
```

Use to_epoch and the apply method provided by the pandas DataFrame to create a new column called timestamp that holds the unix timestamp.

```
# creating a new column called timestamp with the to_epoch method applied
dataset['timestamp'] = dataset.apply(lambda x: to_epoch(x['date'], x['time']), axis=1)

# displaying the first 5 rows of the dataset
dataset.head()
```

**Note:** We round up the milliseconds in our `to_epoch` method since epoch is the number of seconds (not milliseconds) that have passed since January 1st 1970. Of course we lose some precision here, but we want to focus on creating our own custom layer instead of wasting a lot of time with our dataset.

---

#### Writing our custom layer

After preparing our dataset, we can now start writing our custom layer. As mentioned at the beginning of this activity, it will be based on the taxi example of geoplotlib. We want to have a layer `TrackLayer` that takes an argument, dataset, which contains `lat` and `lon` data in combination with a `timestamp`. Given this data, we want to plot each point for each timestamp on the map, creating a tail behind the newest position of the plane. The geoplotlib colorbrewer is used to give each plane a color based on their unique `hex_ident`. The view (bounding box) of our visualization will be set to the city Leeds and a text information with the current timestamp is displayed in the upper right corner.

```
# custom layer creation
import geoplotlib
from geoplotlib.layers import BaseLayer
from geoplotlib.core import BatchPainter
from geoplotlib.colors import colorbrewer
from geoplotlib.utils import epoch_to_str, BoundingBox


class TrackLayer(BaseLayer):
    """Animated layer drawing each plane's recent positions over a sliding time window."""

    def __init__(self, dataset, bbox=BoundingBox.WORLD):
        self.data = dataset
        # one color per plane, keyed by hex_ident
        self.cmap = colorbrewer(self.data['hex_ident'], alpha=200)
        self.time = self.data['timestamp'].min()
        self.painter = BatchPainter()
        self.view = bbox

    def draw(self, proj, mouse_x, mouse_y, ui_manager):
        self.painter = BatchPainter()
        # only keep points inside a 180-second window after the current time
        df = self.data.where((self.data['timestamp'] > self.time) & (self.data['timestamp'] <= self.time + 180))

        for element in set(df['hex_ident']):
            grp = df.where(df['hex_ident'] == element)
            self.painter.set_color(self.cmap[element])
            x, y = proj.lonlat_to_screen(grp['lon'], grp['lat'])
            self.painter.points(x, y, 15, rounded=True)

        self.time += 1

        # loop the animation once we pass the last timestamp
        if self.time > self.data['timestamp'].max():
            self.time = self.data['timestamp'].min()

        self.painter.batch_draw()
        ui_manager.info('Current timestamp: {}'.format(epoch_to_str(self.time)))

    # bounding box that gets used when layer is created
    def bbox(self):
        return self.view
```

---

#### Visualization of the custom layer

After creating the custom layer, using it is as simple as using any other layer in geoplotlib. We can use the `add_layer` method and pass in our custom layer class with the parameters needed. Our data is focused on the UK and specifically Leeds. So we want to adjust our bounding box to exactly this area.

```
# bounding box for our view on leeds
from geoplotlib.utils import BoundingBox

leeds_bbox = BoundingBox(north=53.8074, west=-3, south=53.7074, east=0)
```

Use Geoplotlib to convert any pandas DataFrame into a DataAccessObject.

```
# displaying our custom layer using add_layer
from geoplotlib.utils import DataAccessObject

data = DataAccessObject(dataset)
geoplotlib.add_layer(TrackLayer(data, bbox=leeds_bbox))
geoplotlib.show()
```

**Note:** In order to avoid any errors associated with the library, we have to convert our pandas dataframe to a geoplotlib DataAccessObject. The creator of geoplotlib provides a handy interface for this conversion.

When looking at the upper right hand corner, we can clearly see the temporal aspect of this visualization. The first observation we make is that our data is really sparse, we sometimes only have a single data point for a plane, seldom is a whole path drawn. Even though it is so sparse, we can already get a feeling about where the planes are flying most.

**Note:** If you're interested in what else can be achieved with this custom layer approach, there are more examples in the geoplotlib repository.
- https://github.com/andrea-cuttone/geoplotlib/blob/master/examples/follow_camera.py
- https://github.com/andrea-cuttone/geoplotlib/blob/master/examples/quadtree.py
- https://github.com/andrea-cuttone/geoplotlib/blob/master/examples/kmeans.py
github_jupyter
## Compile per MOA p value for shuffled comparison

```
import pathlib
import numpy as np
import pandas as pd
import scipy.stats

# Load L2 distances per MOA
cp_l2_file = pathlib.Path("..", "cell-painting", "3.application", "L2_distances_with_moas.csv")
cp_l2_df = pd.read_csv(cp_l2_file).assign(shuffled="real")
cp_l2_df.loc[cp_l2_df.Model.str.contains("Shuffled"), "shuffled"] = "shuffled"

# architecture is the last word of the Model string
cp_l2_df = cp_l2_df.assign(
    architecture=[x[-1] for x in cp_l2_df.Model.str.split(" ")],
    assay="CellPainting",
    metric="L2 distance"
).rename(columns={"L2 Distance": "metric_value"})

print(cp_l2_df.shape)
cp_l2_df.head()

# Load Pearson correlations per MOA
cp_file = pathlib.Path("..", "cell-painting", "3.application", "pearson_with_moas.csv")
cp_pearson_df = pd.read_csv(cp_file).assign(shuffled="real")
cp_pearson_df.loc[cp_pearson_df.Model.str.contains("Shuffled"), "shuffled"] = "shuffled"

cp_pearson_df = cp_pearson_df.assign(
    architecture=[x[-1] for x in cp_pearson_df.Model.str.split(" ")],
    assay="CellPainting",
    metric="Pearson correlation"
).rename(columns={"Pearson": "metric_value"})

print(cp_pearson_df.shape)
cp_pearson_df.head()

# Combine data
cp_df = pd.concat([cp_l2_df, cp_pearson_df]).reset_index(drop=True)
print(cp_df.shape)
cp_df.head()

all_moas = cp_df.MOA.unique().tolist()
print(len(all_moas))

all_metrics = cp_df.metric.unique().tolist()
all_architectures = cp_df.architecture.unique().tolist()
all_architectures

results_df = []
for metric in all_metrics:
    for moa in all_moas:
        for arch in all_architectures:
            # subset data to include moa per architecture
            sub_cp_df = (
                cp_df
                .query(f"metric == '{metric}'")
                .query(f"architecture == '{arch}'")
                .query(f"MOA == '{moa}'")
                .reset_index(drop=True)
            )

            real_ = sub_cp_df.query("shuffled == 'real'").loc[:, "metric_value"].tolist()
            shuff_ = sub_cp_df.query("shuffled != 'real'").loc[:, "metric_value"].tolist()

            # Calculate zscore consistently with other experiments
            zscore_result = scipy.stats.zscore(shuff_ + real_)[-1]
            results_df.append([moa, arch, zscore_result, metric])

# Compile results
results_df = pd.DataFrame(results_df, columns=["MOA", "model", "zscore", "metric"])
print(results_df.shape)
results_df.head()

# Output data
output_file = pathlib.Path("data", "MOA_LSA_metrics.tsv")
results_df.to_csv(output_file, sep="\t", index=False)
```
github_jupyter
``` # This Python 3 environment comes with many helpful analytics libraries installed # It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python # For example, here's several helpful packages to load in import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory from subprocess import check_output print(check_output(["ls", "./data"]).decode("utf8")) # Any results you write to the current directory are saved as output. import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score\ ,fbeta_score,classification_report,confusion_matrix,precision_recall_curve,roc_auc_score\ ,roc_curve df_full = pd.read_csv('./data/creditcard.csv') df_full.head() df_full.Class.value_counts() df_full.sort_values(by='Class', ascending=False, inplace=True) #easier for stratified sampling df_full.drop('Time', axis=1, inplace = True) df_full.head() df_sample = df_full.iloc[:3000,:] df_sample.Class.value_counts() feature = np.array(df_sample.values[:,0:29]) label = np.array(df_sample.values[:,-1]) from sklearn.utils import shuffle shuffle_df = shuffle(df_sample, random_state=42) df_train = shuffle_df[0:2400] df_test = shuffle_df[2400:] train_feature = np.array(df_train.values[:,0:29]) train_label = np.array(df_train.values[:,-1]) test_feature = np.array(df_test.values[:,0:29]) test_label = np.array(df_test.values[:,-1]) train_feature.shape train_label.shape from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler() scaler.fit(train_feature) train_feature_trans = scaler.transform(train_feature) test_feature_trans = scaler.transform(test_feature) from keras.models import Sequential from keras.layers import Dense from 
keras.layers import Dropout import matplotlib.pyplot as plt def show_train_history(train_history,train,validation): plt.plot(train_history.history[train]) plt.plot(train_history.history[validation]) plt.title('Train History') plt.ylabel(train) plt.xlabel('Epoch') plt.legend(['train', 'validation'], loc='best') plt.show() model = Sequential() #一層一層到底,按順序 #輸入層(隱藏層1) model.add(Dense(units=200, input_dim=29, kernel_initializer='uniform', activation='relu')) model.add(Dropout(0.5)) #隱藏層2,不用寫input_dim,因為就是前一層的units model.add(Dense(units=200, kernel_initializer='uniform', activation='relu')) model.add(Dropout(0.5)) #輸出層 model.add(Dense(units=1, #輸出一個數字 kernel_initializer='uniform', activation='sigmoid')) print(model.summary()) #可以清楚看到model還有參數數量 model.compile(loss='binary_crossentropy', #二元用binary optimizer='adam', metrics=['accuracy']) train_history = model.fit(x=train_feature_trans, y=train_label, #上面多分割一步在keras是內建的 validation_split=0.8, epochs=200, batch_size=500, verbose=2) #verbose=2表示顯示訓練過程 ######################### 訓練過程視覺化 show_train_history(train_history,'acc','val_acc') show_train_history(train_history,'loss','val_loss') ######################### 實際測驗得分 scores = model.evaluate(test_feature_trans, test_label) print('\n') print('accuracy=',scores[1]) ######################### 紀錄模型預測情形(答案卷) prediction = model.predict_classes(test_feature_trans) #儲存訓練結果 model.save_weights("Keras_CreditCardFraud_MLP.h5") print('model saved to disk') df_ans = pd.DataFrame({'Real Class' :test_label}) df_ans['Prediction'] = prediction df_ans[ df_ans['Real Class'] != df_ans['Prediction'] ] df_ans['Prediction'].value_counts() df_ans['Real Class'].value_counts() import seaborn as sns %matplotlib inline cols = ['Real_Class_1','Real_Class_0'] #Gold standard rows = ['Prediction_1','Prediction_0'] #diagnostic tool (our prediction) B1P1 = len(df_ans[(df_ans['Prediction'] == df_ans['Real Class']) & (df_ans['Real Class'] == 1)]) B1P0 = len(df_ans[(df_ans['Prediction'] != df_ans['Real Class']) & 
(df_ans['Real Class'] == 1)]) B0P1 = len(df_ans[(df_ans['Prediction'] != df_ans['Real Class']) & (df_ans['Real Class'] == 0)]) B0P0 = len(df_ans[(df_ans['Prediction'] == df_ans['Real Class']) & (df_ans['Real Class'] == 0)]) conf = np.array([[B1P1,B0P1],[B1P0,B0P0]]) df_cm = pd.DataFrame(conf, columns = [i for i in cols], index = [i for i in rows]) f, ax= plt.subplots(figsize = (5, 5)) sns.heatmap(df_cm, annot=True, ax=ax, fmt='d') ax.xaxis.set_ticks_position('top') #Making x label be on top is common in textbooks. print('total test case number: ', np.sum(conf)) def model_efficacy(conf): total_num = np.sum(conf) sen = conf[0][0]/(conf[0][0]+conf[1][0]) spe = conf[1][1]/(conf[1][0]+conf[1][1]) false_positive_rate = conf[0][1]/(conf[0][1]+conf[1][1]) false_negative_rate = conf[1][0]/(conf[0][0]+conf[1][0]) print('total_num: ',total_num) print('G1P1: ',conf[0][0]) #G = gold standard; P = prediction print('G0P1: ',conf[0][1]) print('G1P0: ',conf[1][0]) print('G0P0: ',conf[1][1]) print('##########################') print('sensitivity: ',sen) print('specificity: ',spe) print('false_positive_rate: ',false_positive_rate) print('false_negative_rate: ',false_negative_rate) return total_num, sen, spe, false_positive_rate, false_negative_rate model_efficacy(conf) df_sample2 = df_full.iloc[:,:] #由於都是label=0,就不shuffle了 feature2 = np.array(df_sample2.values[:,0:29]) label2 = np.array(df_sample2.values[:,-1]) feature2_trans = scaler.transform(feature2) #using the same scaler as above ######################### 實際測驗得分 scores = model.evaluate(feature2_trans, label2) print('\n') print('accuracy=',scores[1]) ######################### 紀錄模型預測情形(答案卷) prediction2 = model.predict_classes(feature2_trans) prediction2_list = prediction2.reshape(-1).astype(int) label2_list = label2.astype(int) print(classification_report(label2_list, prediction2_list)) print(confusion_matrix(label2_list, prediction2_list)) conf = confusion_matrix(label2_list, prediction2_list) f, ax= plt.subplots(figsize = (5, 
def model_efficacy(conf):
    """Print and return diagnostic metrics for a 2x2 confusion matrix.

    Assumes the layout ``[[TP, FP], [FN, TN]]`` (rows = prediction,
    columns = gold standard), matching the manually-built matrix earlier
    in this notebook.

    NOTE(review): at this call site ``conf`` comes from sklearn's
    ``confusion_matrix``, whose convention is rows = true labels and
    columns = predictions — the transpose of the layout assumed here, so
    the G/P labels printed below may be swapped for that input; verify
    against the caller.

    Args:
        conf: 2x2 array-like confusion matrix.

    Returns:
        Tuple ``(total_num, sen, spe, false_positive_rate,
        false_negative_rate)``.
    """
    total_num = np.sum(conf)
    # Sensitivity (recall): TP / (TP + FN)
    sen = conf[0][0] / (conf[0][0] + conf[1][0])
    # Specificity: TN / (TN + FP).  The original divided by (FN + TN),
    # which is the negative predictive value, not specificity.
    spe = conf[1][1] / (conf[1][1] + conf[0][1])
    # False positive rate: FP / (FP + TN) = 1 - specificity
    false_positive_rate = conf[0][1] / (conf[0][1] + conf[1][1])
    # False negative rate: FN / (TP + FN) = 1 - sensitivity
    false_negative_rate = conf[1][0] / (conf[0][0] + conf[1][0])

    print('total_num: ', total_num)
    print('G1P1: ', conf[0][0])  # G = gold standard; P = prediction
    print('G0P1: ', conf[0][1])
    print('G1P0: ', conf[1][0])
    print('G0P0: ', conf[1][1])
    print('##########################')
    print('sensitivity: ', sen)
    print('specificity: ', spe)
    print('false_positive_rate: ', false_positive_rate)
    print('false_negative_rate: ', false_negative_rate)

    return total_num, sen, spe, false_positive_rate, false_negative_rate
'.join(output_nodes) restore_op_name = "save/restore_all" filename_tensor_name = "save/Const:0" clear_devices = True freeze_graph.freeze_graph(graph_path, input_saver_def_path, input_binary, save_path, output_node_names, restore_op_name, filename_tensor_name, model_frozen_path, clear_devices, "") print ("Model is frozen") # optimizing graph input_graph_def = tf.GraphDef() with tf.gfile.Open(model_frozen_path, "rb") as f: data = f.read() input_graph_def.ParseFromString(data) output_graph_def = optimize_for_inference_lib.optimize_for_inference( input_graph_def, input_nodes, # an array of the input node(s) output_nodes, # an array of output nodes tf.float32.as_datatype_enum) tf.train.write_graph(output_graph_def, OPTIMISED_MODEL_PATH, OPTIMISED_MODEL_NAME, as_text=False) tf.train.write_graph(output_graph_def, MODEL_PATH, OPTIMISED_MODEL_TXT_NAME, as_text=True) ```
github_jupyter
# Image similarity estimation using a Siamese Network with a contrastive loss **Author:** Mehdi<br> **Date created:** 2021/05/06<br> **Last modified:** 2021/05/06<br> **ORIGINAL SOURCE:** https://github.com/keras-team/keras-io/blob/master/examples/vision/ipynb/siamese_contrastive.ipynb<br> **Description:** Similarity learning using a siamese network trained with a contrastive loss. ### NOTE: **We adapted the code for 1D data.** ## Introduction [Siamese Networks](https://en.wikipedia.org/wiki/Siamese_neural_network) are neural networks which share weights between two or more sister networks, each producing embedding vectors of its respective inputs. In supervised similarity learning, the networks are then trained to maximize the contrast (distance) between embeddings of inputs of different classes, while minimizing the distance between embeddings of similar classes, resulting in embedding spaces that reflect the class segmentation of the training inputs. ## Setup ``` import random import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import matplotlib.pyplot as plt ``` ## Load the MNIST dataset ``` (x_train_val, y_train_val), (x_test_2D, y_test) = keras.datasets.mnist.load_data() # Change the data type to a floating point format x_train_val = x_train_val.astype("float32") x_test_2D = x_test_2D.astype("float32") ``` ## Define training and validation sets ``` # Keep 50% of train_val in validation set x_train_2D, x_val_2D = x_train_val[:30000], x_train_val[30000:] y_train, y_val = y_train_val[:30000], y_train_val[30000:] del x_train_val, y_train_val ``` ## Convert 2D to 1D ``` print(x_train_2D.shape, x_val_2D.shape, x_test_2D.shape) # convert 2D image to 1D image size = 28*28 x_train = x_train_2D.reshape(x_train_2D.shape[0], size) x_val = x_val_2D.reshape(x_val_2D.shape[0], size) x_test = x_test_2D.reshape(x_test_2D.shape[0], size) print(x_train.shape, x_val.shape, x_test.shape) ``` ## Create pairs of images We will 
def make_pairs(x, y):
    """Build positive and negative sample pairs with binary labels.

    For every sample, two pairs are emitted: one with a randomly chosen
    sample of the same class (label 1), and one with a randomly chosen
    sample of a different class (label 0).

    Arguments:
        x: Array of samples; index i corresponds to one sample.
        y: Array of integer class labels, aligned with x.

    Returns:
        Tuple (pairs, labels) where pairs has shape
        (2*len(x), 2, n_feature_dims) and labels is a binary float32
        array of length 2*len(x).
    """
    n_classes = max(y) + 1
    # Pre-compute, per class, the indices of all samples of that class.
    class_indices = [np.where(y == cls)[0] for cls in range(n_classes)]

    pair_list = []
    label_list = []
    for anchor_idx in range(len(x)):
        anchor = x[anchor_idx]
        anchor_cls = y[anchor_idx]

        # Positive pair: partner drawn from the anchor's own class.
        partner_idx = random.choice(class_indices[anchor_cls])
        pair_list.append([anchor, x[partner_idx]])
        label_list.append(1)

        # Negative pair: partner drawn from some other class.
        other_cls = random.randint(0, n_classes - 1)
        while other_cls == anchor_cls:
            other_cls = random.randint(0, n_classes - 1)
        partner_idx = random.choice(class_indices[other_cls])
        pair_list.append([anchor, x[partner_idx]])
        label_list.append(0)

    return np.array(pair_list), np.array(label_list).astype("float32")
## Define the model There will be two input layers, each leading to its own network, which produces embeddings.
# Provided two tensors t1 and t2
# Euclidean distance = sqrt(sum(square(t1-t2)))
def euclidean_distance(vects):
    """Compute the per-row Euclidean distance between two batched vectors.

    Arguments:
        vects: List/tuple containing two tensors of the same length.

    Returns:
        Tensor of Euclidean distances (floating point) between the paired
        rows, with the reduced axis kept (keepdims) so the result can feed
        the downstream layers directly.
    """
    left, right = vects
    squared_diff = tf.math.square(left - right)
    sum_square = tf.math.reduce_sum(squared_diff, axis=1, keepdims=True)
    # Clamp at epsilon so sqrt is never applied to an exact zero.
    return tf.math.sqrt(tf.math.maximum(sum_square, tf.keras.backend.epsilon()))
def loss(margin=1):
    """Build a contrastive-loss function with `margin` bound in its closure.

    Arguments:
        margin: Integer baseline distance beyond which pairs should be
            classified as dissimilar (default is 1).

    Returns:
        A `contrastive_loss(y_true, y_pred)` callable with `margin`
        attached, usable as a Keras loss.
    """

    # Contrastive loss = mean( (1-true_value) * square(prediction) +
    #                          true_value * square( max(margin-prediction, 0) ))
    def contrastive_loss(y_true, y_pred):
        """Compute the contrastive loss.

        Arguments:
            y_true: List of labels, each label of type float32.
            y_pred: List of predictions, same length as y_true, each of
                type float32.

        Returns:
            Tensor containing the contrastive loss as a floating point value.
        """
        pred_squared = tf.math.square(y_pred)
        clipped = tf.math.maximum(margin - y_pred, 0)
        margin_squared = tf.math.square(clipped)
        per_sample = (1 - y_true) * pred_squared + y_true * margin_squared
        return tf.math.reduce_mean(per_sample)

    return contrastive_loss
def plt_metric(history, metric, title, has_valid=True):
    """Plot the given `metric` from a Keras training `history` dict.

    Arguments:
        history: The history attribute of the History object returned by
            Model.fit.
        metric: String key of the metric to plot, present in `history`.
        title: String used as the plot title.
        has_valid: Boolean, True when validation data was passed to
            Model.fit, so the matching "val_" series exists.

    Returns:
        None.
    """
    plt.plot(history[metric])
    if has_valid:
        plt.plot(history["val_" + metric])
        plt.legend(["train", "validation"], loc="upper left")
    # Apply the textual decorations in one sweep.
    for setter, text in ((plt.title, title), (plt.ylabel, metric), (plt.xlabel, "epoch")):
        setter(text)
    plt.show()
github_jupyter
##### Copyright 2018 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Ragged Tensors <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/beta/guide/ragged_tensors"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/site/en/r2/guide/ragged_tensors.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Setup ``` from __future__ import absolute_import, division, print_function, unicode_literals import math !pip install tensorflow==2.0.0-beta0 import tensorflow as tf ``` ## Overview Your data comes in many shapes; your tensors should too. *Ragged tensors* are the TensorFlow equivalent of nested variable-length lists. They make it easy to store and process data with non-uniform shapes, including: * Variable-length features, such as the set of actors in a movie. 
* Batches of variable-length sequential inputs, such as sentences or video clips. * Hierarchical inputs, such as text documents that are subdivided into sections, paragraphs, sentences, and words. * Individual fields in structured inputs, such as protocol buffers. ### What you can do with a ragged tensor Ragged tensors are supported by more than a hundred TensorFlow operations, including math operations (such as `tf.add` and `tf.reduce_mean`), array operations (such as `tf.concat` and `tf.tile`), string manipulation ops (such as `tf.substr`), and many others: ``` digits = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) words = tf.ragged.constant([["So", "long"], ["thanks", "for", "all", "the", "fish"]]) print(tf.add(digits, 3)) print(tf.reduce_mean(digits, axis=1)) print(tf.concat([digits, [[5, 3]]], axis=0)) print(tf.tile(digits, [1, 2])) print(tf.strings.substr(words, 0, 2)) ``` There are also a number of methods and operations that are specific to ragged tensors, including factory methods, conversion methods, and value-mapping operations. For a list of supported ops, see the `tf.ragged` package documentation. As with normal tensors, you can use Python-style indexing to access specific slices of a ragged tensor. For more information, see the section on **Indexing** below. ``` print(digits[0]) # First row print(digits[:, :2]) # First two values in each row. print(digits[:, -2:]) # Last two values in each row. ``` And just like normal tensors, you can use Python arithmetic and comparison operators to perform elementwise operations. For more information, see the section on **Overloaded Operators** below. ``` print(digits + 3) print(digits + tf.ragged.constant([[1, 2, 3, 4], [], [5, 6, 7], [8], []])) ``` If you need to perform an elementwise transformation to the values of a `RaggedTensor`, you can use `tf.ragged.map_flat_values`, which takes a function plus one or more arguments, and applies the function to transform the `RaggedTensor`'s values. 
``` times_two_plus_one = lambda x: x * 2 + 1 print(tf.ragged.map_flat_values(times_two_plus_one, digits)) ``` ### Constructing a ragged tensor The simplest way to construct a ragged tensor is using `tf.ragged.constant`, which builds the `RaggedTensor` corresponding to a given nested Python `list`: ``` sentences = tf.ragged.constant([ ["Let's", "build", "some", "ragged", "tensors", "!"], ["We", "can", "use", "tf.ragged.constant", "."]]) print(sentences) paragraphs = tf.ragged.constant([ [['I', 'have', 'a', 'cat'], ['His', 'name', 'is', 'Mat']], [['Do', 'you', 'want', 'to', 'come', 'visit'], ["I'm", 'free', 'tomorrow']], ]) print(paragraphs) ``` Ragged tensors can also be constructed by pairing flat *values* tensors with *row-partitioning* tensors indicating how those values should be divided into rows, using factory classmethods such as `tf.RaggedTensor.from_value_rowids`, `tf.RaggedTensor.from_row_lengths`, and `tf.RaggedTensor.from_row_splits`. #### `tf.RaggedTensor.from_value_rowids` If you know which row each value belongs in, then you can build a `RaggedTensor` using a `value_rowids` row-partitioning tensor: ![value_rowids](https://www.tensorflow.org/images/ragged_tensors/value_rowids.png) ``` print(tf.RaggedTensor.from_value_rowids( values=[3, 1, 4, 1, 5, 9, 2, 6], value_rowids=[0, 0, 0, 0, 2, 2, 2, 3])) ``` #### `tf.RaggedTensor.from_row_lengths` If you know how long each row is, then you can use a `row_lengths` row-partitioning tensor: ![row_lengths](https://www.tensorflow.org/images/ragged_tensors/row_lengths.png) ``` print(tf.RaggedTensor.from_row_lengths( values=[3, 1, 4, 1, 5, 9, 2, 6], row_lengths=[4, 0, 3, 1])) ``` #### `tf.RaggedTensor.from_row_splits` If you know the index where each row starts and ends, then you can use a `row_splits` row-partitioning tensor: ![row_splits](https://www.tensorflow.org/images/ragged_tensors/row_splits.png) ``` print(tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8])) ``` See 
the `tf.RaggedTensor` class documentation for a full list of factory methods. ### What you can store in a ragged tensor As with normal `Tensor`s, the values in a `RaggedTensor` must all have the same type; and the values must all be at the same nesting depth (the *rank* of the tensor): ``` print(tf.ragged.constant([["Hi"], ["How", "are", "you"]])) # ok: type=string, rank=2 print(tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])) # ok: type=int32, rank=3 try: tf.ragged.constant([["one", "two"], [3, 4]]) # bad: multiple types except ValueError as exception: print(exception) try: tf.ragged.constant(["A", ["B", "C"]]) # bad: multiple nesting depths except ValueError as exception: print(exception) ``` ### Example use case The following example demonstrates how `RaggedTensor`s can be used to construct and combine unigram and bigram embeddings for a batch of variable-length queries, using special markers for the beginning and end of each sentence. For more details on the ops used in this example, see the `tf.ragged` package documentation. ``` queries = tf.ragged.constant([['Who', 'is', 'Dan', 'Smith'], ['Pause'], ['Will', 'it', 'rain', 'later', 'today']]) # Create an embedding table. num_buckets = 1024 embedding_size = 4 embedding_table = tf.Variable( tf.random.truncated_normal([num_buckets, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) # Look up the embedding for each word. word_buckets = tf.strings.to_hash_bucket_fast(queries, num_buckets) word_embeddings = tf.ragged.map_flat_values( tf.nn.embedding_lookup, embedding_table, word_buckets) # ① # Add markers to the beginning and end of each sentence. marker = tf.fill([queries.nrows(), 1], '#') padded = tf.concat([marker, queries, marker], axis=1) # ② # Build word bigrams & look up embeddings. 
bigrams = tf.strings.join([padded[:, :-1], padded[:, 1:]], separator='+') # ③ bigram_buckets = tf.strings.to_hash_bucket_fast(bigrams, num_buckets) bigram_embeddings = tf.ragged.map_flat_values( tf.nn.embedding_lookup, embedding_table, bigram_buckets) # ④ # Find the average embedding for each sentence all_embeddings = tf.concat([word_embeddings, bigram_embeddings], axis=1) # ⑤ avg_embedding = tf.reduce_mean(all_embeddings, axis=1) # ⑥ print(avg_embedding) ``` ![ragged_example](https://www.tensorflow.org/images/ragged_tensors/ragged_example.png) ## Ragged tensors: definitions ### Ragged and uniform dimensions A *ragged tensor* is a tensor with one or more *ragged dimensions*, which are dimensions whose slices may have different lengths. For example, the inner (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged, since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths. Dimensions whose slices all have the same length are called *uniform dimensions*. The outermost dimension of a ragged tensor is always uniform, since it consists of a single slice (and so there is no possibility for differing slice lengths). In addition to the uniform outermost dimension, ragged tensors may also have uniform inner dimensions. For example, we might store the word embeddings for each word in a batch of sentences using a ragged tensor with shape `[num_sentences, (num_words), embedding_size]`, where the parentheses around `(num_words)` indicate that the dimension is ragged. ![sent_word_embed](https://www.tensorflow.org/images/ragged_tensors/sent_word_embed.png) Ragged tensors may have multiple ragged dimensions. For example, we could store a batch of structured text documents using a tensor with shape `[num_documents, (num_paragraphs), (num_sentences), (num_words)]` (where again parentheses are used to indicate ragged dimensions). 
#### Ragged tensor shape restrictions The shape of a ragged tensor is currently restricted to have the following form: * A single uniform dimension * Followed by one or more ragged dimensions * Followed by zero or more uniform dimensions. Note: These restrictions are a consequence of the current implementation, and we may relax them in the future. ### Rank and ragged rank The total number of dimensions in a ragged tensor is called its ***rank***, and the number of ragged dimensions in a ragged tensor is called its ***ragged rank***. In graph execution mode (i.e., non-eager mode), a tensor's ragged rank is fixed at creation time: it can't depend on runtime values, and can't vary dynamically for different session runs. A ***potentially ragged tensor*** is a value that might be either a `tf.Tensor` or a `tf.RaggedTensor`. The ragged rank of a `tf.Tensor` is defined to be zero. ### RaggedTensor shapes When describing the shape of a RaggedTensor, ragged dimensions are indicated by enclosing them in parentheses. For example, as we saw above, the shape of a 3-D RaggedTensor that stores word embeddings for each word in a batch of sentences can be written as `[num_sentences, (num_words), embedding_size]`. The `RaggedTensor.shape` attribute returns a `tf.TensorShape` for a ragged tensor, where ragged dimensions have size `None`: ``` tf.ragged.constant([["Hi"], ["How", "are", "you"]]).shape ``` The method `tf.RaggedTensor.bounding_shape` can be used to find a tight bounding shape for a given `RaggedTensor`: ``` print(tf.ragged.constant([["Hi"], ["How", "are", "you"]]).bounding_shape()) ``` ## Ragged vs sparse tensors A ragged tensor should *not* be thought of as a type of sparse tensor, but rather as a dense tensor with an irregular shape. As an illustrative example, consider how array operations such as `concat`, `stack`, and `tile` are defined for ragged vs. sparse tensors. 
Concatenating ragged tensors joins each row to form a single row with the combined length: ![ragged_concat](https://www.tensorflow.org/images/ragged_tensors/ragged_concat.png) ``` ragged_x = tf.ragged.constant([["John"], ["a", "big", "dog"], ["my", "cat"]]) ragged_y = tf.ragged.constant([["fell", "asleep"], ["barked"], ["is", "fuzzy"]]) print(tf.concat([ragged_x, ragged_y], axis=1)) ``` But concatenating sparse tensors is equivalent to concatenating the corresponding dense tensors, as illustrated by the following example (where Ø indicates missing values): ![sparse_concat](https://www.tensorflow.org/images/ragged_tensors/sparse_concat.png) ``` sparse_x = ragged_x.to_sparse() sparse_y = ragged_y.to_sparse() sparse_result = tf.sparse.concat(sp_inputs=[sparse_x, sparse_y], axis=1) print(tf.sparse.to_dense(sparse_result, '')) ``` For another example of why this distinction is important, consider the definition of “the mean value of each row” for an op such as `tf.reduce_mean`. For a ragged tensor, the mean value for a row is the sum of the row’s values divided by the row’s width. But for a sparse tensor, the mean value for a row is the sum of the row’s values divided by the sparse tensor’s overall width (which is greater than or equal to the width of the longest row). ## Overloaded operators The `RaggedTensor` class overloads the standard Python arithmetic and comparison operators, making it easy to perform basic elementwise math: ``` x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) y = tf.ragged.constant([[1, 1], [2], [3, 3, 3]]) print(x + y) ``` Since the overloaded operators perform elementwise computations, the inputs to all binary operations must have the same shape, or be broadcastable to the same shape. In the simplest broadcasting case, a single scalar is combined elementwise with each value in a ragged tensor: ``` x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]]) print(x + 3) ``` For a discussion of more advanced cases, see the section on **Broadcasting**. 
Ragged tensors overload the same set of operators as normal `Tensor`s: the unary operators `-`, `~`, and `abs()`; and the binary operators `+`, `-`, `*`, `/`, `//`, `%`, `**`, `&`, `|`, `^`, `<`, `<=`, `>`, and `>=`. Note that, as with standard `Tensor`s, binary `==` is not overloaded; you can use `tf.equal()` to check elementwise equality. ## Indexing Ragged tensors support Python-style indexing, including multidimensional indexing and slicing. The following examples demonstrate ragged tensor indexing with a 2-D and a 3-D ragged tensor. ### Indexing a 2-D ragged tensor with 1 ragged dimension ``` queries = tf.ragged.constant( [['Who', 'is', 'George', 'Washington'], ['What', 'is', 'the', 'weather', 'tomorrow'], ['Goodnight']]) print(queries[1]) print(queries[1, 2]) # A single word print(queries[1:]) # Everything but the first row print(queries[:, :3]) # The first 3 words of each query print(queries[:, -2:]) # The last 2 words of each query ``` ### Indexing a 3-D ragged tensor with 2 ragged dimensions ``` rt = tf.ragged.constant([[[1, 2, 3], [4]], [[5], [], [6]], [[7]], [[8, 9], [10]]]) print(rt[1]) # Second row (2-D RaggedTensor) print(rt[3, 0]) # First element of fourth row (1-D Tensor) print(rt[:, 1:3]) # Items 1-3 of each row (3-D RaggedTensor) print(rt[:, -1:]) # Last item of each row (3-D RaggedTensor) ``` `RaggedTensor`s supports multidimensional indexing and slicing, with one restriction: indexing into a ragged dimension is not allowed. This case is problematic because the indicated value may exist in some rows but not others. In such cases, it's not obvious whether we should (1) raise an `IndexError`; (2) use a default value; or (3) skip that value and return a tensor with fewer rows than we started with. Following the [guiding principles of Python](https://www.python.org/dev/peps/pep-0020/) ("In the face of ambiguity, refuse the temptation to guess" ), we currently disallow this operation. 
or row-partitioning methods such as `tf.RaggedTensor.row_lengths()` and `tf.RaggedTensor.value_rowids()`.
For each dimension where `x` and `y` have different sizes: * If `x` or `y` have size `1` in dimension `d`, then repeat its values across dimension `d` to match the other input's size. * Otherwise, raise an exception (`x` and `y` are not broadcast compatible). Where the size of a tensor in a uniform dimension is a single number (the size of slices across that dimension); and the size of a tensor in a ragged dimension is a list of slice lengths (for all slices across that dimension). #### Broadcasting examples ``` # x (2D ragged): 2 x (num_rows) # y (scalar) # result (2D ragged): 2 x (num_rows) x = tf.ragged.constant([[1, 2], [3]]) y = 3 print(x + y) # x (2d ragged): 3 x (num_rows) # y (2d tensor): 3 x 1 # Result (2d ragged): 3 x (num_rows) x = tf.ragged.constant( [[10, 87, 12], [19, 53], [12, 32]]) y = [[1000], [2000], [3000]] print(x + y) # x (3d ragged): 2 x (r1) x 2 # y (2d ragged): 1 x 1 # Result (3d ragged): 2 x (r1) x 2 x = tf.ragged.constant( [[[1, 2], [3, 4], [5, 6]], [[7, 8]]], ragged_rank=1) y = tf.constant([[10]]) print(x + y) # x (3d ragged): 2 x (r1) x (r2) x 1 # y (1d tensor): 3 # Result (3d ragged): 2 x (r1) x (r2) x 3 x = tf.ragged.constant( [ [ [[1], [2]], [], [[3]], [[4]], ], [ [[5], [6]], [[7]] ] ], ragged_rank=2) y = tf.constant([10, 20, 30]) print(x + y) ``` Here are some examples of shapes that do not broadcast: ``` # x (2d ragged): 3 x (r1) # y (2d tensor): 3 x 4 # trailing dimensions do not match x = tf.ragged.constant([[1, 2], [3, 4, 5, 6], [7]]) y = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) # x (2d ragged): 3 x (r1) # y (2d ragged): 3 x (r2) # ragged dimensions do not match. 
x = tf.ragged.constant([[1, 2, 3], [4], [5, 6]]) y = tf.ragged.constant([[10, 20], [30, 40], [50]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) # x (3d ragged): 3 x (r1) x 2 # y (3d ragged): 3 x (r1) x 3 # trailing dimensions do not match x = tf.ragged.constant([[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10]]]) y = tf.ragged.constant([[[1, 2, 0], [3, 4, 0], [5, 6, 0]], [[7, 8, 0], [9, 10, 0]]]) try: x + y except tf.errors.InvalidArgumentError as exception: print(exception) ``` ## RaggedTensor encoding Ragged tensors are encoded using the `RaggedTensor` class. Internally, each `RaggedTensor` consists of: * A `values` tensor, which concatenates the variable-length rows into a flattened list. * A `row_splits` vector, which indicates how those flattened values are divided into rows. In particular, the values for row `rt[i]` are stored in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`. ![ragged_encoding](https://www.tensorflow.org/images/ragged_tensors/ragged_encoding.png) ``` rt = tf.RaggedTensor.from_row_splits( values=[3, 1, 4, 1, 5, 9, 2], row_splits=[0, 4, 4, 6, 7]) print(rt) ``` ### Multiple ragged dimensions A ragged tensor with multiple ragged dimensions is encoded by using a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor` adds a single ragged dimension. 
![ragged_rank_2](https://www.tensorflow.org/images/ragged_tensors/ragged_rank_2.png) ``` rt = tf.RaggedTensor.from_row_splits( values=tf.RaggedTensor.from_row_splits( values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], row_splits=[0, 3, 3, 5, 9, 10]), row_splits=[0, 1, 1, 5]) print(rt) print("Shape: {}".format(rt.shape)) print("Number of ragged dimensions: {}".format(rt.ragged_rank)) ``` The factory function `tf.RaggedTensor.from_nested_row_splits` may be used to construct a RaggedTensor with multiple ragged dimensions directly, by providing a list of `row_splits` tensors: ``` rt = tf.RaggedTensor.from_nested_row_splits( flat_values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19], nested_row_splits=([0, 1, 1, 5], [0, 3, 3, 5, 9, 10])) print(rt) ``` ### Uniform Inner Dimensions Ragged tensors with uniform inner dimensions are encoded by using a multidimensional `tf.Tensor` for `values`. ![uniform_inner](https://www.tensorflow.org/images/ragged_tensors/uniform_inner.png) ``` rt = tf.RaggedTensor.from_row_splits( values=[[1, 3], [0, 0], [1, 3], [5, 3], [3, 3], [1, 2]], row_splits=[0, 3, 4, 6]) print(rt) print("Shape: {}".format(rt.shape)) print("Number of ragged dimensions: {}".format(rt.ragged_rank)) ``` ### Alternative row-partitioning schemes The `RaggedTensor` class uses `row_splits` as the primary mechanism to store information about how the values are partitioned into rows. However, `RaggedTensor` also provides support for four alternative row-partitioning schemes, which can be more convenient to use depending on how your data is formatted. Internally, `RaggedTensor` uses these additional schemes to improve efficiency in some contexts. <dl> <dt>Row lengths</dt> <dd>`row_lengths` is a vector with shape `[nrows]`, which specifies the length of each row.</dd> <dt>Row starts</dt> <dd>`row_starts` is a vector with shape `[nrows]`, which specifies the start offset of each row. 
Equivalent to `row_splits[:-1]`.</dd> <dt>Row limits</dt> <dd>`row_limits` is a vector with shape `[nrows]`, which specifies the stop offset of each row. Equivalent to `row_splits[1:]`.</dd> <dt>Row indices and number of rows</dt> <dd>`value_rowids` is a vector with shape `[nvals]`, corresponding one-to-one with values, which specifies each value's row index. In particular, the row `rt[row]` consists of the values `rt.values[j]` where `value_rowids[j]==row`. \ `nrows` is an integer that specifies the number of rows in the `RaggedTensor`. In particular, `nrows` is used to indicate trailing empty rows.</dd> </dl> For example, the following ragged tensors are equivalent: ``` values = [3, 1, 4, 1, 5, 9, 2, 6] print(tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])) print(tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])) print(tf.RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])) print(tf.RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])) print(tf.RaggedTensor.from_value_rowids( values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)) ``` The RaggedTensor class defines methods which can be used to construct each of these row-partitioning tensors. ``` rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) print(" values: {}".format(rt.values)) print(" row_splits: {}".format(rt.row_splits)) print(" row_lengths: {}".format(rt.row_lengths())) print(" row_starts: {}".format(rt.row_starts())) print(" row_limits: {}".format(rt.row_limits())) print("value_rowids: {}".format(rt.value_rowids())) ``` (Note that `tf.RaggedTensor.values` and `tf.RaggedTensors.row_splits` are properties, while the remaining row-partitioning accessors are all methods. This reflects the fact that the `row_splits` are the primary underlying representation, and the other row-partitioning tensors must be computed.) 
Some of the advantages and disadvantages of the different row-partitioning schemes are: + **Efficient indexing**: The `row_splits`, `row_starts`, and `row_limits` schemes all enable constant-time indexing into ragged tensors. The `value_rowids` and `row_lengths` schemes do not. + **Small encoding size**: The `value_rowids` scheme is more efficient when storing ragged tensors that have a large number of empty rows, since the size of the tensor depends only on the total number of values. On the other hand, the other four encodings are more efficient when storing ragged tensors with longer rows, since they require only one scalar value for each row. + **Efficient concatenation**: The `row_lengths` scheme is more efficient when concatenating ragged tensors, since row lengths do not change when two tensors are concatenated together (but row splits and row indices do). + **Compatibility**: The `value_rowids` scheme matches the [segmentation](../api_guides/python/math_ops.md#Segmentation) format used by operations such as `tf.segment_sum`. The `row_limits` scheme matches the format used by ops such as `tf.sequence_mask`. ``` ```
github_jupyter
``` # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` # Text-to-Video retrieval with S3D MIL-NCE <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/text_to_video_retrieval_with_s3d_milnce"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/text_to_video_retrieval_with_s3d_milnce.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/text_to_video_retrieval_with_s3d_milnce.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/text_to_video_retrieval_with_s3d_milnce.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ``` !pip install -q opencv-python import os import tensorflow.compat.v2 as tf import tensorflow_hub as hub import numpy as np import cv2 from IPython import display import math ``` ## Import TF-Hub model This tutorial demonstrates how to use the [S3D MIL-NCE 
model](https://tfhub.dev/deepmind/mil-nce/s3d/1) from TensorFlow Hub to do **text-to-video retrieval** to find the most similar videos for a given text query. The model has 2 signatures, one for generating *video embeddings* and one for generating *text embeddings*. We will use these embedding to find the nearest neighbors in the embedding space. ``` # Load the model once from TF-Hub. hub_handle = 'https://tfhub.dev/deepmind/mil-nce/s3d/1' hub_model = hub.load(hub_handle) def generate_embeddings(model, input_frames, input_words): """Generate embeddings from the model from video frames and input words.""" # Input_frames must be normalized in [0, 1] and of the shape Batch x T x H x W x 3 vision_output = model.signatures['video'](tf.constant(tf.cast(input_frames, dtype=tf.float32))) text_output = model.signatures['text'](tf.constant(input_words)) return vision_output['video_embedding'], text_output['text_embedding'] # @title Define video loading and visualization functions { display-mode: "form" } # Utilities to open video files using CV2 def crop_center_square(frame): y, x = frame.shape[0:2] min_dim = min(y, x) start_x = (x // 2) - (min_dim // 2) start_y = (y // 2) - (min_dim // 2) return frame[start_y:start_y+min_dim,start_x:start_x+min_dim] def load_video(video_url, max_frames=32, resize=(224, 224)): path = tf.keras.utils.get_file(os.path.basename(video_url)[-128:], video_url) cap = cv2.VideoCapture(path) frames = [] try: while True: ret, frame = cap.read() if not ret: break frame = crop_center_square(frame) frame = cv2.resize(frame, resize) frame = frame[:, :, [2, 1, 0]] frames.append(frame) if len(frames) == max_frames: break finally: cap.release() frames = np.array(frames) if len(frames) < max_frames: n_repeat = int(math.ceil(max_frames / float(len(frames)))) frames = frames.repeat(n_repeat, axis=0) frames = frames[:max_frames] return frames / 255.0 def display_video(urls): html = '<table>' html += '<tr><th>Video 1</th><th>Video 2</th><th>Video 3</th></tr><tr>' 
for url in urls: html += '<td>' html += '<img src="{}" height="224">'.format(url) html += '</td>' html += '</tr></table>' return display.HTML(html) def display_query_and_results_video(query, urls, scores): """Display a text query and the top result videos and scores.""" sorted_ix = np.argsort(-scores) html = '' html += '<h2>Input query: <i>{}</i> </h2><div>'.format(query) html += 'Results: <div>' html += '<table>' html += '<tr><th>Rank #1, Score:{:.2f}</th>'.format(scores[sorted_ix[0]]) html += '<th>Rank #2, Score:{:.2f}</th>'.format(scores[sorted_ix[1]]) html += '<th>Rank #3, Score:{:.2f}</th></tr><tr>'.format(scores[sorted_ix[2]]) for i, idx in enumerate(sorted_ix): url = urls[sorted_ix[i]]; html += '<td>' html += '<img src="{}" height="224">'.format(url) html += '</td>' html += '</tr></table>' return html # @title Load example videos and define text queries { display-mode: "form" } video_1_url = 'https://upload.wikimedia.org/wikipedia/commons/b/b0/YosriAirTerjun.gif' # @param {type:"string"} video_2_url = 'https://upload.wikimedia.org/wikipedia/commons/e/e6/Guitar_solo_gif.gif' # @param {type:"string"} video_3_url = 'https://upload.wikimedia.org/wikipedia/commons/3/30/2009-08-16-autodrift-by-RalfR-gif-by-wau.gif' # @param {type:"string"} video_1 = load_video(video_1_url) video_2 = load_video(video_2_url) video_3 = load_video(video_3_url) all_videos = [video_1, video_2, video_3] query_1_video = 'waterfall' # @param {type:"string"} query_2_video = 'playing guitar' # @param {type:"string"} query_3_video = 'car drifting' # @param {type:"string"} all_queries_video = [query_1_video, query_2_video, query_3_video] all_videos_urls = [video_1_url, video_2_url, video_3_url] display_video(all_videos_urls) ``` ## Demonstrate text to video retrieval ``` # Prepare video inputs. videos_np = np.stack(all_videos, axis=0) # Prepare text input. words_np = np.array(all_queries_video) # Generate the video and text embeddings. 
video_embd, text_embd = generate_embeddings(hub_model, videos_np, words_np) # Scores between video and text is computed by dot products. all_scores = np.dot(text_embd, tf.transpose(video_embd)) # Display results. html = '' for i, words in enumerate(words_np): html += display_query_and_results_video(words, all_videos_urls, all_scores[i, :]) html += '<br>' display.HTML(html) ```
github_jupyter
## Plots of SST for S-MODE region ``` cd C:\users\jtomf\Documents\Python\S-MODE_analysis\code import xarray as xr import numpy as np import matplotlib.pyplot as plt import matplotlib import cftime import cartopy.crs as ccrs # import projections import cartopy import gsw # For great circle distance between two points, use gsw.geostrophy.distance(lon, lat, p=0, axis=-1) import functions # requires functions.py from this repository # %matplotlib inline %matplotlib qt5 plt.rcParams['figure.figsize'] = (6,5) plt.rcParams['figure.dpi'] = 300 plt.rcParams['savefig.dpi'] = 700 plt.close('all') __figdir__ = '../plots/' + 'SMODE_' savefig_args = {'bbox_inches':'tight', 'pad_inches':0.2} plotfiletype='png' savefig = True zoom = True if zoom: xmin, xmax = (-126,-122) ymin, ymax = (36,39) levels = np.linspace(13.5,15.5,21)+.5 zoom_str='zoom' else: xmin, xmax = (-126,-121) ymin, ymax = (35, 39) levels = np.linspace(12.5,16,21)+0 zoom_str='wide' def plot_ops_area(ax,**kwargs): """ Add polygon to show S-MODE pilot operations area Inputs - matplotlib.pyplot.plot kwargs return - exit code (True if OK) """ # Add S-MODE pilot operations area ''' New corners of pentagon: 38° 05.500’ N, 125° 22.067’ W 37° 43.000’ N, 124° 00.067’ W 37° 45.000’ N, 123° 26.000‘ W 36° 58.000’ N, 122° 57.000’ W 36° 20.000’ N, 124° 19.067’ W ''' coord = [[-(125+22.067/60),38+5.5/60], [-(124+0.067/60),37+43/60], [-(123+26/60),37+45/60], [-(122+57/60),36+58/60], [-(124+19.067/60),36+20/60]] coord.append(coord[0]) #repeat the first point to create a 'closed loop' xs, ys = zip(*coord) #create lists of x and y values if ax is None: ax = plt.gca() # ax.plot(xs,ys,transform=ccrs.PlateCarree()) ax.plot(xs,ys,**kwargs) SF_lon=-(122+25/60) SF_lat= 37+47/60 # mark a known place to help us geo-locate ourselves ax.plot(SF_lon, SF_lat, 'o', markersize=3, zorder=10, **kwargs) ax.text(SF_lon-5/60, SF_lat+5/60, 'San Francisco', fontsize=8, zorder=10, **kwargs) # ax.text(np.mean(xs)-.6, np.mean(ys)-.3, 'S-MODE ops area', 
fontsize=8, **kwargs) print(kwargs) return(xs,ys,ax) #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/AVHRR_METOPB/AVHRR_METOPB_20210930T060000Z.nc' #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/MODIS_Terra/MODIS_Terra_20210930T065001Z.nc'3 # url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20210929T213000Z.nc' #This one is good #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211016T095000Z.nc' # good one # url = 'http://smode.whoi.edu:8080/thredds/fileServer/satellite/AVHRR_METOPA/AVHRR_METOPA_20211019T031000Z.nc#bytes' #Really great! # url = 'http://smode.whoi.edu:8080/thredds/fileServer/satellite/VIIRS_NRT/VIIRS_NRT_20211019T103001Z.nc#mode=bytes' #Not awesome, but coincident with B200 flight on 10/19 #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/MODIS_Terra/MODIS_Terra_20211011T233459Z.nc' # url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211022T111000Z.nc' #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/AVHRR_METOPA/AVHRR_METOPA_20211022T161000Z.nc' # good one! #Best sequence #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/AVHRR_METOPC/AVHRR_METOPC_20211019T052000Z.nc' # prettiest of all!! #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211025T214001Z.nc' # url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/AVHRR_METOPC/AVHRR_METOPC_20211025T050000Z.nc' #not good, but looking for front... 
# url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211025T102000Z.nc' # url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211025T214001Z.nc' #url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/AVHRR_METOPA/AVHRR_METOPA_20211028T033000Z.nc' url = 'http://smode.whoi.edu:8080/thredds/dodsC/satellite/VIIRS_NRT/VIIRS_NRT_20211104T103001Z.nc' ds = xr.open_dataset(url) ds fig = plt.figure() ax = plt.axes(projection = ccrs.PlateCarree(central_longitude=200)) # Orthographic extent = [xmin, xmax, ymin, ymax] ax.set_extent(extent, crs=ccrs.PlateCarree()) '''daystr=ds.time.dt.day.astype(str).values[0] monstr=ds.time.dt.month.astype(str).values[0] yrstr=ds.time.dt.year.astype(str).values[0] day_str = monstr+'-'+daystr+'-'+yrstr ''' day_str=ds.time.dt.strftime("%a, %b %d %H:%M").values[0] day_str2=ds.time.dt.strftime("%m-%d-%Y").values[0] ax.set_title('SST, ' + day_str, size = 10.) #plt.set_cmap(cmap=plt.get_cmap('nipy_spectral')) plt.set_cmap(cmap=plt.get_cmap('turbo')) gl = ax.gridlines(draw_labels=True, dms=True, x_inline=False, y_inline=False, alpha=0.5, linestyle='--') gl.top_labels = False gl.ylabels_right = False #gl.xlocator = matplotlib.ticker.MaxNLocator(10) #gl.xlocator = matplotlib.ticker.AutoLocator # gl.xlocator = matplotlib.ticker.FixedLocator(np.arange(0, 360 ,30)) ax.coastlines() ax.add_feature(cartopy.feature.LAND, zorder=3, facecolor=[.6,.6,.6], edgecolor='black') # cs = ax.contourf(ds.lon,ds.lat,np.squeeze(ds.sea_surface_temperature)-273.15, levels, extend='both', transform=ccrs.PlateCarree()) cs = ax.pcolormesh(ds.lon,ds.lat,np.squeeze(ds.sea_surface_temperature)-273.15, vmin=levels[0], vmax=levels[-1], transform=ccrs.PlateCarree()) # cb = plt.colorbar(cs,ax=ax,shrink=.8,pad=.05) cb = plt.colorbar(cs,fraction = 0.022,extend='both') cb.set_label('SST [$\circ$C]',fontsize = 10) plot_ops_area(ax,transform=ccrs.PlateCarree(),color='w') # Add a 10 km scale bar 
km_per_deg_lat=gsw.geostrophy.distance((125,125), (37,38))/1000 deg_lat_equal_10km=10/km_per_deg_lat x0 = -125 y0 = 37.75 ax.plot(x0+np.asarray([0, 0]),y0+np.asarray([0.,deg_lat_equal_10km]),transform=ccrs.PlateCarree(),color='k') ax.text(x0+2/60, y0-.5/60, '10 km', fontsize=6,transform=ccrs.PlateCarree()) # saildrones.plot('longitude','latitude','scatter',markersize=3,transform=ccrs.PlateCarree()) #ax.plot(saildrones['longitude'],saildrones['latitude'],'ko',markersize=3,transform=ccrs.PlateCarree()) saildrones=functions.get_current_position('saildrone')#,'ko', markersize=3,transform=ccrs.PlateCarree()) ax.plot(saildrones['longitude'],saildrones['latitude'],'ko',markersize=1,transform=ccrs.PlateCarree()) wg=functions.get_current_position('waveglider')#,'ko', markersize=3,transform=ccrs.PlateCarree()) hWG = ax.plot(wg['longitude'],wg['latitude'],'mo',markersize=1,transform=ccrs.PlateCarree()) if savefig: plt.savefig(__figdir__+'SST_' + day_str2 + '_' + zoom_str + 'pretty2' +'.' +plotfiletype,**savefig_args) pwd ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import gc import os import re import pickle import sklearn import sys import string from sklearn.decomposition import LatentDirichletAllocation from sklearn.metrics import f1_score, precision_score, recall_score from sklearn.metrics.pairwise import cosine_similarity,cosine_distances from sklearn.model_selection import cross_val_score, GridSearchCV,ParameterGrid, train_test_split from sklearn.multiclass import OneVsRestClassifier from sklearn.preprocessing import MultiLabelBinarizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer,TfidfVectorizer from sklearn.pipeline import Pipeline from sklearn.neighbors import KNeighborsClassifier,NearestNeighbors from sklearn.svm import LinearSVC from tqdm import * %matplotlib inline %load_ext autoreload %autoreload 1 src_dir = os.path.join(os.getcwd(), os.pardir, '../src') sys.path.append(src_dir) %aimport data.movielens_20m_imdb %aimport helpers.labels,helpers.neighbours from data.movielens_20m_imdb import load_or_get_from_cache from helpers.labels import truncate_labels from helpers.neighbours import get_predicted_labels_from_neighbours INTERIM_DATA_ROOT = os.path.abspath("../../data/interim/movielens-ml20m-imdb/") ML_ROOT = "/media/felipe/SAMSUNG/movielens/ml-20m/" IMDB_ROOT = "/media/felipe/SAMSUNG/imdb/" PATH_TO_MOVIES = ML_ROOT + "/movies.csv" PATH_TO_TAG_ASSIGNMENTS = ML_ROOT + "/tags.csv" PATH_TO_MOVIE_PLOTS = IMDB_ROOT+"/plot.list" # CONFIGS MAX_NB_WORDS = 4000 NB_NEIGHBOURS = 3 DISTANCE_METRIC='cosine' WEIGHTS='distance' PREPROC=None STOP_WORDS='english' NB_COMPONENTS = 30 docs_df = load_or_get_from_cache(PATH_TO_MOVIES,PATH_TO_TAG_ASSIGNMENTS,PATH_TO_MOVIE_PLOTS,INTERIM_DATA_ROOT) data = docs_df['plot'].values labelsets = docs_df["unique_tags"].map(lambda tagstring: tagstring.split(",")).values mlb = MultiLabelBinarizer() mlb.fit(labelsets) # I can't put this into a 
pipeline because NearestNeighbors is not a normal classifier, I think # I need to customize the pipeline object to be able to call the methods for that class. vect = CountVectorizer(max_features=MAX_NB_WORDS, preprocessor=PREPROC, stop_words=STOP_WORDS) # arsg taken from http://scikit-learn.org/stable/auto_examples/applications/plot_topics_extraction_with_nmf_lda.html lda = LatentDirichletAllocation(n_components=NB_COMPONENTS, max_iter=5, learning_method='online', learning_offset=50.) nbrs = NearestNeighbors(n_neighbors=NB_NEIGHBOURS, metric=DISTANCE_METRIC) X_train, X_test, y_train, y_test = train_test_split(data,labelsets,test_size=0.25) y_train = mlb.transform(y_train) y_test = mlb.transform(y_test) # train X_train = vect.fit_transform(X_train) X_train = lda.fit_transform(X_train) nbrs.fit(X_train) # test X_test = vect.transform(X_test) X_test = lda.transform(X_test) X_train.shape,X_test.shape y_train.shape,y_test.shape y_preds = [] y_trues = [] distances_matrix, indices_matrix = nbrs.kneighbors(X_test) neighbour_labels_tensor = y_train[indices_matrix] distances_matrix.shape, indices_matrix.shape, neighbour_labels_tensor.shape for i in tqdm(range(distances_matrix.shape[0])): distances = distances_matrix[i].ravel() neighbour_labels = neighbour_labels_tensor[i] y_pred = get_predicted_labels_from_neighbours(neighbour_labels, distances) y_true = y_test[i] y_preds.append(y_pred) y_trues.append(y_true) y_preds = np.array(y_preds) y_trues = np.array(y_trues) f1_score(y_trues,y_preds,average='micro') def print_top_words(model, feature_names, n_top_words): for topic_idx, topic in enumerate(model.components_): message = "Topic #%d: " % topic_idx message += " ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) print(message) print() tf_feature_names = vect.get_feature_names() print_top_words(lda, tf_feature_names, 10) ```
github_jupyter
# Assignment 11 Consider the reservoir shown below with the given properties that has been discretized into 4 equal grid blocks. ![image](images/grid.png) Below is a skeleton of a Python class that can be used to solve for the pressures in the reservoir. The class is actually written generally enough that it can account for an arbitrary number of grid blocks, but we will only test cases with 4. The class takes a Python dictonary of input parameters as an initialization argument. An example of a complete set of input parameters is shown in the `setup()` function of the tests below. Several simple useful functions are already implemented, your task is to implement the functions `compute_transmisibility()`, `compute_accumulation()`, `fill_matrices()` and `solve_one_step()`. `fill_matrices()` should correctly populate the $\mathbf{T}$, $\mathbf{B}$ matrices as well as the vector $\vec{Q}$. These should also correctly account for the application of boundary conditions. Only the boundary conditions shown in the figure will be tested, but in preparation for future assignments, you may wish to add the logic to the code such that arbitrary pressure/no flow boundary conditions can be applied to either side of the one-dimensional reservoir. You may need to use the `'conversion factor'` for the transmissibilities. `solve_one_step()` should solve a single time step for either the explicit or implicit methods depending on which is specified in the input parameters. The $\vec{p}{}^{n+1}$ values should be stored in the class attribute `self.p`. If this is implemented correctly, you will be able to then use the `solve()` function to solve the problem up to the `'number of time steps'` value in the input parameters. This time, in preparation for solving much larger systems of equations in the future, use the `scipy.sparse` module to create sparse matrix data structures for $\mathbf{T}$ and $\mathbf{B}$. 
The sparsity of the matrix $\mathbf{T}$ is tested, so please assign this matrix to a class attribute named exactly `T`. Use `scipy.sparse.linalg.spsolve()` for the linear solution of the `'implicit'` method implementation. Once you have the tests passing, you might like to experiment with viewing several plots with different time steps, explicit vs. implicit, number of grid blocks, etc. To assist in giving you a feel for how they change the character of the approximate solution. I have implemented a simple plot function that might help for this. ``` import numpy as np import yaml import scipy.sparse import scipy.sparse.linalg import matplotlib.pyplot as plt class OneDimReservoir(): def __init__(self, inputs): ''' Class for solving one-dimensional reservoir problems with finite differences. ''' #stores input dictionary as class attribute, either read from a yaml file #or directly from a Python dictonary if isinstance(inputs, str): with open(inputs) as f: self.inputs = yaml.load(f, yaml.FullLoader) else: self.inputs = inputs #computes delta_x self.Nx = self.inputs['numerical']['number of grids']['x'] self.N = self.Nx self.delta_x = self.inputs['reservoir']['length'] / float(self.Nx) #gets delta_t from inputs self.delta_t = self.inputs['numerical']['time step'] #applies the initial reservoir pressues to self.p self.apply_initial_conditions() #calls fill matrix method (must be completely implemented to work) self.fill_matrices() #create an empty list for storing data if plots are requested if 'plots' in self.inputs: self.p_plot = [] return def compute_transmissibility(self): ''' Computes the transmissibility. ''' # Complete implementation here return def compute_accumulation(self): ''' Computes the accumulation. ''' # Complete implementation here return def fill_matrices(self): ''' Fills the matrices T, B, and \vec{Q} and applies boundary conditions. 
''' # Complete implementation here return def apply_initial_conditions(self): ''' Applies initial pressures to self.p ''' N = self.N self.p = np.ones(N) * self.inputs['initial conditions']['pressure'] return def solve_one_step(self): ''' Solve one time step using either the implicit or explicit method ''' # Complete implementation here return def solve(self): ''' Solves until "number of time steps" ''' for i in range(self.inputs['numerical']['number of time steps']): self.solve_one_step() if i % self.inputs['plots']['frequency'] == 0: self.p_plot += [self.get_solution()] return def plot(self): ''' Crude plotting function. Plots pressure as a function of grid block # ''' if self.p_plot is not None: for i in range(len(self.p_plot)): plt.plot(self.p_plot[i]) return def get_solution(self): ''' Returns solution vector ''' return self.p ``` # Example code execution If you'd like to run your code in the notebook, perhaps creating a crude plot of the output, you can uncomment the following lines of code in the cell below. You can also inspect the contents of `inputs.yml` and change the parameters to see how the solution is affected. ``` #import matplotlib.pyplot as plt #implicit = OneDimReservoir('inputs.yml') #implicit.solve() #implicit.plot() ```
github_jupyter
<a href="https://colab.research.google.com/github/thomascong121/SocialDistance/blob/master/model_camera_colibration.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` from google.colab import drive drive.mount('/content/drive') %%capture !pip install gluoncv !pip install mxnet-cu101 import gluoncv from gluoncv import model_zoo, data, utils from matplotlib import pyplot as plt import numpy as np from collections import defaultdict from mxnet import nd import mxnet as mx from skimage import io import cv2 import os from copy import deepcopy from tqdm import tqdm !ls '/content/drive/My Drive/social distance/0.png' !nvidia-smi !nvcc --version img_path = '/content/drive/My Drive/social distance/0.png' img = io.imread(img_path) video_path = '/content/drive/My Drive/social distance/TownCentreXVID.avi' io.imshow(img) io.show() class Bird_eye_view_Transformer: def __init__(self, keypoints, keypoints_birds_eye_view, actual_length, actual_width): ''' keypoints input order 0 1 3 2 ''' self.keypoint = np.float32(keypoints) self.keypoints_birds_eye_view = np.float32(keypoints_birds_eye_view) self.M = cv2.getPerspectiveTransform(self.keypoint, self.keypoints_birds_eye_view) self.length_ratio = actual_width/(keypoints_birds_eye_view[3][1] - keypoints_birds_eye_view[0][1]) self.width_ratio = actual_length/(keypoints_birds_eye_view[1][0] - keypoints_birds_eye_view[0][0]) print(self.length_ratio, self.width_ratio) def imshow(self, img): dst_img = cv2.warpPerspective(img, self.M, (img.shape[1], img.shape[0])) plt.imshow(dst_img) plt.show() def __call__(self, points): h = points.shape[0] points = np.concatenate([points, np.ones((h, 1))], axis = 1) temp = self.M.dot(points.T) return (temp[:2]/temp[2]).T def distance(self, p0, p1): return ((p0[0] - p1[0])*self.width_ratio)**2 \ + ((p0[1] - p1[1])*self.length_ratio)**2 keypoints = [(1175, 189), (1574, 235), (976, 831), (364, 694)] keypoints_birds_eye_view = [(700, 400), 
(1200, 400), (1200, 900), (700, 900)] actual_length = 10 actual_width = 5 transformer = Bird_eye_view_Transformer(keypoints, keypoints_birds_eye_view, actual_length, actual_width) transformer.imshow(img) ''' step0 install gluoncv pip install --upgrade mxnet gluoncv ''' class Model_Zoo: def __init__(self,selected_model, transformer, device): self.device = device self.transformer = transformer self.net = model_zoo.get_model(selected_model, pretrained=True, ctx = self.device) def __call__(self,image,display=False): '''get bbox for input image''' image = nd.array(image) x, orig_img = data.transforms.presets.yolo.transform_test(image) self.shape = orig_img.shape[:2] self.benchmark = max(orig_img.shape[:2]) x = x.copyto(self.device) box_ids, scores, bboxes = self.net(x) bboxes = bboxes * (image.shape[0]/orig_img.shape[0]) person_index = [] #check person class for i in range(box_ids.shape[1]): if box_ids[0][i][0] == 14 and scores[0][i][0] > 0.7: person_index.append(i) #select bbox of person #p1:bbox id of person #p2:confidence score #p3:bbox location # print('======{0} bbox of persons are detected===='.format(len(person_index))) p1,p2,p3 = box_ids[0][[person_index],:],scores[0][[person_index],:],bboxes[0][[person_index],:] #calaulate bbox coordinate bbox_center = self.bbox_center(p3) #img with bbox img_with_bbox = utils.viz.cv_plot_bbox(image.astype('uint8'), p3[0], p2[0], p1[0], colors={14: (0,255,0)},class_names = self.net.classes, linewidth=1) result_img = self.bbox_distance(bbox_center,img_with_bbox) if display: plt.imshow(result_img) plt.show() return result_img, p1, p2, p3, bbox_center def show(self, img, p1, p2, p3, bbox_center, resize = None): if resize is not None: img = mx.image.imresize(nd.array(img).astype('uint8'), self.shape[1], self.shape[0]) else: img = nd.array(img).astype('uint8') img_with_bbox = utils.viz.cv_plot_bbox(img, p3[0], p2[0], p1[0], colors={14: (0,255,0)},class_names = self.net.classes, linewidth=1) return 
self.bbox_distance(bbox_center,img_with_bbox) def bbox_center(self,bbox_location): '''calculate center coordinate for each bbox''' rst = None for loc in range(bbox_location[0].shape[0]): (xmin, ymin, xmax, ymax) = bbox_location[0][loc].copyto(mx.cpu()) center_x = (xmin+xmax)/2 center_y = ymax if rst is not None: rst = nd.concatenate([rst, nd.stack(center_x, center_y, axis = 1)]) else: rst = nd.stack(center_x, center_y, axis = 1) return rst.asnumpy() def bbox_distance(self,bbox_coord,img, max_detect = 4, safe=2): ''' calculate distance between each bbox, if distance < safe, draw a red line ''' #draw the center safe = safe**2 max_detect = max_detect**2 for coor in range(len(bbox_coord)): cv2.circle(img,(int(bbox_coord[coor][0]),int(bbox_coord[coor][1])),5,(0, 0, 255),-1) bird_eye_view = self.transformer(deepcopy(bbox_coord)) # print(bird_eye_view) # self.transformer.imshow(img) for i in range(0, len(bbox_coord)): for j in range(i+1, len(bbox_coord)): dist = self.transformer.distance(bird_eye_view[i], bird_eye_view [j]) # print(bird_eye_view[i], bird_eye_view [j],dist) if dist < safe: cv2.line(img,(bbox_coord[i][0],bbox_coord[i][1]),(bbox_coord[j][0],bbox_coord[j][1]),(255, 0, 0), 2) elif dist < max_detect: cv2.line(img,(bbox_coord[i][0],bbox_coord[i][1]),(bbox_coord[j][0],bbox_coord[j][1]),(0, 255, 0), 2) return img pretrained_models = 'yolo3_darknet53_voc' detect_model = Model_Zoo(pretrained_models, transformer, mx.gpu()) %%time rst = detect_model(img,display=True) rst class Detector: def __init__(self, model, save_path = './detections', batch_size = 60, interval = None): self.detector = model self.save_path = save_path self.interval = interval self.batch_size = batch_size def __call__(self, filename): v_cap = cv2.VideoCapture(filename) v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT)) frame_size = (v_cap.get(cv2.CAP_PROP_FRAME_WIDTH), v_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fourcc = cv2.VideoWriter_fourcc(*'MP4V') fps = v_cap.get(cv2.CAP_PROP_FPS) if not 
os.path.exists(self.save_path): os.mkdir(self.save_path) print(f'{self.save_path}/{filename.split("/")[-1]}') out = cv2.VideoWriter(f'{self.save_path}/{filename.split("/")[-1]}', fourcc, fps,\ (int(frame_size[0]), int(frame_size[1]))) if self.interval is None: sample = np.arange(0, v_len) else: sample = np.arange(0, v_len, self.interval) frame = p1 = p2 = p3 = bbox_center =None for i in tqdm(range(v_len)): success = v_cap.grab() success, frame = v_cap.retrieve() if not success: continue frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if i in sample: frame, p1, p2, p3, bbox_center = self.detector(frame) else: frame = self.detector.show(frame, p1, p2, p3, bbox_center) # plt.imshow(frame) # plt.show() frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) out.write(frame) v_cap.release() return out detector = Detector(detect_model, interval = 10) %%time detector(video_path) !ls ./detections ```
github_jupyter
<a href="https://colab.research.google.com/github/scifiswapnil/DeepLearningExperiments/blob/master/CNN/Exercise3_transferlearning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Import all the necessary files! import os import tensorflow as tf from tensorflow.keras import layers from tensorflow.keras import Model # Download the inception v3 weights !wget --no-check-certificate \ https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \ -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 # Import the inception model from tensorflow.keras.applications.inception_v3 import InceptionV3 # Create an instance of the inception model from the local pre-trained weights local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5' pre_trained_model = InceptionV3(input_shape = (150, 150, 3), include_top = False, weights = None) pre_trained_model.load_weights(local_weights_file) # Make all the layers in the pre-trained model non-trainable for layer in pre_trained_model.layers: layer.trainable = False # Print the model summary pre_trained_model.summary() # Expected Output is extremely large, but should end with: #batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0] #__________________________________________________________________________________________________ 
#activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0] #__________________________________________________________________________________________________ #mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_275[0][0] # activation_276[0][0] #__________________________________________________________________________________________________ #concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0] # activation_280[0][0] #__________________________________________________________________________________________________ #activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0] #__________________________________________________________________________________________________ #mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0] # mixed9_1[0][0] # concatenate_5[0][0] # activation_281[0][0] #================================================================================================== #Total params: 21,802,784 #Trainable params: 0 #Non-trainable params: 21,802,784 last_layer = pre_trained_model.get_layer('mixed7') print('last layer output shape: ', last_layer.output_shape) last_output = last_layer.output # Expected Output: # ('last layer output shape: ', (None, 7, 7, 768)) # Define a Callback class that stops training once accuracy reaches 99.9% class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('acc')>0.999): print("\nReached 99.9% accuracy so cancelling training!") self.model.stop_training = True from tensorflow.keras.optimizers import RMSprop # Flatten the output layer to 1 dimension x = layers.Flatten()(last_output) # Add a fully connected layer with 1,024 hidden units and ReLU activation x = layers.Dense(1024, activation='relu')(x) # Add a dropout rate of 0.2 x = layers.Dropout(0.2)(x) # Add a final sigmoid layer for classification x = layers.Dense (1, activation='sigmoid')(x) model = Model( pre_trained_model.input, x) 
model.compile(optimizer = RMSprop(lr=0.0001), loss = 'binary_crossentropy', metrics = ['acc']) model.summary() # Expected output will be large. Last few lines should be: # mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_248[0][0] # activation_251[0][0] # activation_256[0][0] # activation_257[0][0] # __________________________________________________________________________________________________ # flatten_4 (Flatten) (None, 37632) 0 mixed7[0][0] # __________________________________________________________________________________________________ # dense_8 (Dense) (None, 1024) 38536192 flatten_4[0][0] # __________________________________________________________________________________________________ # dropout_4 (Dropout) (None, 1024) 0 dense_8[0][0] # __________________________________________________________________________________________________ # dense_9 (Dense) (None, 1) 1025 dropout_4[0][0] # ================================================================================================== # Total params: 47,512,481 # Trainable params: 38,537,217 # Non-trainable params: 8,975,264 # Get the Horse or Human dataset !wget --no-check-certificate https://storage.googleapis.com/laurencemoroney-blog.appspot.com/horse-or-human.zip -O /tmp/horse-or-human.zip # Get the Horse or Human Validation dataset !wget --no-check-certificate https://storage.googleapis.com/laurencemoroney-blog.appspot.com/validation-horse-or-human.zip -O /tmp/validation-horse-or-human.zip from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import zipfile local_zip = '//tmp/horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/training') zip_ref.close() local_zip = '//tmp/validation-horse-or-human.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp/validation') zip_ref.close() train_dir = '/tmp/training' validation_dir = '/tmp/validation' train_horses_dir = os.path.join(train_dir, 'horses') # Directory with our 
training horse pictures train_humans_dir = os.path.join(train_dir, 'humans') # Directory with our training humans pictures validation_horses_dir = os.path.join(validation_dir, 'horses') # Directory with our validation horse pictures validation_humans_dir = os.path.join(validation_dir, 'humans')# Directory with our validation humanas pictures train_horses_fnames = os.listdir(train_horses_dir) train_humans_fnames = os.listdir(train_humans_dir) validation_horses_fnames = os.listdir(validation_horses_dir) validation_humans_fnames = os.listdir(validation_humans_dir) print(len(train_horses_fnames)) print(len(train_humans_fnames)) print(len(validation_horses_fnames)) print(len(validation_humans_fnames)) # Expected Output: # 500 # 527 # 128 # 128 # Define our example directories and files train_dir = '/tmp/training' validation_dir = '/tmp/validation' # Add our data-augmentation parameters to ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255., rotation_range = 40, width_shift_range = 0.2, height_shift_range = 0.2, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) # Note that the validation data should not be augmented! test_datagen = ImageDataGenerator( rescale = 1.0/255. ) # Flow training images in batches of 20 using train_datagen generator train_generator = train_datagen.flow_from_directory(train_dir, batch_size = 100, class_mode = 'binary', target_size = (150, 150)) # Flow validation images in batches of 20 using test_datagen generator validation_generator = test_datagen.flow_from_directory( validation_dir, batch_size = 100, class_mode = 'binary', target_size = (150, 150)) # Expected Output: # Found 1027 images belonging to 2 classes. # Found 256 images belonging to 2 classes. 
# Run this and see how many epochs it should take before the callback # fires, and stops training at 99.9% accuracy # (It should take less than 100 epochs) callbacks = myCallback() history = model.fit_generator( train_generator, validation_data = validation_generator, steps_per_epoch = 10, epochs = 10, validation_steps = 10, verbose = 2, callbacks=[callbacks]) import matplotlib.pyplot as plt acc = history.history['acc'] val_acc = history.history['val_acc'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(len(acc)) plt.plot(epochs, acc, 'r', label='Training accuracy') plt.plot(epochs, val_acc, 'b', label='Validation accuracy') plt.title('Training and validation accuracy') plt.legend(loc=0) plt.figure() plt.show() ```
github_jupyter
# Azure Machine Learning Setup To begin, you will need to provide the following information about your Azure Subscription. **If you are using your own Azure subscription, please provide names for subscription_id, resource_group, workspace_name and workspace_region to use.** Note that the workspace needs to be of type [Machine Learning Workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/setup-create-workspace). **If an enviorment is provided to you be sure to replace XXXXX in the values below with your unique identifier.** In the following cell, be sure to set the values for `subscription_id`, `resource_group`, `workspace_name` and `workspace_region` as directed by the comments (*these values can be acquired from the Azure Portal*). To get these values, do the following: 1. Navigate to the Azure Portal and login with the credentials provided. 2. From the left hand menu, under Favorites, select `Resource Groups`. 3. In the list, select the resource group with the name similar to `XXXXX`. 4. From the Overview tab, capture the desired values. Execute the following cell by selecting the `>|Run` button in the command bar above. ``` #Provide the Subscription ID of your existing Azure subscription subscription_id = "" #"<your-azure-subscription-id>" #Provide a name for the Resource Group that will contain Azure ML related services resource_group = "mcw-ai-lab-XXXXX" #"<your-subscription-group-name>" # Provide the name and region for the Azure Machine Learning Workspace that will be created workspace_name = "mcw-ai-lab-ws-XXXXX" workspace_region = "eastus" # eastus2, eastus, westcentralus, southeastasia, australiaeast, westeurope ``` ## Create and connect to an Azure Machine Learning Workspace The Azure Machine Learning Python SDK is required for leveraging the experimentation, model management and model deployment capabilities of Azure Machine Learning services. 
Run the following cell to create a new Azure Machine Learning **Workspace** and save the configuration to disk. The configuration file named `config.json` is saved in a folder named `.azureml`. **Important Note**: You will be prompted to login in the text that is output below the cell. Be sure to navigate to the URL displayed and enter the code that is provided. Once you have entered the code, return to this notebook and wait for the output to read `Workspace configuration succeeded`. ``` import azureml.core print('azureml.core.VERSION: ', azureml.core.VERSION) # import the Workspace class and check the azureml SDK version from azureml.core import Workspace ws = Workspace.create( name = workspace_name, subscription_id = subscription_id, resource_group = resource_group, location = workspace_region, exist_ok = True) ws.write_config() print('Workspace configuration succeeded') ``` Take a look at the contents of the generated configuration file by running the following cell: ``` !cat .azureml/config.json ``` # Deploy model to Azure Container Instance (ACI) In this section, you will deploy a web service that uses Gensim as shown in `01 Summarize` to summarize text. The web service will be hosted in Azure Container Service. ## Create the scoring web service When deploying models for scoring with Azure Machine Learning services, you need to define the code for a simple web service that will load your model and use it for scoring. By convention this service has two methods init which loads the model and run which scores data using the loaded model. This scoring service code will later be deployed inside of a specially prepared Docker container. 
``` %%writefile summarizer_service.py import re import nltk import unicodedata from gensim.summarization import summarize, keywords def clean_and_parse_document(document): if isinstance(document, str): document = document elif isinstance(document, unicode): return unicodedata.normalize('NFKD', document).encode('ascii', 'ignore') else: raise ValueError("Document is not string or unicode.") document = document.strip() sentences = nltk.sent_tokenize(document) sentences = [sentence.strip() for sentence in sentences] return sentences def summarize_text(text, summary_ratio=None, word_count=30): sentences = clean_and_parse_document(text) cleaned_text = ' '.join(sentences) summary = summarize(cleaned_text, split=True, ratio=summary_ratio, word_count=word_count) return summary def init(): nltk.download('all') return def run(input_str): try: return summarize_text(input_str) except Exception as e: return (str(e)) ``` ## Create a Conda dependencies environment file Your web service can have dependencies installed by using a Conda environment file. Items listed in this file will be conda or pip installed within the Docker container that is created and thus be available to your scoring web service logic. ``` from azureml.core.conda_dependencies import CondaDependencies myacienv = CondaDependencies.create(pip_packages=['gensim','nltk']) with open("mydeployenv.yml","w") as f: f.write(myacienv.serialize_to_string()) ``` ## Deployment In the following cells you will use the Azure Machine Learning SDK to package the model and scoring script in a container, and deploy that container to an Azure Container Instance. Run the following cells. ``` from azureml.core.webservice import AciWebservice, Webservice aci_config = AciWebservice.deploy_configuration( cpu_cores = 1, memory_gb = 1, tags = {'name':'Summarization'}, description = 'Summarizes text.') ``` Next, build up a container image configuration that names the scoring service script, the runtime, and provides the conda file. 
``` service_name = "summarizer" runtime = "python" driver_file = "summarizer_service.py" conda_file = "mydeployenv.yml" from azureml.core.image import ContainerImage image_config = ContainerImage.image_configuration(execution_script = driver_file, runtime = runtime, conda_file = conda_file) ``` Now you are ready to begin your deployment to the Azure Container Instance. Run the following cell. This may take between 5-15 minutes to complete. You will see output similar to the following when your web service is ready: `SucceededACI service creation operation finished, operation "Succeeded"` ``` webservice = Webservice.deploy( workspace=ws, name=service_name, model_paths=[], deployment_config=aci_config, image_config=image_config, ) webservice.wait_for_deployment(show_output=True) ``` ## Test the deployed service Now you are ready to test scoring using the deployed web service. The following cell invokes the web service. Run the following cells to test scoring using a single input row against the deployed web service. ``` example_document = """ I was driving down El Camino and stopped at a red light. It was about 3pm in the afternoon. The sun was bright and shining just behind the stoplight. This made it hard to see the lights. There was a car on my left in the left turn lane. A few moments later another car, a black sedan pulled up behind me. When the left turn light changed green, the black sedan hit me thinking that the light had changed for us, but I had not moved because the light was still red. After hitting my car, the black sedan backed up and then sped past me. I did manage to catch its license plate. The license plate of the black sedan was ABC123. """ result = webservice.run(input_data = example_document) print(result) ``` ## Capture the scoring URI In order to call the service from a REST client, you need to acquire the scoring URI. Run the following cell to retrieve the scoring URI and take note of this value, you will need it in the last notebook. 
``` webservice.scoring_uri ``` The default settings used in deploying this service result in a service that does not require authentication, so the scoring URI is the only value you need to call this service.
github_jupyter
<a href="https://colab.research.google.com/github/jereyel/LinearAlgebra/blob/main/Assignment2_DelosReyes.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Welcome to Python Fundamentals In this module, we are going to establish our skills in Python Programming. In this notebook we are going to cover: * Variables and Data Types * Operations * Input and Output Operations * Iterables * Functions ## Variables and Data Types ``` x = 1 a, b = 3, -2 type (x) y = 3.0 type(y) x = float(x) type(x) s, t, u = "1", '3', 'three' type(s) ``` ## Operations ### Arithmetic ``` w, x, y, z = 4.0, -3.0, 1, -32 ### Addition S = w + x ### Subtractions D = y - z ### Multiplication P = w*z ### Division Q = y/x ### Floor Division Qf = w//z Qf ### Exponentiation E = w**w E ### Modulo mod = z%x mod ``` ### Assignment ``` A, B, C, D, E = 0, 100, 2, 1, 2 A += w B -= x C *= w D /= x E **= y E ``` ### Comparators ``` size_1, size_2, size_3 = 1, 2.0, "1" true_size = 1.0 ## Equality size_1 == true_size ## Non-Equality size_2 != true_size ## Inequality s1 = size_1 > size_2 s2 = size_1 < size_2/2 s3 = true_size <= size_1 s4 = size_2 <= true_size ``` ### Logical ``` size_1 == true_size size_1 size_1 is true_size size_1 is not true_size P, Q = True, False conj = P and Q disj = P or Q disj nand = not (P and Q) nand xor = (not P and Q) or (P and not Q) xor ``` ## Input and Output ``` print("Helllo World!") cnt = 14000 string = "Hello World!" 
print(string, ", Current COVID count is", cnt) cnt += 10000 print(f"{string}, current count is: {cnt}") sem_grade = 85.25 name = "jerbox" print("Hello {}, your semestral grade is: {}".format(name, sem_grade)) pg, mg, fg = 0.3, 0.3, 0.4 print("The weights of your semestral grade are:\ \n\t {:.2%} for Prelims\ \n\t {:.2%} for Midterms, and\ \n\t {:.2%} for Finals.".format(pg, mg, fg)) e = input("Enter a number: ") name = input("Enter your name: "); pg = input("Enter prelim grade: ") mg = input("Enter midterm grade: ") fg = input("Enter final grade: ") sem_grade = None print("Hello {}, your semestral grade is: {}".format(name, sem_grade)) ``` ### Looping Statements ## While ``` i, j = 0, 10 while(i<=j): print(f"{i}\t|\t{j}") i += 1 ``` ## For ``` i = 0 for i in range(11): print(i) playlist = ["Bahay Kubo", "Magandang Kanta", "Kubo"] print('Now playing:\n') for song in playlist: print(song) ``` ##Flow Control ###Condition Statements ``` num_1, num_2 = 14, 12 if(num_1 == num_2): print("HAHA") elif(num_1>num_2): print("HOHO") else: print("HUHU") ``` ##Functions ``` # void Deleteuser (int userid){ # delete(userid); # } def delete_user (userid): print("Successfully deleted user: {}".format(userid)) addend1, addend2 = 5, 6 def add(addend1, addend2): sum = addend1 + addend2 return sum add(5, 4) ```
github_jupyter
#### This notebook is used to train a character recongition from input image using MobileNets ``` # ignore warning import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from keras.preprocessing.image import ImageDataGenerator from keras.applications import MobileNetV2 from keras.layers import AveragePooling2D from keras.layers import Dropout from keras.layers import Flatten from keras.layers import Dense from keras.layers import Input from keras.models import Model from keras.optimizers import Adam from keras.preprocessing.image import img_to_array from keras.preprocessing.image import load_img from keras.utils import to_categorical from keras.callbacks import ModelCheckpoint, EarlyStopping from keras.models import model_from_json from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import glob import numpy as np ``` ### Visualize dataset ``` dataset_paths = glob.glob("dataset_characters/**/*.jpg") cols=4 rows=3 fig = plt.figure(figsize=(10,8)) plt.rcParams.update({"font.size":14}) grid = gridspec.GridSpec(ncols=cols,nrows=rows,figure=fig) # create a random list of images will be displayed np.random.seed(45) rand = np.random.randint(0,len(dataset_paths),size=(cols*rows)) # Plot image for i in range(cols*rows): fig.add_subplot(grid[i]) image = load_img(dataset_paths[rand[i]]) label = dataset_paths[rand[i]].split(os.path.sep)[-2] plt.title('"{:s}"'.format(label)) plt.axis(False) plt.imshow(image) plt.savefig("Visualize_dataset.jpg",dpi=300) ``` ## Data pre-processing ``` # Arange input data and corresponding labels X=[] labels=[] for image_path in dataset_paths: label = image_path.split(os.path.sep)[-2] image=load_img(image_path,target_size=(80,80)) image=img_to_array(image) X.append(image) labels.append(label) X = np.array(X,dtype="float16") labels = np.array(labels) print("[INFO] Find {:d} images with {:d} classes".format(len(X),len(set(labels)))) # 
perform one-hot encoding on the labels lb = LabelEncoder() lb.fit(labels) labels = lb.transform(labels) y = to_categorical(labels) # save label file so we can use in another script np.save('license_character_classes.npy', lb.classes_) # split 10% of data as validation set (trainX, testX, trainY, testY) = train_test_split(X, y, test_size=0.10, stratify=y, random_state=42) # data augumentation image_gen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.1, zoom_range=0.1, fill_mode="nearest" ) ``` ## Initialize MobileNets architecture with pre-trained weight ``` # Create our model with pre-trained MobileNetV2 architecture from imagenet def create_model(lr=1e-4,decay=1e-4/25, training=False,output_shape=y.shape[1]): baseModel = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(80, 80, 3))) headModel = baseModel.output headModel = AveragePooling2D(pool_size=(3, 3))(headModel) headModel = Flatten(name="flatten")(headModel) headModel = Dense(128, activation="relu")(headModel) headModel = Dropout(0.5)(headModel) headModel = Dense(output_shape, activation="softmax")(headModel) model = Model(inputs=baseModel.input, outputs=headModel) if training: # define trainable lalyer for layer in baseModel.layers: layer.trainable = True # compile model optimizer = Adam(lr=lr, decay = decay) model.compile(loss="categorical_crossentropy", optimizer=optimizer,metrics=["accuracy"]) return model # initilaize initial hyperparameter INIT_LR = 1e-4 EPOCHS = 30 model = create_model(lr=INIT_LR, decay=INIT_LR/EPOCHS,training=True) ``` ## Train model ``` BATCH_SIZE = 64 my_checkpointer = [ EarlyStopping(monitor='val_loss', patience=5, verbose=0), ModelCheckpoint(filepath="License_character_recognition.h5", verbose=1, save_weights_only=True) ] result = model.fit(image_gen.flow(trainX, trainY, batch_size=BATCH_SIZE), steps_per_epoch=len(trainX) // BATCH_SIZE, validation_data=(testX, testY), validation_steps=len(testX) // 
BATCH_SIZE, epochs=EPOCHS, callbacks=my_checkpointer) ``` ## Visualize training result ``` fig = plt.figure(figsize=(14,5)) grid=gridspec.GridSpec(ncols=2,nrows=1,figure=fig) fig.add_subplot(grid[0]) plt.plot(result.history['accuracy'], label='training accuracy') plt.plot(result.history['val_accuracy'], label='val accuracy') plt.title('Accuracy') plt.xlabel('epochs') plt.ylabel('accuracy') plt.legend() fig.add_subplot(grid[1]) plt.plot(result.history['loss'], label='training loss') plt.plot(result.history['val_loss'], label='val loss') plt.title('Loss') plt.xlabel('epochs') plt.ylabel('loss') plt.legend() #plt.savefig("Training_result.jpg",dpi=300) # save model architectur as json file model_json = model.to_json() with open("MobileNets_character_recognition.json", "w") as json_file: json_file.write(model_json) print(trainX) print(testX) ``` ## The End!
github_jupyter
SAM001a - Query Storage Pool from SQL Server Master Pool (1 of 3) - Load sample data ==================================================================================== Description ----------- In this 3 part tutorial, load data into the Storage Pool (HDFS) using `azdata`, convert it into Parquet (using Spark) and the in the 3rd part, query the data using the Master Pool (SQL Server) ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. 
Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportability, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) # Display an install HINT, so the user can click on a SOP to install the missing binary # if which_binary == None: print(f"The path used to search for '{cmd_actual[0]}' was:") print(sys.path) if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output def load_json(filename): """Load a json file from disk and return the contents""" with 
open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable""" # Load this notebook as json to get access to the expert rules in the notebook metadata. # try: j = load_json("sam001a-load-sample-data-into-bdc.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "expanded_rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["expanded_rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', 'ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', 
'../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. 
Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ["[Errno 2] No such file or directory: '..\\\\", 'TSG053 - ADS Provided Books must be saved before use', '../repair/tsg053-save-book-first.ipynb'], ["NameError: name 'azdata_login_secret_name' is not defined", 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', "TSG124 - 'No credentials were supplied' error from azdata login", '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', "TSG126 - azdata fails with 'accept the license terms to use this product'", '../repair/tsg126-accept-license-terms.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. ``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. 
SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ``` ### Create a temporary directory to stage files ``` # Create a temporary directory to hold configuration files import tempfile temp_dir = tempfile.mkdtemp() print(f"Temporary directory created: {temp_dir}") ``` ### Helper function to save configuration files to disk ``` # Define helper function 'save_file' to save configuration files to the temporary directory created above import os import io def save_file(filename, contents): with io.open(os.path.join(temp_dir, filename), "w", encoding='utf8', newline='\n') as text_file: text_file.write(contents) print("File saved: " + os.path.join(temp_dir, filename)) print("Function `save_file` defined successfully.") ``` ### Get the controller username and password Get the controller username and password from the Kubernetes Secret Store and place in the required AZDATA\_USERNAME and AZDATA\_PASSWORD environment variables. 
``` # Place controller secret in AZDATA_USERNAME/AZDATA_PASSWORD environment variables import os, base64 os.environ["AZDATA_USERNAME"] = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.username}}', return_output=True, base64_decode=True) os.environ["AZDATA_PASSWORD"] = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.password}}', return_output=True, base64_decode=True) print(f"Controller username '{os.environ['AZDATA_USERNAME']}' and password stored in environment variables") ``` ### Steps Upload this data into HDFS. ``` import os items = [ [1, "Eldon Base for stackable storage shelf platinum", "Muhammed MacIntyre", 3, -213.25, 38.94, 35, "Nunavut", "Storage & Organization", 0.8], [2, "1.7 Cubic Foot Compact ""Cube"" Office Refrigerators", "Barry French", 293, 457.81, 208.16, 68.02, "Nunavut", "Appliances", 0.58], [3, "Cardinal Slant-D Ring Binder Heavy Gauge Vinyl", "Barry French", 293,46.71, 8.69, 2.99, "Nunavut", "Binders and Binder Accessories", 0.39], [4, "R380", "Clay Rozendal", 483, 1198.97, 195.99, 3.99, "Nunavut", "Telephones and Communication", 0.58], [5, "Holmes HEPA Air Purifier", "Carlos Soltero", 515, 30.94, 21.78, 5.94, "Nunavut", "Appliances", 0.5], [6, "G.E. 
Longer-Life Indoor Recessed Floodlight Bulbs", "Carlos Soltero", 515, 4.43, 6.64, 4.95, "Nunavut", "Office Furnishings", 0.37], [7, "Angle-D Binders with Locking Rings Label Holders", "Carl Jackson", 613, -54.04, 7.3, 7.72, "Nunavut", "Binders and Binder Accessories", 0.38], [8, "SAFCO Mobile Desk Side File Wire Frame", "Carl Jackson", 613, 127.7, 42.76, 6.22, "Nunavut", "Storage & Organization", ], [9, "SAFCO Commercial Wire Shelving Black", "Monica Federle", 643, -695.26, 138.14, 35, "Nunavut", "Storage & Organization", ], [10, "Xerox 198", "Dorothy Badders", 678, -226.36, 4.98, 8.33, "Nunavut", "Paper", 0.38] ] src = os.path.join(temp_dir, "items.csv") dest = "/tmp/clickstream_data/datasampleCS.csv" s = "" for item in items: s = s + str(item)[1:-1] + "\n" save_file(src, s) run(f'azdata bdc hdfs rm --path {dest}') src = src.replace("\\", "\\\\") run(f'azdata bdc hdfs rm --path hdfs:{dest}') run(f'azdata bdc hdfs cp --from-path {src} --to-path hdfs:{dest}') print (f"CSV uploaded to HDFS: {dest}") ``` ### Clean up temporary directory for staging configuration files ``` # Delete the temporary directory used to hold configuration files import shutil shutil.rmtree(temp_dir) print(f'Temporary directory deleted: {temp_dir}') print('Notebook execution complete.') ```
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/composite_bands.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/composite_bands.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/composite_bands.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/composite_bands.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. 
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.eefolium as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40,-100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Add Earth Engine dataset # There are many fine places to look here is one. 
Comment # this out if you want to twiddle knobs while panning around. Map.setCenter(-61.61625, -11.64273, 14) # Grab a sample L7 image and pull out the RGB and pan bands # in the range (0, 1). (The range of the pan band values was # chosen to roughly match the other bands.) image1 = ee.Image('LANDSAT/LE7/LE72300681999227EDC00') rgb = image1.select('B3', 'B2', 'B1').unitScale(0, 255) gray = image1.select('B8').unitScale(0, 155) # Convert to HSV, swap in the pan band, and convert back to RGB. huesat = rgb.rgbToHsv().select('hue', 'saturation') upres = ee.Image.cat(huesat, gray).hsvToRgb() # Display before and after layers using the same vis parameters. visparams = {'min': [.15, .15, .25], 'max': [1, .9, .9], 'gamma': 1.6} Map.addLayer(rgb, visparams, 'Orignal') Map.addLayer(upres, visparams, 'Pansharpened') ``` ## Display Earth Engine data layers ``` Map.addLayerControl() # This line is not needed for ipyleaflet-based Map. Map ```
github_jupyter
![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/webinars_conferences_etc/python_web_conf/NLU_crashcourse_py_web.ipynb) <div> <img src="https://2021.pythonwebconf.com/images/pwcgenericlogo-opt2.jpg" width="400" height="250" > </div> # NLU 20 Minutes Crashcourse - the fast Data Science route This short notebook will teach you a lot of things! - Sentiment classification, binary, multi class and regressive - Extract Parts of Speech (POS) - Extract Named Entities (NER) - Extract Keywords (YAKE!) - Answer Open and Closed book questions with T5 - Summarize text and more with Multi task T5 - Translate text with Microsoft's Marian Model - Train a Multi Lingual Classifier for 100+ languages from a dataset with just one language ## More resources - [Join our Slack](https://join.slack.com/t/spark-nlp/shared_invite/zt-lutct9gm-kuUazcyFKhuGY3_0AMkxqA) - [NLU Website](https://nlu.johnsnowlabs.com/) - [NLU Github](https://github.com/JohnSnowLabs/nlu) - [Many more NLU example tutorials](https://github.com/JohnSnowLabs/nlu/tree/master/examples) - [Overview of every powerful nlu 1-liner](https://nlu.johnsnowlabs.com/docs/en/examples) - [Check out the Modelshub for an overview of all models](https://nlp.johnsnowlabs.com/models) - [Check out the NLU Namespace where you can find every model as a table](https://nlu.johnsnowlabs.com/docs/en/namespace) - [Intro to NLU article](https://medium.com/spark-nlp/1-line-of-code-350-nlp-models-with-john-snow-labs-nlu-in-python-2f1c55bba619) - [In-depth and easy Sentence Similarity Tutorial, with StackOverflow Questions using BERTology embeddings](https://medium.com/spark-nlp/easy-sentence-similarity-with-bert-sentence-embeddings-using-john-snow-labs-nlu-ea078deb6ebf) - [1 line of Python code for BERT, ALBERT, ELMO, ELECTRA, XLNET, GLOVE, Part of Speech with NLU and 
t-SNE](https://medium.com/spark-nlp/1-line-of-code-for-bert-albert-elmo-electra-xlnet-glove-part-of-speech-with-nlu-and-t-sne-9ebcd5379cd) # Install NLU You need Java8, Pyspark and Spark-NLP installed, [see the installation guide for instructions](https://nlu.johnsnowlabs.com/docs/en/install). If you need help or run into troubles, [ping us on slack :)](https://join.slack.com/t/spark-nlp/shared_invite/zt-lutct9gm-kuUazcyFKhuGY3_0AMkxqA) ``` import os ! apt-get update -qq > /dev/null # Install java ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! pip install nlu pyspark==2.4.7 > /dev/null import nlu ``` # Simple NLU basics on Strings ## Context based spell Checking in 1 line ![Spell Check](https://i.imgflip.com/52wb7w.jpg) ``` nlu.load('spell').predict('I also liek to live dangertus') ``` ## Binary Sentiment classification in 1 Line ![Binary Sentiment](https://cdn.pixabay.com/photo/2015/11/13/10/07/smiley-1041796_960_720.jpg) ``` nlu.load('sentiment').predict('I love NLU and rainy days!') ``` ## Part of Speech (POS) in 1 line ![Parts of Speech](https://image.shutterstock.com/image-photo/blackboard-background-written-colorful-chalk-600w-1166166529.jpg) |Tag |Description | Example| |------|------------|------| |CC| Coordinating conjunction | This batch of mushroom stew is savory **and** delicious | |CD| Cardinal number | Here are **five** coins | |DT| Determiner | **The** bunny went home | |EX| Existential there | **There** is a storm coming | |FW| Foreign word | I'm having a **déjà vu** | |IN| Preposition or subordinating conjunction | He is cleverer **than** I am | |JJ| Adjective | She wore a **beautiful** dress | |JJR| Adjective, comparative | My house is **bigger** than yours | |JJS| Adjective, superlative | I am the **shortest** person in my family | |LS| List item marker | A number of things need to be considered 
before starting a business **,** such as premises **,** finance **,** product demand **,** staffing and access to customers | |MD| Modal | You **must** stop when the traffic lights turn red | |NN| Noun, singular or mass | The **dog** likes to run | |NNS| Noun, plural | The **cars** are fast | |NNP| Proper noun, singular | I ordered the chair from **Amazon** | |NNPS| Proper noun, plural | We visted the **Kennedys** | |PDT| Predeterminer | **Both** the children had a toy | |POS| Possessive ending | I built the dog'**s** house | |PRP| Personal pronoun | **You** need to stop | |PRP$| Possessive pronoun | Remember not to judge a book by **its** cover | |RB| Adverb | The dog barks **loudly** | |RBR| Adverb, comparative | Could you sing more **quietly** please? | |RBS| Adverb, superlative | Everyone in the race ran fast, but John ran **the fastest** of all | |RP| Particle | He ate **up** all his dinner | |SYM| Symbol | What are you doing **?** | |TO| to | Please send it back **to** me | |UH| Interjection | **Wow!** You look gorgeous | |VB| Verb, base form | We **play** soccer | |VBD| Verb, past tense | I **worked** at a restaurant | |VBG| Verb, gerund or present participle | **Smoking** kills people | |VBN| Verb, past participle | She has **done** her homework | |VBP| Verb, non-3rd person singular present | You **flit** from place to place | |VBZ| Verb, 3rd person singular present | He never **calls** me | |WDT| Wh-determiner | The store honored the complaints, **which** were less than 25 days old | |WP| Wh-pronoun | **Who** can help me? | |WP\$| Possessive wh-pronoun | **Whose** fault is it? | |WRB| Wh-adverb | **Where** are you going? 
| ``` nlu.load('pos').predict('POS assigns each token in a sentence a grammatical label') ``` ## Named Entity Recognition (NER) in 1 line ![NER](http://ckl-it.de/wp-content/uploads/2021/02/ner-1.png) |Type | Description | |------|--------------| | PERSON | People, including fictional like **Harry Potter** | | NORP | Nationalities or religious or political groups like the **Germans** | | FAC | Buildings, airports, highways, bridges, etc. like **New York Airport** | | ORG | Companies, agencies, institutions, etc. like **Microsoft** | | GPE | Countries, cities, states. like **Germany** | | LOC | Non-GPE locations, mountain ranges, bodies of water. Like the **Sahara desert**| | PRODUCT | Objects, vehicles, foods, etc. (Not services.) like **playstation** | | EVENT | Named hurricanes, battles, wars, sports events, etc. like **hurricane Katrina**| | WORK_OF_ART | Titles of books, songs, etc. Like **Mona Lisa** | | LAW | Named documents made into laws. Like : **Declaration of Independence** | | LANGUAGE | Any named language. Like **Turkish**| | DATE | Absolute or relative dates or periods. Like every second **friday**| | TIME | Times smaller than a day. Like **every minute**| | PERCENT | Percentage, including ”%“. Like **55%** of workers enjoy their work | | MONEY | Monetary values, including unit. Like **50$** for those pants | | QUANTITY | Measurements, as of weight or distance. Like this person weights **50kg** | | ORDINAL | “first”, “second”, etc. Like David placed **first** in the tournament | | CARDINAL | Numerals that do not fall under another type. Like **hundreds** of models are avaiable in NLU | ``` nlu.load('ner').predict("John Snow Labs congratulates the Amarican John Biden to winning the American election!", output_level='chunk') ``` # Let's apply NLU to a dataset! 
<div> <img src="http://ckl-it.de/wp-content/uploads/2021/02/crypto.jpeg " width="400" height="250" > </div> ``` import pandas as pd import nlu !wget http://ckl-it.de/wp-content/uploads/2020/12/small_btc.csv df = pd.read_csv('/content/small_btc.csv').iloc[0:5000].title df ``` ## NER on a Crypto News dataset ### The **NER** model which you can load via `nlu.load('ner')` recognizes 18 different classes in your dataset. We set output level to chunk, so that we get 1 row per NER class. #### Predicted entities: NER is available in many languages, which you can [find in the John Snow Labs Modelshub](https://nlp.johnsnowlabs.com/models) ``` ner_df = nlu.load('ner').predict(df, output_level = 'chunk') ner_df ``` ### Top 50 Named Entities ``` ner_df.entities.value_counts()[:100].plot.barh(figsize = (16,20)) ``` ### Top 50 Named Entities which are PERSONS ``` ner_df[ner_df.entities_class == 'PERSON'].entities.value_counts()[:50].plot.barh(figsize=(18,20), title ='Top 50 Occuring Persons in the dataset') ``` ### Top 50 Named Entities which are Countries/Cities/States ``` ner_df[ner_df.entities_class == 'GPE'].entities.value_counts()[:50].plot.barh(figsize=(18,20),title ='Top 50 Countries/Cities/States Occuring in the dataset') ``` ### Top 50 Named Entities which are PRODUCTS ``` ner_df[ner_df.entities_class == 'PRODUCT'].entities.value_counts()[:50].plot.barh(figsize=(18,20),title ='Top 50 products occuring in the dataset') ``` ### Top 50 Named Entities which are ORGANIZATIONS ``` ner_df[ner_df.entities_class == 'ORG'].entities.value_counts()[:50].plot.barh(figsize=(18,20),title ='Top 50 products occuring in the dataset') ``` ## YAKE on a Crypto News dataset ### The **YAKE!** model (Yet Another Keyword Extractor) is an **unsupervised** keyword extraction algorithm. You can load it via `nlu.load('yake')`. It has no weights and is very fast. 
It has various parameters that can be configured to influence which keywords are being extracted, [see here for a more in-depth YAKE guide](https://github.com/JohnSnowLabs/nlu/blob/master/examples/webinars_conferences_etc/multi_lingual_webinar/1_NLU_base_features_on_dataset_with_YAKE_Lemma_Stemm_classifiers_NER_.ipynb) ``` yake_df = nlu.load('yake').predict(df) yake_df ``` ### Top 50 extracted Keywords with YAKE! ``` yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:50].plot.barh(figsize=(14,18)) ``` ## Binary Sentiment Analysis and Distribution on a dataset ``` sent_df = nlu.load('sentiment').predict(df) sent_df sent_df.sentiment.value_counts().plot.bar(title='Sentiment ') ``` ## Emotional Analysis and Distribution of Headlines ``` emo_df = nlu.load('emotion').predict(df) emo_df emo_df.emotion.value_counts().plot.bar(title='Emotion Distribution') ``` **Make sure to restart your notebook again** before starting the next section ``` print("Please restart kernel if you are in google colab and run next cell after the restart to configure java 8 back") 1+'wait' # This configures colab to use Java 8 again. # You need to run this in Google colab, because after restart it likes to set Java 11 as default, which will cause issues ! echo 2 | update-alternatives --config java import pandas as pd import nlu ``` # Answer **Closed Book** and Open **Book Questions** with Google's T5! 
<!-- [T5]() --> ![T5 GIF](https://1.bp.blogspot.com/-o4oiOExxq1s/Xk26XPC3haI/AAAAAAAAFU8/NBlvOWB84L0PTYy9TzZBaLf6fwPGJTR0QCLcBGAsYHQ/s1600/image3.gif) You can load the **question answering** model with `nlu.load('en.t5')` ``` # Load question answering T5 model t5_closed_question = nlu.load('en.t5') ``` ## Answer **Closed Book Questions** Closed book means that no additional context is given and the model must answer the question with the knowledge stored in it's weights ``` t5_closed_question.predict("Who is president of Nigeria?") t5_closed_question.predict("What is the most common language in India?") t5_closed_question.predict("What is the capital of Germany?") ``` ## Answer **Open Book Questions** These are questions where we give the model some additional context, that is used to answer the question ``` t5_open_book = nlu.load('answer_question') context = 'Peters last week was terrible! He had an accident and broke his leg while skiing!' question1 = 'Why was peters week so bad?' question2 = 'How did peter broke his leg?' t5_open_book.predict([question1+context, question2 + context]) # Ask T5 questions in the context of a News Article question1 = 'Who is Jack ma?' question2 = 'Who is founder of Alibaba Group?' question3 = 'When did Jack Ma re-appear?' question4 = 'How did Alibaba stocks react?' question5 = 'Whom did Jack Ma meet?' question6 = 'Who did Jack Ma hide from?' # from https://www.bbc.com/news/business-55728338 news_article_context = """ context: Alibaba Group founder Jack Ma has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news. 
""" questions = [ question1+ news_article_context, question2+ news_article_context, question3+ news_article_context, question4+ news_article_context, question5+ news_article_context, question6+ news_article_context,] t5_open_book.predict(questions) ``` # Multi Problem T5 model for Summarization and more The main T5 model was trained for over 20 tasks from the SQUAD/GLUE/SUPERGLUE datasets. See [this notebook](https://github.com/JohnSnowLabs/nlu/blob/master/examples/webinars_conferences_etc/multi_lingual_webinar/7_T5_SQUAD_GLUE_SUPER_GLUE_TASKS.ipynb) for a demo of all tasks # Overview of every task available with T5 [The T5 model](https://arxiv.org/pdf/1910.10683.pdf) is trained on various datasets for 17 different tasks which fall into 8 categories. 1. Text summarization 2. Question answering 3. Translation 4. Sentiment analysis 5. Natural Language inference 6. Coreference resolution 7. Sentence Completion 8. Word sense disambiguation ### Every T5 Task with explanation: |Task Name | Explanation | |----------|--------------| |[1.CoLA](https://nyu-mll.github.io/CoLA/) | Classify if a sentence is gramaticaly correct| |[2.RTE](https://dl.acm.org/doi/10.1007/11736790_9) | Classify whether if a statement can be deducted from a sentence| |[3.MNLI](https://arxiv.org/abs/1704.05426) | Classify for a hypothesis and premise whether they contradict or contradict each other or neither of both (3 class).| |[4.MRPC](https://www.aclweb.org/anthology/I05-5002.pdf) | Classify whether a pair of sentences is a re-phrasing of each other (semantically equivalent)| |[5.QNLI](https://arxiv.org/pdf/1804.07461.pdf) | Classify whether the answer to a question can be deducted from an answer candidate.| |[6.QQP](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs) | Classify whether a pair of questions is a re-phrasing of each other (semantically equivalent)| |[7.SST2](https://www.aclweb.org/anthology/D13-1170.pdf) | Classify the sentiment of a sentence as positive or 
negative| |[8.STSB](https://www.aclweb.org/anthology/S17-2001/) | Classify the sentiment of a sentence on a scale from 1 to 5 (21 Sentiment classes)| |[9.CB](https://ojs.ub.uni-konstanz.de/sub/index.php/sub/article/view/601) | Classify for a premise and a hypothesis whether they contradict each other or not (binary).| |[10.COPA](https://www.aaai.org/ocs/index.php/SSS/SSS11/paper/view/2418/0) | Classify for a question, premise, and 2 choices which choice the correct choice is (binary).| |[11.MultiRc](https://www.aclweb.org/anthology/N18-1023.pdf) | Classify for a question, a paragraph of text, and an answer candidate, if the answer is correct (binary),| |[12.WiC](https://arxiv.org/abs/1808.09121) | Classify for a pair of sentences and a disambigous word if the word has the same meaning in both sentences.| |[13.WSC/DPR](https://www.aaai.org/ocs/index.php/KR/KR12/paper/view/4492/0) | Predict for an ambiguous pronoun in a sentence what it is referring to. | |[14.Summarization](https://arxiv.org/abs/1506.03340) | Summarize text into a shorter representation.| |[15.SQuAD](https://arxiv.org/abs/1606.05250) | Answer a question for a given context.| |[16.WMT1.](https://arxiv.org/abs/1706.03762) | Translate English to German| |[17.WMT2.](https://arxiv.org/abs/1706.03762) | Translate English to French| |[18.WMT3.](https://arxiv.org/abs/1706.03762) | Translate English to Romanian| ``` # Load the Multi Task Model T5 t5_multi = nlu.load('en.t5.base') # https://www.reuters.com/article/instant-article/idCAKBN2AA2WF text = """(Reuters) - Mastercard Inc said on Wednesday it was planning to offer support for some cryptocurrencies on its network this year, joining a string of big-ticket firms that have pledged similar support. The credit-card giant’s announcement comes days after Elon Musk’s Tesla Inc revealed it had purchased $1.5 billion of bitcoin and would soon accept it as a form of payment. 
Asset manager BlackRock Inc and payments companies Square and PayPal have also recently backed cryptocurrencies. Mastercard already offers customers cards that allow people to transact using their cryptocurrencies, although without going through its network. "Doing this work will create a lot more possibilities for shoppers and merchants, allowing them to transact in an entirely new form of payment. This change may open merchants up to new customers who are already flocking to digital assets," Mastercard said. (mstr.cd/3tLaPZM) Mastercard specified that not all cryptocurrencies will be supported on its network, adding that many of the hundreds of digital assets in circulation still need to tighten their compliance measures. Many cryptocurrencies have struggled to win the trust of mainstream investors and the general public due to their speculative nature and potential for money laundering. """ t5_multi['t5'].setTask('summarize ') short = t5_multi.predict(text) short print(f"Original Length {len(short.document.iloc[0])} Summarized Length : {len(short.T5.iloc[0])} \n summarized text :{short.T5.iloc[0]} ") short.T5.iloc[0] ``` **Make sure to restart your notebook again** before starting the next section ``` print("Please restart kernel if you are in google colab and run next cell after the restart to configure java 8 back") 1+'wait' # This configures colab to use Java 8 again. # You need to run this in Google colab, because after restart it likes to set Java 11 as default, which will cause issues ! echo 2 | update-alternatives --config java ``` # Translate between more than 200 Languages with [ Microsofts Marian Models](https://marian-nmt.github.io/publications/) Marian is an efficient, free Neural Machine Translation framework mainly being developed by the Microsoft Translator team (646+ pretrained models & pipelines in 192+ languages) You need to specify the language your data is in as `start_language` and the language you want to translate to as `target_language`. 
The language references must be [ISO language codes](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) `nlu.load('<start_language>.translate_to.<target_language>')` **Translate Turkish to English:** `nlu.load('tr.translate_to.en')` **Translate English to French:** `nlu.load('en.translate_to.fr')` **Translate French to Hebrew:** `nlu.load('fr.translate_to.he')` ![Languages](https://camo.githubusercontent.com/b548abf3d1f9657d01fd74404354ec49fc11eea0/687474703a2f2f636b6c2d69742e64652f77702d636f6e74656e742f75706c6f6164732f323032312f30322f666c6167732e6a706567) ``` import nlu import pandas as pd !wget http://ckl-it.de/wp-content/uploads/2020/12/small_btc.csv df = pd.read_csv('/content/small_btc.csv').iloc[0:20].title ``` ## Translate to German ``` translate_pipe = nlu.load('en.translate_to.de') translate_pipe.predict(df) ``` ## Translate to Chinese ``` translate_pipe = nlu.load('en.translate_to.zh') translate_pipe.predict(df) ``` ## Translate to Hindi ``` translate_pipe = nlu.load('en.translate_to.hi') translate_pipe.predict(df) ``` # Train a Multi Lingual Classifier for 100+ languages from a dataset with just one language [Leverage Language-agnostic BERT Sentence Embedding (LABSE)​ and acheive state of the art!](https://arxiv.org/abs/2007.01852) ​ ​ Training a classifier with LABSE embeddings enables the knowledge to be transferred to 109 languages! With the [SentimentDL model](https://nlp.johnsnowlabs.com/docs/en/annotators#sentimentdl-multi-class-sentiment-analysis-annotator) from Spark NLP you can achieve State Of the Art results on any binary class text classification problem. ### Languages suppoted by LABSE ![labse languages](http://ckl-it.de/wp-content/uploads/2021/02/LABSE.png) ``` # Download French twitter Sentiment dataset https://www.kaggle.com/hbaflast/french-twitter-sentiment-analysis ! 
wget http://ckl-it.de/wp-content/uploads/2021/02/french_tweets.csv import pandas as pd train_path = '/content/french_tweets.csv' train_df = pd.read_csv(train_path) # the text data to use for classification should be in a column named 'text' columns=['text','y'] train_df = train_df[columns] train_df = train_df.sample(frac=1).reset_index(drop=True) train_df ``` ## Train Deep Learning Classifier using `nlu.load('train.sentiment')` Al you need is a Pandas Dataframe with a label column named `y` and the column with text data should be named `text` We are training on a french dataset and can then predict classes correct **in 100+ langauges** ``` # Train longer! trainable_pipe = nlu.load('xx.embed_sentence.labse train.sentiment') trainable_pipe['sentiment_dl'].setMaxEpochs(60) trainable_pipe['sentiment_dl'].setLr(0.005) fitted_pipe = trainable_pipe.fit(train_df.iloc[:2000]) # predict with the trainable pipeline on dataset and get predictions preds = fitted_pipe.predict(train_df.iloc[:2000],output_level='document') #sentence detector that is part of the pipe generates sone NaNs. lets drop them first preds.dropna(inplace=True) print(classification_report(preds['y'], preds['sentiment'])) preds ``` ### Test the fitted pipe on new example #### The Model understands Englsih ![en](https://www.worldometers.info/img/flags/small/tn_nz-flag.gif) ``` fitted_pipe.predict("This was awful!") fitted_pipe.predict("This was great!") ``` #### The Model understands German ![de](https://www.worldometers.info/img/flags/small/tn_gm-flag.gif) ``` # German for:' this movie was great!' fitted_pipe.predict("Der Film war echt klasse!") # German for: 'This movie was really boring' fitted_pipe.predict("Der Film war echt langweilig!") ``` #### The Model understands Chinese ![zh](https://www.worldometers.info/img/flags/small/tn_ch-flag.gif) ``` # Chinese for: "This model was awful!" fitted_pipe.predict("这部电影太糟糕了!") # Chine for : "This move was great!" 
fitted_pipe.predict("此举很棒!") ``` #### Model understanda Afrikaans ![af](https://www.worldometers.info/img/flags/small/tn_sf-flag.gif) ``` # Afrikaans for 'This movie was amazing!' fitted_pipe.predict("Hierdie film was ongelooflik!") # Afrikaans for :'The movie made me fall asleep, it's awful!' fitted_pipe.predict('Die film het my aan die slaap laat raak, dit is verskriklik!') ``` #### The model understands Vietnamese ![vi](https://www.worldometers.info/img/flags/small/tn_vm-flag.gif) ``` # Vietnamese for : 'The movie was painful to watch' fitted_pipe.predict('Phim đau điếng người xem') # Vietnamese for : 'This was the best movie ever' fitted_pipe.predict('Đây là bộ phim hay nhất từ ​​trước đến nay') ``` #### The model understands Japanese ![ja](https://www.worldometers.info/img/flags/small/tn_ja-flag.gif) ``` # Japanese for : 'This is now my favorite movie!' fitted_pipe.predict('これが私のお気に入りの映画です!') # Japanese for : 'I would rather kill myself than watch that movie again' fitted_pipe.predict('その映画をもう一度見るよりも自殺したい') ``` # There are many more models you can put to use in 1 line of code! 
## Checkout [the Modelshub](https://nlp.johnsnowlabs.com/models) and the [NLU Namespace](https://nlu.johnsnowlabs.com/docs/en/namespace) for more models ### More ressources - [Join our Slack](https://join.slack.com/t/spark-nlp/shared_invite/zt-lutct9gm-kuUazcyFKhuGY3_0AMkxqA) - [NLU Website](https://nlu.johnsnowlabs.com/) - [NLU Github](https://github.com/JohnSnowLabs/nlu) - [Many more NLU example tutorials](https://github.com/JohnSnowLabs/nlu/tree/master/examples) - [Overview of every powerful nlu 1-liner](https://nlu.johnsnowlabs.com/docs/en/examples) - [Checkout the Modelshub for an overview of all models](https://nlp.johnsnowlabs.com/models) - [Checkout the NLU Namespace where you can find every model as a tabel](https://nlu.johnsnowlabs.com/docs/en/namespace) - [Intro to NLU article](https://medium.com/spark-nlp/1-line-of-code-350-nlp-models-with-john-snow-labs-nlu-in-python-2f1c55bba619) - [Indepth and easy Sentence Similarity Tutorial, with StackOverflow Questions using BERTology embeddings](https://medium.com/spark-nlp/easy-sentence-similarity-with-bert-sentence-embeddings-using-john-snow-labs-nlu-ea078deb6ebf) - [1 line of Python code for BERT, ALBERT, ELMO, ELECTRA, XLNET, GLOVE, Part of Speech with NLU and t-SNE](https://medium.com/spark-nlp/1-line-of-code-for-bert-albert-elmo-electra-xlnet-glove-part-of-speech-with-nlu-and-t-sne-9ebcd5379cd) ``` while 1 : 1 ```
github_jupyter
# Florida Single Weekly Predictions, trained on historical flu data and temperature > Once again, just like before in the USA flu model, I am going to index COVID weekly cases by Wednesdays ``` import tensorflow as tf physical_devices = tf.config.list_physical_devices('GPU') tf.config.experimental.set_memory_growth(physical_devices[0], enable=True) import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn from sklearn import preprocessing ``` ### getting historical flu data ``` system = "Windows" if system == "Windows": flu_dir = "..\\..\\..\\cdc-fludata\\us_national\\" else: flu_dir = "../../../cdc-fludata/us_national/" flu_dictionary = {} for year in range(1997, 2019): filepath = "usflu_" year_string = str(year) + "-" + str(year + 1) filepath = flu_dir + filepath + year_string + ".csv" temp_df = pd.read_csv(filepath) flu_dictionary[year] = temp_df ``` ### combining flu data into one chronological series of total cases ``` # getting total cases and putting them in a series by week flu_series_dict = {} for year in flu_dictionary: temp_df = flu_dictionary[year] temp_df = temp_df.set_index("WEEK") abridged_df = temp_df.iloc[:, 2:] try: abridged_df = abridged_df.drop(columns="PERCENT POSITIVE") except: pass total_cases_series = abridged_df.sum(axis=1) flu_series_dict[year] = total_cases_series all_cases_series = pd.Series(dtype="int64") for year in flu_series_dict: temp_series = flu_series_dict[year] all_cases_series = all_cases_series.append(temp_series, ignore_index=True) all_cases_series all_cases_series.plot(grid=True, figsize=(60,20)) ``` ### Now, making a normalized series between 0, 1 ``` norm_flu_series_dict = {} for year in flu_series_dict: temp_series = flu_series_dict[year] temp_list = preprocessing.minmax_scale(temp_series) temp_series = pd.Series(temp_list) norm_flu_series_dict[year] = temp_series all_cases_norm_series = pd.Series(dtype="int64") for year in norm_flu_series_dict: temp_series = norm_flu_series_dict[year] 
all_cases_norm_series = all_cases_norm_series.append(temp_series, ignore_index=True) all_cases_norm_series.plot(grid=True, figsize=(60,5)) all_cases_norm_series ``` ## Getting COVID-19 Case Data ``` if system == "Windows": datapath = "..\\..\\..\\COVID-19\\csse_covid_19_data\\csse_covid_19_time_series\\" else: datapath = "../../../COVID-19/csse_covid_19_data/csse_covid_19_time_series/" # Choose from "US Cases", "US Deaths", "World Cases", "World Deaths", "World Recoveries" key = "US Cases" if key == "US Cases": datapath = datapath + "time_series_covid19_confirmed_US.csv" elif key == "US Deaths": datapath = datapath + "time_series_covid19_deaths_US.csv" elif key == "World Cases": datapath = datapath + "time_series_covid19_confirmed_global.csv" elif key == "World Deaths": datapath = datapath + "time_series_covid19_deaths_global.csv" elif key == "World Recoveries": datapath = datapath + "time_series_covid19_recovered_global.csv" covid_df = pd.read_csv(datapath) covid_df florida_data = covid_df.loc[covid_df["Province_State"] == "Florida"] florida_cases = florida_data.iloc[:,11:] florida_cases_total = florida_cases.sum(axis=0) florida_cases_total.plot() ``` ### convert daily data to weekly data ``` florida_weekly_cases = florida_cases_total.iloc[::7] florida_weekly_cases florida_weekly_cases.plot() ``` ### Converting cumulative series to non-cumulative series ``` florida_wnew_cases = florida_weekly_cases.diff() florida_wnew_cases[0] = 1.0 florida_wnew_cases florida_wnew_cases.plot() ``` ### normalizing weekly case data > This is going to be different for texas. This is because, the peak number of weekly new infections probably has not been reached yet. We need to divide everything by a guess for the peak number of predictions instead of min-max scaling. ``` # I'm guessing that the peak number of weekly cases will be about 60,000. Could definitely be wrong. 
peak_guess = 60000 florida_wnew_cases_norm = florida_wnew_cases / peak_guess florida_wnew_cases_norm.plot() florida_wnew_cases_norm ``` ## getting temperature data > At the moment, this will be dummy data ``` flu_temp_data = np.full(len(all_cases_norm_series), 0.5) training_data_df = pd.DataFrame({ "Temperature" : flu_temp_data, "Flu Cases" : all_cases_norm_series }) training_data_df covid_temp_data = np.full(len(florida_wnew_cases_norm), 0.5) testing_data_df = pd.DataFrame({ "Temperature" : covid_temp_data, "COVID Cases" : florida_wnew_cases_norm }) testing_data_df testing_data_df.shape training_data_np = training_data_df.values testing_data_np = testing_data_df.values ``` ## Building Neural Net Model ### preparing model data ``` # this code is directly from https://www.tensorflow.org/tutorials/structured_data/time_series # much of below data formatting code is derived straight from same link def multivariate_data(dataset, target, start_index, end_index, history_size, target_size, step, single_step=False): data = [] labels = [] start_index = start_index + history_size if end_index is None: end_index = len(dataset) - target_size for i in range(start_index, end_index): indices = range(i-history_size, i, step) data.append(dataset[indices]) if single_step: labels.append(target[i+target_size]) else: labels.append(target[i:i+target_size]) return np.array(data), np.array(labels) past_history = 22 future_target = 0 STEP = 1 x_train_single, y_train_single = multivariate_data(training_data_np, training_data_np[:, 1], 0, None, past_history, future_target, STEP, single_step=True) x_test_single, y_test_single = multivariate_data(testing_data_np, testing_data_np[:, 1], 0, None, past_history, future_target, STEP, single_step=True) BATCH_SIZE = 300 BUFFER_SIZE = 1000 train_data_single = tf.data.Dataset.from_tensor_slices((x_train_single, y_train_single)) train_data_single = train_data_single.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() test_data_single = 
tf.data.Dataset.from_tensor_slices((x_test_single, y_test_single)) test_data_single = test_data_single.batch(1).repeat() ``` ### designing actual model ``` # creating the neural network model lstm_prediction_model = tf.keras.Sequential([ tf.keras.layers.LSTM(32, input_shape=x_train_single.shape[-2:]), tf.keras.layers.Dense(32), tf.keras.layers.Dense(1) ]) lstm_prediction_model.compile(optimizer=tf.keras.optimizers.RMSprop(), loss="mae") single_step_history = lstm_prediction_model.fit(train_data_single, epochs=10, steps_per_epoch=250, validation_data=test_data_single, validation_steps=50) def create_time_steps(length): return list(range(-length, 0)) def show_plot(plot_data, delta, title): labels = ['History', 'True Future', 'Model Prediction'] marker = ['.-', 'rx', 'go'] time_steps = create_time_steps(plot_data[0].shape[0]) if delta: future = delta else: future = 0 plt.title(title) for i, x in enumerate(plot_data): if i: plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i]) else: plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i]) plt.legend() plt.xlim([time_steps[0], (future+5)*2]) plt.xlabel('Week (defined by Wednesdays)') plt.ylabel('Normalized Cases') return plt for x, y in train_data_single.take(10): #print(lstm_prediction_model.predict(x)) plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(), lstm_prediction_model.predict(x)[0]], 0, 'Training Data Prediction') plot.show() for x, y in test_data_single.take(1): plot = show_plot([x[0][:, 1].numpy(), y[0].numpy(), lstm_prediction_model.predict(x)[0]], 0, 'Florida COVID Case Prediction, Single Week') plot.show() ```
github_jupyter
### Background and Overview: The [MIMIC-III](https://mimic.mit.edu/about/mimic/) (Medical Information Mart for Intensive Care) Clinical Database is comprised of deidentified health-related data associated with over 40,000 patients (available through request). Its 26 tables have a vast amount of information on the patients who stayed in critical care units of the Beth Israel Deaconess Medical Center between 2001 and 2012 ranging from patient demographics to lab reports to detailed clinical notes. #### Figure 2: Compares an Unstructured Data Field's Values for Alive and Deceased Patients This analysis was done on 1,000 patients, with a 50/50 split between patients who were marked alive and those who were marked deceased. It explores the different reasons patients are admitted to the hospital. ``` # import pandas as pd # import numpy as np # import matplotlib.pyplot as plt from IPython.display import Image """NOTE: This code block is commented out because I have not uploaded data files or source code; it is only here to show to the process.""" # # Set up path/s to file/s # path_to_data = '../data/' # # Read data # alive_admissions = pd.read_csv(path_to_data + 'alive_admissions.csv', header=None, # names=['patient_id', 'flag', 'type']) # deceased_admissions = pd.read_csv(path_to_data + 'deceased_admissions.csv', header=None, # names=['patient_id', 'flag', 'type']) # # Process data # alive_type_count = alive_admissions.groupby(['type']).size().reset_index() # deceased_type_count = deceased_admissions.groupby(['type']).size().reset_index() """NOTE: This code block is also commented out as above; it is only here to show to the process.""" # # Plot the data # n_bars = 3 # index = np.arange(n_bars) # bar_width = 0.5 # fig2 = plt.subplots() # plt.bar(index, alive_type_count[0], bar_width, alpha=0.5, label='alive') # plt.bar(index + bar_width / 2, deceased_type_count[0], bar_width, alpha=0.5, label='deceased') # plt.xticks(index + bar_width / 4, alive_type_count['type']) # 
plt.ylabel("Frequency") # plt.xlabel("Admission Type") # plt.legend(loc='upper right') # plt.title('Type of Admissions for Alive & Deceased Patients') # plt.tight_layout() # plt.show() Image(filename='Figure_2.png') ``` **Figure 2** reviews the reasons behind why 1,000 patients were admitted to the hospital for each visit. The admission types are segmented by whether the patients were marked alive or deceased. Figure 2 shows that, overall, deceased patients tended to have more emergency-type visits, whereas alive patients tended to have more elective-based visits. This data field can be converted to another feature (e.g.: a normalized number of visits per patient, altogether or for each type of admission) to be added to the feature vectors for training a predictive model. **_Note:_** *as a starting point, I used 1,000 patients. I would like to expand this to a larger subset of the 46,000+ patients available through MIMIC-III. Additionally, I intend to review other unstructured data fields, such as diagnosis description and clinical notes.*
github_jupyter
# Guide for Authors ``` print('Welcome to "The Debugging Book"!') ``` This notebook compiles the most important conventions for all chapters (notebooks) of "The Debugging Book". ## Organization of this Book ### Chapters as Notebooks Each chapter comes in its own _Jupyter notebook_. A single notebook (= a chapter) should cover the material (text and code, possibly slides) for a 90-minute lecture. A chapter notebook should be named `Topic.ipynb`, where `Topic` is the topic. `Topic` must be usable as a Python module and should characterize the main contribution. If the main contribution of your chapter is a class `FooDebugger`, for instance, then your topic (and notebook name) should be `FooDebugger`, such that users can state ```python from FooDebugger import FooDebugger ``` Since class and module names should start with uppercase letters, all non-notebook files and folders start with lowercase letters. this may make it easier to differentiate them. The special notebook `index.ipynb` gets converted into the home pages `index.html` (on fuzzingbook.org) and `README.md` (on GitHub). Notebooks are stored in the `notebooks` folder. ### DebuggingBook and FuzzingBook This project shares some infrastructure (and even chapters) with "The Fuzzing Book", established through _symbolic links_. Your file organization should be such that `debuggingbook` and `fuzzingbook` are checked out in the same folder; otherwise, sharing infrastructure will not work ``` <some folder> |- fuzzingbook |- debuggingbook (this project folder) ``` To check whether the organization fits, check whether the `debuggingbook` `Makefile` properly points to `../fuzzingbook/Makefile` - that is, the `fuzzingbook` `Makefile`. If you can properly open the (shared) `Makefile` in both projects, things are set up properly. ### Output Formats The notebooks by themselves can be used by instructors and students to toy around with. They can edit code (and text) as they like and even run them as a slide show. 
The notebook can be _exported_ to multiple (non-interactive) formats: * HTML – for placing this material online. * PDF – for printing * Python – for coding * Slides – for presenting The included Makefile can generate all of these automatically (and a few more). At this point, we mostly focus on HTML and Python, as we want to get these out quickly; but you should also occasionally ensure that your notebooks can (still) be exported into PDF. Other formats (Word, Markdown) are experimental. ## Sites All sources for the book end up on the [Github project page](https://github.com/uds-se/debuggingbook). This holds the sources (notebooks), utilities (Makefiles), as well as an issue tracker. The derived material for the book ends up in the `docs/` folder, from where it is eventually pushed to the [debuggingbook website](http://www.debuggingbook.org/). This site allows to read the chapters online, can launch Jupyter notebooks using the binder service, and provides access to code and slide formats. Use `make publish` to create and update the site. ### The Book PDF The book PDF is compiled automatically from the individual notebooks. Each notebook becomes a chapter; references are compiled in the final chapter. Use `make book` to create the book. ## Creating and Building ### Tools you will need To work on the notebook files, you need the following: 1. Jupyter notebook. The easiest way to install this is via the [Anaconda distribution](https://www.anaconda.com/download/). 2. Once you have the Jupyter notebook installed, you can start editing and coding right away by starting `jupyter notebook` (or `jupyter lab`) in the topmost project folder. 3. If (like me) you don't like the Jupyter Notebook interface, I recommend [Jupyter Lab](https://jupyterlab.readthedocs.io/en/stable/), the designated successor to Jupyter Notebook. Invoke it as `jupyter lab`. It comes with a much more modern interface, but misses autocompletion and a couple of extensions. 
I am running it [as a Desktop application](http://christopherroach.com/articles/jupyterlab-desktop-app/) which gets rid of all the browser toolbars. On the Mac, there is also the [Pineapple app](https://nwhitehead.github.io/pineapple/), which integrates a nice editor with a local server. This is easy to use, but misses a few features; also, it hasn't seen updates since 2015. 4. To create the entire book (with citations, references, and all), you also need the [ipybublish](https://github.com/chrisjsewell/ipypublish) package. This allows you to create the HTML files, merge multiple chapters into a single PDF or HTML file, create slides, and more. The Makefile provides the essential tools for creation. ### Version Control We use git in a single strand of revisions. Feel free branch for features, but eventually merge back into the main "master" branch. Sync early; sync often. Only push if everything ("make all") builds and passes. The Github repo thus will typically reflect work in progress. If you reach a stable milestone, you can push things on the fuzzingbook.org web site, using `make publish`. #### nbdime The [nbdime](https://github.com/jupyter/nbdime) package gives you tools such as `nbdiff` (and even better, `nbdiff-web`) to compare notebooks against each other; this ensures that cell _contents_ are compared rather than the binary format. `nbdime config-git --enable` integrates nbdime with git such that `git diff` runs the above tools; merging should also be notebook-specific. #### nbstripout Notebooks in version control _should not contain output cells,_ as these tend to change a lot. (Hey, we're talking random output generation here!) To have output cells automatically stripped during commit, install the [nbstripout](https://github.com/kynan/nbstripout) package and use ``` nbstripout --install ``` to set it up as a git filter. The `notebooks/` folder comes with a `.gitattributes` file already set up for `nbstripout`, so you should be all set. 
Note that _published_ notebooks (in short, anything under the `docs/` tree _should_ have their output cells included, such that users can download and edit notebooks with pre-rendered output. This folder contains a `.gitattributes` file that should explicitly disable `nbstripout`, but it can't hurt to check. As an example, the following cell 1. _should_ have its output included in the [HTML version of this guide](https://www.debuggingbook.org/beta/html/Guide_for_Authors.html); 2. _should not_ have its output included in [the git repo](https://github.com/uds-se/debuggingbook/blob/master/notebooks/Guide_for_Authors.ipynb) (`notebooks/`); 3. _should_ have its output included in [downloadable and editable notebooks](https://github.com/uds-se/debuggingbook/blob/master/docs/beta/notebooks/Guide_for_Authors.ipynb) (`docs/notebooks/` and `docs/beta/notebooks/`). ``` import random random.random() ``` ### Inkscape and GraphViz Creating derived files uses [Inkscape](https://inkscape.org/en/) and [Graphviz](https://www.graphviz.org/) – through its [Python wrapper](https://pypi.org/project/graphviz/) – to process SVG images. These tools are not automatically installed, but are available on pip, _brew_ and _apt-get_ for all major distributions. ### LaTeX Fonts By default, creating PDF uses XeLaTeX with a couple of special fonts, which you can find in the `fonts/` folder; install these fonts system-wide to make them accessible to XeLaTeX. You can also run `make LATEX=pdflatex` to use `pdflatex` and standard LaTeX fonts instead. ### Creating Derived Formats (HTML, PDF, code, ...) The [Makefile](../Makefile) provides rules for all targets. Type `make help` for instructions. The Makefile should work with GNU make and a standard Jupyter Notebook installation. To create the multi-chapter book and BibTeX citation support, you need to install the [iPyPublish](https://github.com/chrisjsewell/ipypublish) package (which includes the `nbpublish` command). 
### Creating a New Chapter To create a new chapter for the book, 1. Set up a new `.ipynb` notebook file as copy of [Template.ipynb](Template.ipynb). 2. Include it in the `CHAPTERS` list in the `Makefile`. 3. Add it to the git repository. ## Teaching a Topic Each chapter should be devoted to a central concept and a small set of lessons to be learned. I recommend the following structure: * Introduce the problem ("We want to parse inputs") * Illustrate it with some code examples ("Here's some input I'd like to parse") * Develop a first (possibly quick and dirty) solution ("A PEG parser is short and often does the job"_ * Show that it works and how it works ("Here's a neat derivation tree. Look how we can use this to mutate and combine expressions!") * Develop a second, more elaborated solution, which should then become the main contribution. ("Here's a general LR(1) parser that does not require a special grammar format. (You can skip it if you're not interested)") * Offload non-essential extensions to later sections or to exercises. ("Implement a universal parser, using the Dragon Book") The key idea is that readers should be able to grasp the essentials of the problem and the solution in the beginning of the chapter, and get further into details as they progress through it. Make it easy for readers to be drawn in, providing insights of value quickly. If they are interested to understand how things work, they will get deeper into the topic. If they just want to use the technique (because they may be more interested in later chapters), having them read only the first few examples should be fine for them, too. Whatever you introduce should be motivated first, and illustrated after. Motivate the code you'll be writing, and use plenty of examples to show what the code just introduced is doing. Remember that readers should have fun interacting with your code and your examples. Show and tell again and again and again. 
### Special Sections #### Quizzes You can have _quizzes_ as part of the notebook. These are created using the `quiz()` function. Its arguments are * The question * A list of options * The correct answer(s) - either * the single number of the one single correct answer (starting with 1) * a list of numbers of correct answers (multiple choices) To make the answer less obvious, you can specify it as a string containing an arithmetic expression evaluating to the desired number(s). The expression will remain in the code (and possibly be shown as hint in the quiz). ``` from bookutils import quiz # A single-choice quiz quiz("The color of the sky is", [ "blue", "red", "black" ], '5 - 4') # A multiple-choice quiz quiz("What is this book?", [ "Novel", "Friendly", "Useful" ], '[5 - 4, 1 + 1, 27 / 9]') ``` Cells that contain only the `quiz()` call will not be rendered (but the quiz will). #### Synopsis Each chapter should have a section named "Synopsis" at the very end: ```markdown ## Synopsis This is the text of the synopsis. ``` This section is evaluated at the very end of the notebook. It should summarize the most important functionality (classes, methods, etc.) together with examples. In the derived HTML and PDF files, it is rendered at the beginning, such that it can serve as a quick reference #### Excursions There may be longer stretches of text (and code!) that are too special, too boring, or too repetitve to read. You can mark such stretches as "Excursions" by enclosing them in MarkDown cells that state: ```markdown #### Excursion: TITLE ``` and ```markdown #### End of Excursion ``` Stretches between these two markers get special treatment when rendering: * In the resulting HTML output, these blocks are set up such that they are shown on demand only. * In printed (PDF) versions, they will be replaced by a pointer to the online version. * In the resulting slides, they will be omitted right away. 
```json
{
 "ipub": {
  "ignore": true
 }
}
```
* single quotes (`'characters'`) for single characters and formal language symbols that an end user would not see.
This will not only ease construction and reconstruction of the code, but also reduce requirements for readers, giving them more flexibility in navigating through the book.
We introduce a class `Foo`: ``` class Foo: def __init__(self) -> None: pass def bar(self) -> None: pass ``` Now we could discuss what `__init__()` and `bar()` do, or give an example of how to use them: ``` f = Foo() f.bar() ``` We now can introduce a new `Foo` method by subclassing from `Foo` into a class which is _also_ called `Foo`: ``` class Foo(Foo): def baz(self) -> None: pass ``` This is the same as if we had subclassed `Foo` into `Foo_1` with `Foo` then becoming an alias for `Foo_1`. The original `Foo` class is overshadowed by the new one: ``` new_f = Foo() new_f.baz() ``` Note, though, that _existing_ objects keep their original class: ``` from ExpectError import ExpectError with ExpectError(AttributeError): f.baz() # type: ignore ``` ## Helpers There's a couple of notebooks with helpful functions, including [Timer](Timer.ipynb), [ExpectError and ExpectTimeout](ExpectError.ipynb). Also check out the [Tracer](Tracer.ipynb) class. ### Quality Assurance In your code, make use of plenty of assertions that allow to catch errors quickly. These assertions also help your readers understand the code. ### Issue Tracker The [Github project page](https://github.com/uds-se/debuggingbook) allows to enter and track issues. ## Writing Text Text blocks use Markdown syntax. [Here is a handy guide](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet). ### Sections Any chapter notebook must begin with `# TITLE`, and sections and subsections should then follow by `## SECTION` and `### SUBSECTION`. Sections should start with their own block, to facilitate cross-referencing. ### Highlighting Use * _emphasis_ (`_emphasis_`) for highlighting, * *emphasis* (`*emphasis*`) for highlighting terms that will go into the index, * `backticks` for code and other verbatim elements. ### Hyphens and Dashes Use "–" for em-dashes, "-" for hyphens, and "$-$" for minus. ### Quotes Use standard typewriter quotes (`"quoted string"`) for quoted text. 
The PDF version will automatically convert these to "smart" (e.g. left and right) quotes. ### Lists and Enumerations You can use bulleted lists: * Item A * Item B and enumerations: 1. item 1 1. item 2 For description lists, use a combination of bulleted lists and highlights: * **PDF** is great for reading offline * **HTML** is great for reading online ### Math LaTeX math formatting works, too. `$x = \sum_{n = 1}^{\infty}\frac{1}{n}$` gets you $x = \sum_{n = 1}^{\infty}\frac{1}{n}$. ### Inline Code Python code normally goes into its own cells, but you can also have it in the text: ```python s = "Python syntax highlighting" print(s) ``` ### Images To insert images, use Markdown syntax `![Word cloud](PICS/wordcloud.png){width=100%}` inserts a picture from the `PICS` folder. ![Word cloud](PICS/wordcloud.png){width=100%} All pictures go to `PICS/`, both in source as well as derived formats; both are stored in git, too. (Not all of us have all tools to recreate diagrams, etc.) ### Footnotes Markdown supports footnotes, as in [^footnote]. These are rendered as footnotes in HTML and PDF, _but not within Jupyter_; hence, readers may find them confusing. So far, the book makes no use of footnotes, and uses parenthesized text instead. [^footnote]: Test, [Link](https://www.fuzzingbook.org). ### Floating Elements and References \todo[inline]{I haven't gotten this to work yet -- AZ} To produce floating elements in LaTeX and PDF, edit the metadata of the cell which contains it. (In the Jupyter Notebook Toolbar go to View -> Cell Toolbar -> Edit Metadata and a button will appear above each cell.) This allows you to control placement and create labels. 
- `alternate` is optional and constitutes using alternating colors for the table rows (e.g. \rowcolors{2}{gray!25}{white}). See [https://tex.stackexchange.com/a/5365/107738](https://tex.stackexchange.com/a/5365/107738).
Additionally, 'breqn' or 'breqn\*' will select the experimental [breqn](https://ctan.org/pkg/breqn) environment to *smart* wrap long equations. - label is optional and will only be used if the equation is in an environment #### References To reference a floating object, use `\cref`, e.g. \cref{eq:texdemo} ### Cross-Referencing #### Section References * To refer to sections in the same notebook, use the header name as anchor, e.g. `[Code](#Code)` gives you [Code](#Code). For multi-word titles, replace spaces by hyphens (`-`), as in [Using Notebooks as Modules](#Using-Notebooks-as-Modules). * To refer to cells (e.g. equations or figures), you can define a label as cell metadata. See [Floating Elements and References](#Floating-Elements-and-References) for details. * To refer to other notebooks, use a Markdown cross-reference to the notebook file, e.g. [the "Debugger" chapter](Debugger.ipynb). A special script will be run to take care of these links. Reference chapters by name, not by number. ### Citations To cite papers, cite in LaTeX style. The text ``` print(r"\cite{Purdom1972}") ``` is expanded to \cite{Purdom1972}, which in HTML and PDF should be a nice reference. The keys refer to BibTeX entries in [fuzzingbook.bib](fuzzingbook.bib). * LaTeX/PDF output will have a "References" section appended. * HTML output will link to the URL field from the BibTeX entry. Be sure it points to the DOI. 
### Todo's * To mark todo's, use `\todo{Thing to be done}.` \todo{Expand this} ### Tables Tables with fixed contents can be produced using Markdown syntax: | Tables | Are | Cool | | ------ | ---:| ----:| | Zebra | 2 | 30 | | Gnu | 20 | 400 | If you want to produce tables from Python data, the `PrettyTable` package (included in the book) allows to [produce tables with LaTeX-style formatting.](http://blog.juliusschulz.de/blog/ultimate-ipython-notebook) ``` from bookutils import PrettyTable as pt import numpy as np data = np.array([[1, 2, 30], [2, 3, 400]]) pt.PrettyTable(data, [r"$\frac{a}{b}$", r"$b$", r"$c$"], print_latex_longtable=False) ``` ### Plots and Data It is possible to include plots in notebooks. Here is an example of plotting a function: ``` %matplotlib inline import matplotlib.pyplot as plt x = np.linspace(0, 3 * np.pi, 500) plt.plot(x, np.sin(x ** 2)) plt.title('A simple chirp'); ``` And here's an example of plotting data: ``` %matplotlib inline import matplotlib.pyplot as plt data = [25, 36, 57] plt.plot(data) plt.title('Increase in data'); ``` Plots are available in all derived versions (HTML, PDF, etc.) Plots with `plotly` are even nicer (and interactive, even in HTML), However, at this point, we cannot export them to PDF, so `matplotlib` it is. ## Slides You can set up the notebooks such that they also can be presented as slides. In the browser, select View -> Cell Toolbar -> Slideshow. You can then select a slide type for each cell: * `New slide` starts a new slide with the cell (typically, every `## SECTION` in the chapter) * `Sub-slide` starts a new sub-slide which you navigate "down" to (anything in the section) * `Fragment` is a cell that gets revealed after a click (on the same slide) * `Skip` is skipped during the slide show (e.g. `import` statements; navigation guides) * `Notes` goes into presenter notes To create slides, do `make slides`; to view them, change into the `slides/` folder and open the created HTML files. 
(The `reveal.js` package has to be in the same folder as the slide to be presented.) The ability to use slide shows is a compelling argument for teachers and instructors in our audience. (Hint: In a slide presentation, type `s` to see presenter notes.) ## Writing Tools When you're editing in the browser, you may find these extensions helpful: ### Jupyter Notebook [Jupyter Notebook Extensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions) is a collection of productivity-enhancing tools (including spellcheckers). I found these extensions to be particularly useful: * Spell Checker (while you're editing) * Table of contents (for quick navigation) * Code prettify (to produce "nice" syntax) * Codefolding * Live Markdown Preview (while you're editing) ### Jupyter Lab Extensions for _Jupyter Lab_ are much less varied and less supported, but things get better. I am running * [Spell Checker](https://github.com/ijmbarr/jupyterlab_spellchecker) * [Table of Contents](https://github.com/jupyterlab/jupyterlab-toc) * [JupyterLab-LSP](https://towardsdatascience.com/jupyterlab-2-0-edd4155ab897) providing code completion, signatures, style checkers, and more. ## Interaction It is possible to include interactive elements in a notebook, as in the following example: ```python try: from ipywidgets import interact, interactive, fixed, interact_manual x = interact(fuzzer, char_start=(32, 128), char_range=(0, 96)) except ImportError: pass ``` Note that such elements will be present in the notebook versions only, but not in the HTML and PDF versions, so use them sparingly (if at all). To avoid errors during production of derived files, protect against `ImportError` exceptions as in the above example. ## Read More Here is some documentation on the tools we use: 1. [Markdown Cheatsheet](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet) - general introduction to Markdown 1. 
[iPyPublish](https://github.com/chrisjsewell/ipypublish) - rich set of tools to create documents with citations and references ## Alternative Tool Sets We don't currently use these, but they are worth learning: 1. [Making Publication-Ready Python Notebooks](http://blog.juliusschulz.de/blog/ultimate-ipython-notebook) - Another tool set on how to produce book chapters from notebooks 1. [Writing academic papers in plain text with Markdown and Jupyter notebook](https://sylvaindeville.net/2015/07/17/writing-academic-papers-in-plain-text-with-markdown-and-jupyter-notebook/) - Alternate ways on how to generate citations 1. [A Jupyter LaTeX template](https://gist.github.com/goerz/d5019bedacf5956bcf03ca8683dc5217#file-revtex-tplx) - How to define a LaTeX template 1. [Boost Your Jupyter Notebook Productivity](https://towardsdatascience.com/jupyter-notebook-hints-1f26b08429ad) - a collection of hints for debugging and profiling Jupyter notebooks
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Writing layers and models with TensorFlow Keras <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/alpha/guide/keras/custom_layers_and_models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/guide/keras/custom_layers_and_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/guide/keras/custom_layers_and_models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> ### Setup ``` from __future__ import absolute_import, division, print_function !pip install tensorflow-gpu==2.0.0-alpha0 import tensorflow as tf tf.keras.backend.clear_session() # For easy reset of notebook state. ``` ## The Layer class ### Layers encapsulate a state (weights) and some computation The main data structure you'll work with is the `Layer`. A layer encapsulates both a state (the layer's "weights") and a transformation from inputs to outputs (a "call", the layer's forward pass). Here's a densely-connected layer. It has a state: the variables `w` and `b`. 
``` from tensorflow.keras import layers class Linear(layers.Layer): def __init__(self, units=32, input_dim=32): super(Linear, self).__init__() w_init = tf.random_normal_initializer() self.w = tf.Variable(initial_value=w_init(shape=(input_dim, units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable(initial_value=b_init(shape=(units,), dtype='float32'), trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b x = tf.ones((2, 2)) linear_layer = Linear(4, 2) y = linear_layer(x) print(y) ``` Note that the weights `w` and `b` are automatically tracked by the layer upon being set as layer attributes: ``` assert linear_layer.weights == [linear_layer.w, linear_layer.b] ``` Note you also have access to a quicker shortcut for adding weight to a layer: the `add_weight` method: ``` class Linear(layers.Layer): def __init__(self, units=32, input_dim=32): super(Linear, self).__init__() self.w = self.add_weight(shape=(input_dim, units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(units,), initializer='zeros', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b x = tf.ones((2, 2)) linear_layer = Linear(4, 2) y = linear_layer(x) print(y) ``` #### Layers can have non-trainable weights Besides trainable weights, you can add non-trainable weights to a layer as well. Such weights are meant not to be taken into account during backpropagation, when you are training the layer. 
Here's how to add and use a non-trainable weight: ``` class ComputeSum(layers.Layer): def __init__(self, input_dim): super(ComputeSum, self).__init__() self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total x = tf.ones((2, 2)) my_sum = ComputeSum(2) y = my_sum(x) print(y.numpy()) y = my_sum(x) print(y.numpy()) ``` It's part of `layer.weights`, but it gets categorized as a non-trainable weight: ``` print('weights:', len(my_sum.weights)) print('non-trainable weights:', len(my_sum.non_trainable_weights)) # It's not included in the trainable weights: print('trainable_weights:', my_sum.trainable_weights) ``` ### Best practice: deferring weight creation until the shape of the inputs is known In the logistic regression example above, our `Linear` layer took an `input_dim` argument that was used to compute the shape of the weights `w` and `b` in `__init__`: ``` class Linear(layers.Layer): def __init__(self, units=32, input_dim=32): super(Linear, self).__init__() self.w = self.add_weight(shape=(input_dim, units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(units,), initializer='random_normal', trainable=True) ``` In many cases, you may not know in advance the size of your inputs, and you would like to lazily create weights when that value becomes known, some time after instantiating the layer. In the Keras API, we recommend creating layer weights in the `build(inputs_shape)` method of your layer. 
Like this: ``` class Linear(layers.Layer): def __init__(self, units=32): super(Linear, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b ``` The `__call__` method of your layer will automatically run `build` the first time it is called. You now have a layer that's lazy and easy to use: ``` linear_layer = Linear(32) # At instantiation, we don't know on what inputs this is going to get called y = linear_layer(x) # The layer's weights are created dynamically the first time the layer is called ``` ### Layers are recursively composable If you assign a Layer instance as attribute of another Layer, the outer layer will start tracking the weights of the inner layer. We recommend creating such sublayers in the `__init__` method (since the sublayers will typically have a `build` method, they will be built when the outer layer gets built). ``` # Let's assume we are reusing the Linear class # with a `build` method that we defined above. class MLPBlock(layers.Layer): def __init__(self): super(MLPBlock, self).__init__() self.linear_1 = Linear(32) self.linear_2 = Linear(32) self.linear_3 = Linear(1) def call(self, inputs): x = self.linear_1(inputs) x = tf.nn.relu(x) x = self.linear_2(x) x = tf.nn.relu(x) return self.linear_3(x) mlp = MLPBlock() y = mlp(tf.ones(shape=(3, 64))) # The first call to the `mlp` will create the weights print('weights:', len(mlp.weights)) print('trainable weights:', len(mlp.trainable_weights)) ``` ### Layers recursively collect losses created during the forward pass When writing the `call` method of a layer, you can create loss tensors that you will want to use later, when writing your training loop. 
In addition, the `losses` property also contains regularization losses created for the weights of any inner layer:
        # Loss value for this minibatch
        loss_value = loss_fn(y_batch_train, logits)
It's good practice to pass these arguments to the parent class in `__init__` and to include them in the layer config: ``` class Linear(layers.Layer): def __init__(self, units=32, **kwargs): super(Linear, self).__init__(**kwargs) self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b def get_config(self): config = super(Linear, self).get_config() config.update({'units': self.units}) return config layer = Linear(64) config = layer.get_config() print(config) new_layer = Linear.from_config(config) ``` If you need more flexibility when deserializing the layer from its config, you can also override the `from_config` class method. This is the base implementation of `from_config`: ```python def from_config(cls, config): return cls(**config) ``` To learn more about serialization and saving, see the complete [Guide to Saving and Serializing Models](./saving_and_serializing.ipynb). ### Privileged `training` argument in the `call` method Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call` method. By exposing this argument in `call`, you enable the built-in training and evaluation loops (e.g. `fit`) to correctly use the layer in training and inference. 
``` class CustomDropout(layers.Layer): def __init__(self, rate, **kwargs): super(CustomDropout, self).__init__(**kwargs) self.rate = rate def call(self, inputs, training=None): return tf.cond(training, lambda: tf.nn.dropout(inputs, rate=self.rate), lambda: inputs) ``` ## Building Models ### The Model class In general, you will use the `Layer` class to define inner computation blocks, and will use the `Model` class to define the outer model -- the object you will train. For instance, in a ResNet50 model, you would have several ResNet blocks subclassing `Layer`, and a single `Model` encompassing the entire ResNet50 network. The `Model` class has the same API as `Layer`, with the following differences: - It exposes built-in training, evaluation, and prediction loops (`model.fit()`, `model.evaluate()`, `model.predict()`). - It exposes the list of its inner layers, via the `model.layers` property. - It exposes saving and serialization APIs. Effectively, the "Layer" class corresponds to what we refer to in the literature as a "layer" (as in "convolution layer" or "recurrent layer") or as a "block" (as in "ResNet block" or "Inception block"). Meanwhile, the "Model" class corresponds to what is referred to in the literature as a "model" (as in "deep learning model") or as a "network" (as in "deep neural network"). For instance, we could take our mini-resnet example above, and use it to build a `Model` that we could train with `fit()`, and that we could save with `save_weights`: ```python class ResNet(tf.keras.Model): def __init__(self): super(ResNet, self).__init__() self.block_1 = ResNetBlock() self.block_2 = ResNetBlock() self.global_pool = layers.GlobalAveragePooling2D() self.classifier = Dense(num_classes) def call(self, inputs): x = self.block_1(inputs) x = self.block_2(x) x = self.global_pool(x) return self.classifier(x) resnet = ResNet() dataset = ... 
- A `Layer` encapsulates a state (created in `__init__` or `build`) and some computation (in `call`).
self).__init__(name=name, **kwargs) self.dense_proj = layers.Dense(intermediate_dim, activation='relu') self.dense_output = layers.Dense(original_dim, activation='sigmoid') def call(self, inputs): x = self.dense_proj(inputs) return self.dense_output(x) class VariationalAutoEncoder(tf.keras.Model): """Combines the encoder and decoder into an end-to-end model for training.""" def __init__(self, original_dim, intermediate_dim=64, latent_dim=32, name='autoencoder', **kwargs): super(VariationalAutoEncoder, self).__init__(name=name, **kwargs) self.original_dim = original_dim self.encoder = Encoder(latent_dim=latent_dim, intermediate_dim=intermediate_dim) self.decoder = Decoder(original_dim, intermediate_dim=intermediate_dim) def call(self, inputs): z_mean, z_log_var, z = self.encoder(inputs) reconstructed = self.decoder(z) # Add KL divergence regularization loss. kl_loss = - 0.5 * tf.reduce_mean( z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1) self.add_loss(kl_loss) return reconstructed original_dim = 784 vae = VariationalAutoEncoder(original_dim, 64, 32) optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3) mse_loss_fn = tf.keras.losses.MeanSquaredError() loss_metric = tf.keras.metrics.Mean() (x_train, _), _ = tf.keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 784).astype('float32') / 255 train_dataset = tf.data.Dataset.from_tensor_slices(x_train) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64) # Iterate over epochs. for epoch in range(3): print('Start of epoch %d' % (epoch,)) # Iterate over the batches of the dataset. 
for step, x_batch_train in enumerate(train_dataset): with tf.GradientTape() as tape: reconstructed = vae(x_batch_train) # Compute reconstruction loss loss = mse_loss_fn(x_batch_train, reconstructed) loss += sum(vae.losses) # Add KLD regularization loss grads = tape.gradient(loss, vae.trainable_variables) optimizer.apply_gradients(zip(grads, vae.trainable_variables)) loss_metric(loss) if step % 100 == 0: print('step %s: mean loss = %s' % (step, loss_metric.result())) ``` Note that since the VAE is subclassing `Model`, it features built-in training loops. So you could also have trained it like this: ``` vae = VariationalAutoEncoder(784, 64, 32) optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3) vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError()) vae.fit(x_train, x_train, epochs=3, batch_size=64) ``` ### Beyond object-oriented development: the Functional API Was this example too much object-oriented development for you? You can also build models using [the Functional API](./functional.ipynb). Importantly, choosing one style or another does not prevent you from leveraging components written in the other style: you can always mix-and-match. For instance, the Functional API example below reuses the same `Sampling` layer we defined in the example above. ``` original_dim = 784 intermediate_dim = 64 latent_dim = 32 # Define encoder model. original_inputs = tf.keras.Input(shape=(original_dim,), name='encoder_input') x = layers.Dense(intermediate_dim, activation='relu')(original_inputs) z_mean = layers.Dense(latent_dim, name='z_mean')(x) z_log_var = layers.Dense(latent_dim, name='z_log_var')(x) z = Sampling()((z_mean, z_log_var)) encoder = tf.keras.Model(inputs=original_inputs, outputs=z, name='encoder') # Define decoder model. 
latent_inputs = tf.keras.Input(shape=(latent_dim,), name='z_sampling') x = layers.Dense(intermediate_dim, activation='relu')(latent_inputs) outputs = layers.Dense(original_dim, activation='sigmoid')(x) decoder = tf.keras.Model(inputs=latent_inputs, outputs=outputs, name='decoder') # Define VAE model. outputs = decoder(z) vae = tf.keras.Model(inputs=original_inputs, outputs=outputs, name='vae') # Add KL divergence regularization loss. kl_loss = - 0.5 * tf.reduce_mean( z_log_var - tf.square(z_mean) - tf.exp(z_log_var) + 1) vae.add_loss(kl_loss) # Train. optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3) vae.compile(optimizer, loss=tf.keras.losses.MeanSquaredError()) vae.fit(x_train, x_train, epochs=3, batch_size=64) ```
github_jupyter
# LODES Data Analysis ## Prepare Workbook ``` import numpy as np from pandas import Series, DataFrame import pandas as pd import urllib from urllib2 import urlopen from StringIO import StringIO import gzip import requests import json import os from copy import deepcopy from pandas.io.json import json_normalize # Set create working folder and set as active directory os.chdir('C:\Users\dcapizzi\Documents\GitHub') if not os.path.exists('lodes'): os.makedirs('lodes') os.chdir('C:\Users\dcapizzi\Documents\GitHub\lodes') ``` ## Load initial LODES data ``` # Collect user input for the year and states to download for the analysis year = raw_input('Enter a year: ') input_list = raw_input("Enter states to include separated by commas (no spaces): ") state_list = input_list.split(',') # Define final data frames to aggregate all state data lodes_columns = ['w_geocode', 'h_geocode', 'tot_jobs', 'age_29_bel_jobs', 'age_30_54_jobs', 'age_55_over_jobs', 'sal_1250_bel_jobs', 'sal_1250_3333_jobs', 'sal_3333_over_jobs', 'goods_prod_jobs', 'trade_transp_jobs', 'all_other_svc_jobs', 'createdate', 'state', 'w_block', 'h_block', 'w_2010_block', 'w_state', 'w_county_name', 'w_block_group_code', 'w_block_group_name', 'w_metro_name', 'w_zip_code', 'w_place_name', 'w_county_sub_name', 'w_createdate', 'h_2010_block', 'h_state', 'h_county_name', 'h_block_group_code', 'h_block_group_name', 'h_metro_name', 'h_zip_code', 'h_place_name', 'h_county_sub_name', 'h_createdate'] lodes_data = pd.DataFrame([],columns=lodes_columns) # Create dictionaries to house downloaded files dict_lodes = {} dict_xwalk = {} # Loop through all states selected by user, download the relevant files from the Census website, unzip, read, and load into dictionaries # Process takes some time, please be patient for state in state_list: # Sets url for primary "LODES" data set - which provides data on the home Census block, work Census block, and commuters in between lodes_url = 'http://lehd.ces.census.gov/data/lodes/LODES7/' + 
state.lower() + '/od/' + state.lower() + '_od_main_JT00_' + year + '.csv.gz' # Sets url for "cross-walk" data with the city, state, ZIP, etc. for each Census block xwalk_url = 'http://lehd.ces.census.gov/data/lodes/LODES7/' + state.lower() + '/' + state.lower() + '_xwalk.csv.gz' # Names the files lodes_filename = 'lodes_' + state + "_" + year + '.csv.gz' xwalk_filename = 'xwalk_' + state + "_" + year + '.csv.gz' # Downloads the files urllib.urlretrieve(lodes_url, lodes_filename) urllib.urlretrieve(xwalk_url, xwalk_filename) print 'Data downloaded for '+state # Unzips the files unzip_lodes = gzip.open(lodes_filename, 'rb') unzip_xwalk = gzip.open(xwalk_filename, 'rb') # Reads the files to disk unzip_lodes = unzip_lodes.read() unzip_xwalk = unzip_xwalk.read() # Saves as objects in teh created dictionaries dict_lodes[state]=pd.read_csv(StringIO(unzip_lodes)) dict_xwalk[state]=pd.read_csv(StringIO(unzip_xwalk)) print 'Data tables created for '+state # Removes unnecessary fields and names the columns to consistent, human-readable names dict_lodes[state].columns = ['w_geocode','h_geocode','tot_jobs','age_29_bel_jobs', 'age_30_54_jobs','age_55_over_jobs','sal_1250_bel_jobs','sal_1250_3333_jobs','sal_3333_over_jobs', 'goods_prod_jobs','trade_transp_jobs','all_other_svc_jobs','createdate'] dict_xwalk[state] = DataFrame(dict_xwalk[state],columns=['tabblk2010','stusps','ctyname', 'bgrp','bgrpname','cbsaname','zcta','stplcname','ctycsubname','createdate']) dict_xwalk[state].columns = ['2010_block', 'state', 'county_name', 'block_group_code', 'block_group_name','metro_name', 'zip_code','place_name', 'county_sub_name','createdate'] print 'Column names defined for '+state # Creates 'block-group-level' field to join LODES to xwalk and centroid lat/longs (Census block group codes are the first 12 digits of Census block codes) left = lambda x: str(int(x))[:12] dict_lodes[state]['w_block'] = dict_lodes[state]['w_geocode'].apply(left) dict_lodes[state]['w_block'] = 
dict_lodes[state]['w_geocode'].apply(left) dict_lodes[state]['h_block'] = dict_lodes[state]['h_geocode'].apply(left) dict_xwalk[state]['block_group_code']= dict_xwalk[state]['block_group_code'].apply(left) dict_lodes[state]['state'] = state print 'New fields created for '+state print 'Process complete!' # Create blank dictionaries to join or merge cross-walk data with LODES data dict_xwalk_w = {} dict_xwalk_h = {} # Duplicay (copy) cross-walk data, with columns one for work, one for home for state in dict_xwalk: dict_xwalk_w[state] = deepcopy(dict_xwalk[state]) dict_xwalk_h[state] = deepcopy(dict_xwalk[state]) dict_xwalk_w[state].rename(columns=lambda x: "w_"+x, inplace="True") dict_xwalk_h[state].rename(columns=lambda x: "h_"+x, inplace="True") # For each state in dict_lodes, merge once on the "work" Census block (w_geocode) and once on the "home" Census block (h_geocode) # This data will provide an idea of the city/state/zip for both the work and home block code groups for state in dict_lodes: dict_lodes[state] = pd.merge(dict_lodes[state], dict_xwalk_w[state], how='left', left_on='w_geocode', right_on='w_2010_block') dict_lodes[state] = pd.merge(dict_lodes[state], dict_xwalk_h[state], how='left', left_on='h_geocode', right_on='h_2010_block') lodes_data = lodes_data.append(dict_lodes[state]) lodes_data.columns ``` ## Transform LODES data for analysis ``` # Create new field "home to work" with both home and work geocodes lodes_data['unique'] = lodes_data['h_geocode'].map('{0:f}'.format).astype(str).apply(lambda x: x[:15]) + ' to ' + lodes_data['w_geocode'].map('{0:f}'.format).astype(str).apply(lambda x: x[:15]) # Take new data set, and split into "home" and "work" tables to be flattened lodes_data_home = DataFrame(lodes_data, columns = ['unique','h_geocode', 'tot_jobs', 'age_29_bel_jobs', 'age_30_54_jobs', 'age_55_over_jobs', 'sal_1250_bel_jobs', 'sal_1250_3333_jobs', 'sal_3333_over_jobs', 'goods_prod_jobs', 'trade_transp_jobs', 'all_other_svc_jobs', 'h_block', 
'h_state', 'h_county_name', 'h_block_group_code', 'h_block_group_name', 'h_metro_name', 'h_zip_code', 'h_place_name', 'h_county_sub_name']) lodes_data_home['type']='Home' lodes_data_home['path']=1 lodes_data_work = DataFrame(lodes_data, columns = ['unique','w_geocode', 'tot_jobs', 'age_29_bel_jobs', 'age_30_54_jobs', 'age_55_over_jobs', 'sal_1250_bel_jobs', 'sal_1250_3333_jobs', 'sal_3333_over_jobs', 'goods_prod_jobs', 'trade_transp_jobs', 'all_other_svc_jobs', 'w_block', 'w_state', 'w_county_name', 'w_block_group_code', 'w_block_group_name', 'w_metro_name', 'w_zip_code', 'w_place_name', 'w_county_sub_name']) lodes_data_work['type']='Work' lodes_data_work['path']=2 # Rename columns to be the same for both new tables new_columns = ['unique','geocode', 'tot_jobs', 'age_29_bel_jobs', 'age_30_54_jobs', 'age_55_over_jobs', 'sal_1250_bel_jobs', 'sal_1250_3333_jobs', 'sal_3333_over_jobs', 'goods_prod_jobs', 'trade_transp_jobs', 'all_other_svc_jobs', 'block', 'state', 'county_name', 'block_group_code', 'block_group_name', 'metro_name', 'zip_code', 'place_name', 'county_sub_name','type','path'] lodes_data_home.columns = new_columns lodes_data_work.columns = new_columns # Append both tables and sort by Path ID lodes_data_flat = lodes_data_home.append(lodes_data_work) lodes_data_flat = lodes_data_flat.sort(['unique','path']).reset_index(drop=True) lodes_data_flat[:3] ``` ## Add additional data on latitude, longitude, and demographics into data set ``` # read in data with latitudes, longitudes, and other data sources latlong = pd.read_csv('DDL_census_data.csv') # Rename columns latlong.columns = ['state', 'county', 'tract', 'blockgrouppiece', 'full_geo_id', 'geoid', 'name', u'lsad', 'land_area', 'water_area', 'latitude', 'longitude', 'id', 'geoid2', 'geoid3', 'geo_display','median_income','moe_median_income', 'geoid4', 'geoid5', 'geo_display2', 'total','moe_total:', 'foodstamps','moe_foodstamps', 
'foodstamps_disability','moe_foodstamps_disability','foodstamps_nodisability','moe_foodstamps_nodisability', 'nofoodstamps','moe_nofoodstamps', 'nofoodstamps_disability','moe_nofoodstamps_disability', 'nofoodstamps_nodisability','moe_nofoodstamps_nodisability'] # Reformat columns latlong['full_geo_id'] = latlong['full_geo_id'].apply(lambda x: x[9:]) # Eliminate unnecessary columns latlong = DataFrame(latlong, columns = ['full_geo_id', 'latitude', 'longitude', 'foodstamps','moe_foodstamps', 'foodstamps_disability','moe_foodstamps_disability','foodstamps_nodisability','moe_foodstamps_nodisability', 'nofoodstamps','moe_nofoodstamps', 'nofoodstamps_disability','moe_nofoodstamps_disability', 'nofoodstamps_nodisability','moe_nofoodstamps_nodisability']) lodes_data_full = pd.merge(lodes_data_flat, latlong, how='left', left_on='block_group_code', right_on='full_geo_id') ``` ## Add additional data on transit for metro ``` lodes_data_full['category']='lodes' lodes_data_full from sqlalchemy import create_engine sqlite_file = 'sqlite://///Users/Kruthika/Projects/DDL/04-team3/census.db' engine = create_engine(sqlite_file) from pandas.io import sql sql.execute('DROP TABLE IF EXISTS lodes_data',engine) lodes_data_full.to_sql('lodes_data', engine) import requests import json import pandas as pd from pandas.io.json import json_normalize from urllib2 import urlopen #Get station-level descriptive data from WMATA API, including latitude and longitude of stations and line codes r = requests.get('https://api.wmata.com/Rail.svc/json/jStations?api_key=fb7119a0d3464673825a26e94db74451') data_list = [] for entrances in r.json()['Stations']: for e in entrances.keys(): if e not in data_list: data_list.append(e) print data_list metro_stations = json_normalize(r.json()['Stations']) metro_stations.head(3) metro_stations.to_csv('stations.csv') #Get bus route descriptive data from WMATA API, including latitude and longitude of stations and route codes r1 = 
requests.get('https://api.wmata.com/Bus.svc/json/jStops?api_key=fb7119a0d3464673825a26e94db74451') stops_list = [] for stops in r1.json()['Stops']: for s in stops.keys(): if s not in stops_list: stops_list.append(s) print stops_list bus_stops = json_normalize(r1.json()['Stops']) bus_stops.head(3) s = bus_stops.apply(lambda x: pd.Series(x['Routes']),axis=1).stack().reset_index(level=1, drop=True) s.name = 'Routes' bus_routes = bus_stops.drop('Routes', axis=1).join(s) bus_routes['category'] = 'bus' bus_routes['type'] = 'bus' bus_routes.columns = ['latitude','longitude','name','unique','detail','category','type'] bus_routes[:6] bus_routes.to_csv('busroutes.csv') #Get path-level train data from WMATA API, including latitude and longitude of stations and line codes rblue = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=J03&ToStationCode=G05&api_key=fb7119a0d3464673825a26e94db74451') rgreen = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=F11&ToStationCode=E10&api_key=fb7119a0d3464673825a26e94db74451') rorange = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=K08&ToStationCode=D13&api_key=fb7119a0d3464673825a26e94db74451') rred = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=A15&ToStationCode=B11&api_key=fb7119a0d3464673825a26e94db74451') rsilver = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=N06&ToStationCode=G05&api_key=fb7119a0d3464673825a26e94db74451') ryellow = requests.get('https://api.wmata.com/Rail.svc/json/jPath?FromStationCode=C15&ToStationCode=E06&api_key=fb7119a0d3464673825a26e94db74451') data_list = [] for paths in rblue.json()['Path']: for p in paths.keys(): if p not in data_list: data_list.append(p) print data_list dfblue = json_normalize(rblue.json()['Path']) dfgreen = json_normalize(rgreen.json()['Path']) dforange = json_normalize(rorange.json()['Path']) dfred = json_normalize(rred.json()['Path']) dfsilver = 
json_normalize(rsilver.json()['Path']) dfyellow = json_normalize(ryellow.json()['Path']) metro_lines = pd.concat([dfblue, dfgreen, dforange, dfred, dfsilver, dfyellow], ignore_index=True) metro_lines.head(3) metro_combined = pd.merge(metro_lines, metro_stations, how='left', left_on='StationCode', right_on='Code') metro_combined.head(3) metro_combined = DataFrame(metro_combined,columns=['LineCode','SeqNum', 'StationName','Address.City','Address.State','Address.Zip','Lat','Lon']) metro_combined.columns = ['unique','path','name','metro_name','state','zip','latitude','longitude'] metro_combined['type']='train' metro_combined['category']='train' metro_combined.head(3) metro_combined.to_csv('trainandroute.csv') ``` ## Blend all data sets together ``` lodes_transit_data = pd.concat([lodes_data_full, bus_routes, metro_combined], ignore_index=True) lodes_transit_data[:3] lodes_transit_data [lodes_transit_data['category']=='train'][:5] lodes_transit_data.to_csv('lodes_final_output.csv') ```
github_jupyter
# Implementing the Gradient Descent Algorithm In this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data. ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd #Some helper functions for plotting and drawing lines def plot_points(X, y): admitted = X[np.argwhere(y==1)] rejected = X[np.argwhere(y==0)] plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k') plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k') def display(m, b, color='g--'): plt.xlim(-0.05,1.05) plt.ylim(-0.05,1.05) x = np.arange(-10, 10, 0.1) plt.plot(x, m*x+b, color) ``` ## Reading and plotting the data ``` data = pd.read_csv('data.csv', header=None) X = np.array(data[[0,1]]) y = np.array(data[2]) plot_points(X,y) plt.show() ``` ## TODO: Implementing the basic functions Here is your turn to shine. Implement the following formulas, as explained in the text. 
- Sigmoid activation function $$\sigma(x) = \frac{1}{1+e^{-x}}$$ - Output (prediction) formula $$\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$$ - Error function $$Error(y, \hat{y}) = - y \log(\hat{y}) - (1-y) \log(1-\hat{y})$$ - The function that updates the weights $$ w_i \longrightarrow w_i + \alpha (y - \hat{y}) x_i$$ $$ b \longrightarrow b + \alpha (y - \hat{y})$$ ``` # Implement the following functions # Activation (sigmoid) function def sigmoid(x): return 1/(1+np.exp(-x)) # Output (prediction) formula def output_formula(features, weights, bias): return sigmoid(np.matmul(features, weights)+bias) # Error (log-loss) formula def error_formula(y, output): return -y*np.log(output)-(1-y)*np.log(1-output) # Gradient descent step def update_weights(x, y, weights, bias, learnrate): output = output_formula(x, weights, bias) new_weights = weights + learnrate*(y-output)*x new_bias = bias + learnrate*(y-output) return new_weights, new_bias ``` ## Training function This function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. It will also plot the data, and some of the boundary lines obtained as we run the algorithm. 
``` np.random.seed(44) epochs = 100 learnrate = 0.01 def train(features, targets, epochs, learnrate, graph_lines=False): errors = [] n_records, n_features = features.shape last_loss = None weights = np.random.normal(scale=1 / n_features**.5, size=n_features) bias = 0 for e in range(epochs): del_w = np.zeros(weights.shape) for x, y in zip(features, targets): output = output_formula(x, weights, bias) error = error_formula(y, output) weights, bias = update_weights(x, y, weights, bias, learnrate) # Printing out the log-loss error on the training set out = output_formula(features, weights, bias) loss = np.mean(error_formula(targets, out)) errors.append(loss) if e % (epochs / 10) == 0: print("\n========== Epoch", e,"==========") if last_loss and last_loss < loss: print("Train loss: ", loss, " WARNING - Loss Increasing") else: print("Train loss: ", loss) last_loss = loss predictions = out > 0.5 accuracy = np.mean(predictions == targets) print("Accuracy: ", accuracy) if graph_lines and e % (epochs / 100) == 0: display(-weights[0]/weights[1], -bias/weights[1]) # Plotting the solution boundary plt.title("Solution boundary") display(-weights[0]/weights[1], -bias/weights[1], 'black') # Plotting the data plot_points(features, targets) plt.show() # Plotting the error plt.title("Error Plot") plt.xlabel('Number of epochs') plt.ylabel('Error') plt.plot(errors) plt.show() ``` ## Time to train the algorithm! When we run the function, we'll obtain the following: - 10 updates with the current training loss and accuracy - A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs. - A plot of the error function. Notice how it decreases as we go through more epochs. ``` train(X, y, epochs, learnrate, True) ```
github_jupyter
# MALDI acquisition of predefined areas author: Alex Mattausch version: 0.1.0 ``` %load_ext autoreload %autoreload 2 # "%matplotlib widget" is slightly better, but sometimes doesn't work # "%matplotlib notebook" or "%matplotlib inline" can be used as alternatives %matplotlib widget import matplotlib.pyplot as plt import remote_control.control as rc import remote_control.utils as utils #from IPython.display import set_matplotlib_formats #set_matplotlib_formats('svg') from remote_control import acquisition from remote_control.control import configure_fly_at_fixed_z from itertools import product CONFIG_FN = 'remote_config.json' ### IN CASE OF ERROR, make sure Jupyter is set to use the "Python [conda env:maldi-control-notebooks]" kernel qa = acquisition.QueueAquisition( config_fn = CONFIG_FN, datadir="./data" # will save spatial position file here ) # For plates with recessed wells, configure this to move the slide away when moving between wells. # If the stage needs to move in the X/Y plane more than "distance", it will move the stage's Z axis # to the value of the "z" parameter. # configure_fly_at_fixed_z(distance=2000, z=3000) # Enable configure_fly_at_fixed_z(distance=None, z=None) # Disable ``` ### 1. Define slide area ``` # Set up safety bounds (optional - comment this out if they're unwanted) qa.set_image_bounds( min_x=-15000, max_x=15000, min_y=-25000, max_y=25000, ) ``` ### 2. Add acquisition areas Run this cell to clear areas and start over: ``` qa.clear_areas() qa.add_area( name="well_1", # <- Optional! 
line_start=(-10649, -18704, 3444), line_end=(-4149, -18704, 3444), perpendicular=(-9399, -24204, 3444), step_size_x=500, step_size_y=1000 ) qa.add_area( name="well_2", line_start=(-10729, -6580, 3444), line_end=(-8229, -6580, 3444), perpendicular=(-9479, -9080, 3444), step_size_x=25, step_size_y=25 ) qa.add_area( name="well_4", line_start=(-10729, 22000, 3444), line_end=(-8229, 22000, 3444), perpendicular=(-9479, 18000, 3444), step_size_x=250, step_size_y=250 ) qa.plot_areas() ``` **NOTE:** numbers in boxes indicate acquisition order! ### 3. Generate measurement positions from areas ``` qa.generate_targets() plt.close('all') qa.plot_targets(annotate=True) ``` ### 4. Run acquistion Once you are happy with plots above: - Launch Telnet in apsmaldi software - Press START on TUNE somputer - Run the following cell with dummy=True to test coordinates - Run the following cell with dummy=Fase, measure=True to perform acquisition ``` OUTPUT_DIR = 'D:\\imagingMS\\2021_08\\your name\\' IMZML_PREFIX = OUTPUT_DIR + '01052019_Mouse_DHB_pos_mz200-800_px50x50_LR' qa.acquire( filename=IMZML_PREFIX, # Prefix for output coordinates file used in ImzML conversion dummy=True, # False - send commands to MALDI, True - don't connect, just print commands measure=False, # False - move stage only, True - move stage & acquire data email_on_success='test@example.com', # Set to None to suppress email_on_failure='test@example.com', # Set to None to suppress ) ``` ### 5. Cleanup After imaging run the following cell to terminate Telnet ``` rc.close(quit=True) ```
github_jupyter
# World's Fastest Growing Economies as of 2018 This project seeks to find out countries with eceonomy growth. This data was gotten from <a>"https://en.wikipedia.org/wiki/List_of_countries_by_real_GDP_growth_rate"<a> by web scraping and loading the table from the website. ## Let's dive in. ``` from bs4 import BeautifulSoup as bs import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np import requests import csv url = "https://en.wikipedia.org/wiki/List_of_countries_by_real_GDP_growth_rate" req = requests.get(url) req.status_code req.text soup = bs(req.text, 'html5lib') #soup.title #links = soup.find_all('a') """ for link in links: print('----------------------------------------------------------------') print(link.get('href')) print('----------------------------------------------------------------') print(len(links)) """ df = pd.read_html("https://en.wikipedia.org/wiki/List_of_countries_by_real_GDP_growth_rate") gdp_growthrate = df[1] gdp_growthrate.dtypes gdp_growthrate.head(10) gdp_growthrate.shape growth_overtime = df[2] growth_overtime.head() growth_overtime.shape growth_overtime ``` ## Cleaning The Data ``` gdp_growthrate.isnull().sum() #Identity Missing Value Index missing_data = gdp_growthrate[gdp_growthrate.isnull().any(axis=1)].index.values.tolist() #To get the missing values in a dataframe gdp_growthrate.iloc[missing_data,:] gdp_growthrate.drop([189], axis=0, inplace=True) gdp_growthrate.isnull().sum() gdp_growthrate.dtypes gdp_growthrate = gdp_growthrate.rename( columns={'Real GDP growthrate (%)[2]' : "GDP_growthrate%"}) change = ["GDP_growthrate%"] for col in change: gdp_growthrate[col] = gdp_growthrate[col].str.split('|', expand=True)[0] gdp_growthrate[col] = gdp_growthrate[col].str.replace(' ', '') gdp_growthrate['GDP_growthrate%'] = gdp_growthrate['GDP_growthrate%'].astype(float) gdp_growthrate.dtypes growth_overtime.isnull().sum() growth_overtime.dtypes #Identity Missing Value Index missing = 
growth_overtime[growth_overtime.isnull().any(axis=1)].index.values.tolist() #To get the missing values in a dataframe growth_overtime.iloc[missing,:] growth_overtime.drop(missing, axis=0, inplace=True) growth_overtime.isnull().sum() growth_overtime = growth_overtime.rename( columns={'2018[4]' : 2018}) growth_overtime.iloc[0, :] growth_overtime['Country'] = growth_overtime['Country'].str.split('.', expand=True)[0] growth_overtime.head(2) growth_overtime['Country'] = growth_overtime['Country'].str.split(' ', expand=True)[0] growth_overtime.head(2) ``` # Data Exploratory Analysis ``` #Country with least growth over the years least_growth = growth_overtime.sort_values(by='Avg').head() least_growth #Countries with Higest growth Over the years highest_growth = growth_overtime.sort_values(by='Avg', ascending=False).head() highest_growth Country = highest_growth['Country'] Growth = highest_growth['Avg'] # Figure Size fig = plt.figure(figsize =(8, 6)) plt.barh(Country, Growth) # Show Plot plt.show() Country = least_growth['Country'] Growth = least_growth['Avg'].sort_values() # Figure Size fig = plt.figure(figsize =(8, 6)) plt.barh(Country, Growth) # Show Plot plt.show() ``` ## Insights From my analysis and graphs above, it can be seen that as of 2018, the country's with highest growth are Ethopia, Ireland, Ivory, Djibouti and Turkmenistan with an average growth of 9.40, 8.60, 8.34, 7.66 and 7.62 respectively. While Yemen has the least average growth of -11.76 over the years.
github_jupyter
<a href="https://practicalai.me"><img src="https://raw.githubusercontent.com/practicalAI/images/master/images/rounded_logo.png" width="100" align="left" hspace="20px" vspace="20px"></a> <img src="https://raw.githubusercontent.com/practicalAI/images/master/basic_ml/06_Multilayer_Perceptron/nn.png" width="200" vspace="10px" align="right"> <div align="left"> <h1>Multilayer Perceptron (MLP)</h1> In this lesson, we will explore multilayer perceptrons (MLPs) which are a basic type of neural network. We will implement them using Tensorflow with Keras. <table align="center"> <td> <img src="https://raw.githubusercontent.com/practicalAI/images/master/images/rounded_logo.png" width="25"><a target="_blank" href="https://practicalai.me"> View on practicalAI</a> </td> <td> <img src="https://raw.githubusercontent.com/practicalAI/images/master/images/colab_logo.png" width="25"><a target="_blank" href="https://colab.research.google.com/github/practicalAI/practicalAI/blob/master/notebooks/06_Multilayer_Perceptron.ipynb"> Run in Google Colab</a> </td> <td> <img src="https://raw.githubusercontent.com/practicalAI/images/master/images/github_logo.png" width="22"><a target="_blank" href="https://github.com/practicalAI/practicalAI/blob/master/notebooks/basic_ml/06_Multilayer_Perceptron.ipynb"> View code on GitHub</a> </td> </table> # Overview * **Objective:** Predict the probability of class $y$ given the inputs $X$. Non-linearity is introduced to model the complex, non-linear data. * **Advantages:** * Can model non-linear patterns in the data really well. * **Disadvantages:** * Overfits easily. * Computationally intensive as network increases in size. * Not easily interpretable. * **Miscellaneous:** Future neural network architectures that we'll see use the MLP as a modular unit for feed forward operations (affine transformation (XW) followed by a non-linear operation). Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . 
You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear. <img src="https://raw.githubusercontent.com/practicalAI/images/master/basic_ml/06_Multilayer_Perceptron/nn.png" width="550"> $z_1 = XW_1$ $a_1 = f(z_1)$ $z_2 = a_1W_2$ $\hat{y} = softmax(z_2)$ # classification * $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features) * $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1) * $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$ * $f$ = non-linear activation function *nn $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$ * $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes) * $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$ * $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) **Note**: We're going to leave out the bias terms $\beta$ to avoid further crowding the backpropagation calculations. ### Training 1. Randomly initialize the model's weights $W$ (we'll cover more effective initalization strategies later in this lesson). 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. * $z_1 = XW_1$ * $a_1 = f(z_1)$ * $z_2 = a_1W_2$ * $\hat{y} = softmax(z_2)$ 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i y_i ln (\hat{y_i}) $ * Since each input maps to exactly one class, our cross-entropy loss simplifies to: * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $ 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. 
* $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$ * $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_2}} \frac{\partial{a_2}}{\partial{z_2}} \frac{\partial{z_2}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $ 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probabiltiy for the incorrect classes (j) and encourage a higher probability for the correct class (y). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$ 6. Repeat steps 2 - 4 until model performs well. # Set up ``` # Use TensorFlow 2.x %tensorflow_version 2.x import os import numpy as np import tensorflow as tf # Arguments SEED = 1234 SHUFFLE = True DATA_FILE = "spiral.csv" INPUT_DIM = 2 NUM_CLASSES = 3 NUM_SAMPLES_PER_CLASS = 500 TRAIN_SIZE = 0.7 VAL_SIZE = 0.15 TEST_SIZE = 0.15 NUM_EPOCHS = 10 BATCH_SIZE = 32 HIDDEN_DIM = 100 LEARNING_RATE = 1e-2 # Set seed for reproducability np.random.seed(SEED) tf.random.set_seed(SEED) ``` # Data Download non-linear spiral data for a classification task. 
``` import matplotlib.pyplot as plt import pandas as pd import urllib # Upload data from GitHub to notebook's local drive url = "https://raw.githubusercontent.com/practicalAI/practicalAI/master/data/spiral.csv" response = urllib.request.urlopen(url) html = response.read() with open(DATA_FILE, 'wb') as fp: fp.write(html) # Load data df = pd.read_csv(DATA_FILE, header=0) X = df[['X1', 'X2']].values y = df['color'].values df.head(5) print ("X: ", np.shape(X)) print ("y: ", np.shape(y)) # Visualize data plt.title("Generated non-linear data") colors = {'c1': 'red', 'c2': 'yellow', 'c3': 'blue'} plt.scatter(X[:, 0], X[:, 1], c=[colors[_y] for _y in y], edgecolors='k', s=25) plt.show() ``` # Split data ``` import collections import json from sklearn.model_selection import train_test_split ``` ### Components ``` def train_val_test_split(X, y, val_size, test_size, shuffle): """Split data into train/val/test datasets. """ X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=test_size, stratify=y, shuffle=shuffle) X_train, X_val, y_train, y_val = train_test_split( X_train, y_train, test_size=val_size, stratify=y_train, shuffle=shuffle) return X_train, X_val, X_test, y_train, y_val, y_test ``` ### Operations ``` # Create data splits X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split( X=X, y=y, val_size=VAL_SIZE, test_size=TEST_SIZE, shuffle=SHUFFLE) class_counts = dict(collections.Counter(y)) print (f"X_train: {X_train.shape}, y_train: {y_train.shape}") print (f"X_val: {X_val.shape}, y_val: {y_val.shape}") print (f"X_test: {X_test.shape}, y_test: {y_test.shape}") print (f"X_train[0]: {X_train[0]}") print (f"y_train[0]: {y_train[0]}") print (f"Classes: {class_counts}") ``` # Label encoder ``` import json from sklearn.preprocessing import LabelEncoder # Output vectorizer y_tokenizer = LabelEncoder() # Fit on train data y_tokenizer = y_tokenizer.fit(y_train) classes = list(y_tokenizer.classes_) print (f"classes: {classes}") # Convert labels to 
tokens print (f"y_train[0]: {y_train[0]}") y_train = y_tokenizer.transform(y_train) y_val = y_tokenizer.transform(y_val) y_test = y_tokenizer.transform(y_test) print (f"y_train[0]: {y_train[0]}") # Class weights counts = collections.Counter(y_train) class_weights = {_class: 1.0/count for _class, count in counts.items()} print (f"class counts: {counts},\nclass weights: {class_weights}") ``` # Standardize data We need to standardize our data (zero mean and unit variance) in order to optimize quickly. We're only going to standardize the inputs X because out outputs y are class values. ``` from sklearn.preprocessing import StandardScaler # Standardize the data (mean=0, std=1) using training data X_scaler = StandardScaler().fit(X_train) # Apply scaler on training and test data (don't standardize outputs for classification) standardized_X_train = X_scaler.transform(X_train) standardized_X_val = X_scaler.transform(X_val) standardized_X_test = X_scaler.transform(X_test) # Check print (f"standardized_X_train: mean: {np.mean(standardized_X_train, axis=0)[0]}, std: {np.std(standardized_X_train, axis=0)[0]}") print (f"standardized_X_val: mean: {np.mean(standardized_X_val, axis=0)[0]}, std: {np.std(standardized_X_val, axis=0)[0]}") print (f"standardized_X_test: mean: {np.mean(standardized_X_test, axis=0)[0]}, std: {np.std(standardized_X_test, axis=0)[0]}") ``` # Linear model Before we get to our neural network, we're going to implement a generalized linear model (logistic regression) first to see why linear models won't suffice for our dataset. We will use Tensorflow with Keras to do this. 
``` import itertools import matplotlib.pyplot as plt from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Input from tensorflow.keras.losses import SparseCategoricalCrossentropy from tensorflow.keras.models import Model from tensorflow.keras.optimizers import Adam ``` ### Components ``` # Linear model class LogisticClassifier(Model): def __init__(self, hidden_dim, num_classes): super(LogisticClassifier, self).__init__() self.fc1 = Dense(units=hidden_dim, activation='linear') # linear = no activation function self.fc2 = Dense(units=num_classes, activation='softmax') def call(self, x_in, training=False): """Forward pass.""" z = self.fc1(x_in) y_pred = self.fc2(z) return y_pred def sample(self, input_shape): x_in = Input(shape=input_shape) return Model(inputs=x_in, outputs=self.call(x_in)).summary() def plot_confusion_matrix(y_true, y_pred, classes, cmap=plt.cm.Blues): """Plot a confusion matrix using ground truth and predictions.""" # Confusion matrix cm = confusion_matrix(y_true, y_pred) cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # Figure fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(cm, cmap=plt.cm.Blues) fig.colorbar(cax) # Axis plt.title("Confusion matrix") plt.ylabel("True label") plt.xlabel("Predicted label") ax.set_xticklabels([''] + classes) ax.set_yticklabels([''] + classes) ax.xaxis.set_label_position('bottom') ax.xaxis.tick_bottom() # Values thresh = cm.max() / 2. for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): plt.text(j, i, f"{cm[i, j]:d} ({cm_norm[i, j]*100:.1f}%)", horizontalalignment="center", color="white" if cm[i, j] > thresh else "black") # Display plt.show() def plot_multiclass_decision_boundary(model, X, y, savefig_fp=None): """Plot the multiclass decision boundary for a model that accepts 2D inputs. 
Arguments: model {function} -- trained model with function model.predict(x_in). X {numpy.ndarray} -- 2D inputs with shape (N, 2). y {numpy.ndarray} -- 1D outputs with shape (N,). """ # Axis boundaries x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1 y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1 xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101)) # Create predictions x_in = np.c_[xx.ravel(), yy.ravel()] y_pred = model.predict(x_in) y_pred = np.argmax(y_pred, axis=1).reshape(xx.shape) # Plot decision boundary plt.contourf(xx, yy, y_pred, cmap=plt.cm.Spectral, alpha=0.8) plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu) plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) # Plot if savefig_fp: plt.savefig(savefig_fp, format='png') ``` ### Operations ``` # Initialize the model model = LogisticClassifier(hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES) model.sample(input_shape=(INPUT_DIM,)) # Compile model.compile(optimizer=Adam(lr=LEARNING_RATE), loss=SparseCategoricalCrossentropy(), metrics=['accuracy']) # Training model.fit(x=standardized_X_train, y=y_train, validation_data=(standardized_X_val, y_val), epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, class_weight=class_weights, shuffle=False, verbose=1) # Predictions pred_train = model.predict(standardized_X_train) pred_test = model.predict(standardized_X_test) print (f"sample probability: {pred_test[0]}") pred_train = np.argmax(pred_train, axis=1) pred_test = np.argmax(pred_test, axis=1) print (f"sample class: {pred_test[0]}") # Accuracy train_acc = accuracy_score(y_train, pred_train) test_acc = accuracy_score(y_test, pred_test) print (f"train acc: {train_acc:.2f}, test acc: {test_acc:.2f}") # Metrics plot_confusion_matrix(y_test, pred_test, classes=classes) print (classification_report(y_test, pred_test)) # Visualize the decision boundary plt.figure(figsize=(12,5)) plt.subplot(1, 2, 1) plt.title("Train") plot_multiclass_decision_boundary(model=model, 
X=standardized_X_train, y=y_train) plt.subplot(1, 2, 2) plt.title("Test") plot_multiclass_decision_boundary(model=model, X=standardized_X_test, y=y_test) plt.show() ``` # Activation functions Using the generalized linear method (logistic regression) yielded poor results because of the non-linearity present in our data. We need to use an activation function that can allow our model to learn and map the non-linearity in our data. There are many different options so let's explore a few. ``` from tensorflow.keras.activations import relu from tensorflow.keras.activations import sigmoid from tensorflow.keras.activations import tanh # Fig size plt.figure(figsize=(12,3)) # Data x = np.arange(-5., 5., 0.1) # Sigmoid activation (constrain a value between 0 and 1.) plt.subplot(1, 3, 1) plt.title("Sigmoid activation") y = sigmoid(x) plt.plot(x, y) # Tanh activation (constrain a value between -1 and 1.) plt.subplot(1, 3, 2) y = tanh(x) plt.title("Tanh activation") plt.plot(x, y) # Relu (clip the negative values to 0) plt.subplot(1, 3, 3) y = relu(x) plt.title("ReLU activation") plt.plot(x, y) # Show plots plt.show() ``` The ReLU activation function ($max(0,z)$) is by far the most widely used activation function for neural networks. But as you can see, each activation function has it's own contraints so there are circumstances where you'll want to use different ones. For example, if we need to constrain our outputs between 0 and 1, then the sigmoid activation is the best choice. <img height="45" src="http://bestanimations.com/HomeOffice/Lights/Bulbs/animated-light-bulb-gif-29.gif" align="left" vspace="20px" hspace="10px"> In some cases, using a ReLU activation function may not be sufficient. For instance, when the outputs from our neurons are mostly negative, the activation function will produce zeros. This effectively creates a "dying ReLU" and a recovery is unlikely. 
To mitigate this effect, we could lower the learning rate or use [alternative ReLU activations](https://medium.com/tinymind/a-practical-guide-to-relu-b83ca804f1f7), ex. leaky ReLU or parametric ReLU (PReLU), which have a small slope for negative neuron outputs. # From scratch Now let's create our multilayer perceptron (MLP) which is going to be exactly like the logistic regression model but with the activation function to map the non-linearity in our data. Before we use TensorFlow 2.0 + Keras we will implement our neural network from scratch using NumPy so we can: 1. Absorb the fundamental concepts by implementing from scratch 2. Appreciate the level of abstraction TensorFlow provides <div align="left"> <img src="https://raw.githubusercontent.com/practicalAI/images/master/images/lightbulb.gif" width="45px" align="left" hspace="10px"> </div> It's normal to find the math and code in this section slightly complex. You can still read each of the steps to build intuition for when we implement this using TensorFlow + Keras. ``` print (f"X: {standardized_X_train.shape}") print (f"y: {y_train.shape}") ``` Our goal is to learn a model 𝑦̂ that models 𝑦 given 𝑋 . You'll notice that neural networks are just extensions of the generalized linear methods we've seen so far but with non-linear activation functions since our data will be highly non-linear. 
$z_1 = XW_1$ $a_1 = f(z_1)$ $z_2 = a_1W_2$ $\hat{y} = softmax(z_2)$ # classification * $X$ = inputs | $\in \mathbb{R}^{NXD}$ ($D$ is the number of features) * $W_1$ = 1st layer weights | $\in \mathbb{R}^{DXH}$ ($H$ is the number of hidden units in layer 1) * $z_1$ = outputs from first layer $\in \mathbb{R}^{NXH}$ * $f$ = non-linear activation function * $a_1$ = activation applied first layer's outputs | $\in \mathbb{R}^{NXH}$ * $W_2$ = 2nd layer weights | $\in \mathbb{R}^{HXC}$ ($C$ is the number of classes) * $z_2$ = outputs from second layer $\in \mathbb{R}^{NXH}$ * $\hat{y}$ = prediction | $\in \mathbb{R}^{NXC}$ ($N$ is the number of samples) 1. Randomly initialize the model's weights $W$ (we'll cover more effective initalization strategies later in this lesson). ``` # Initialize first layer's weights W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM) b1 = np.zeros((1, HIDDEN_DIM)) print (f"W1: {W1.shape}") print (f"b1: {b1.shape}") ``` 2. Feed inputs $X$ into the model to do the forward pass and receive the probabilities. First we pass the inputs into the first layer. * $z_1 = XW_1$ ``` # z1 = [NX2] · [2X100] + [1X100] = [NX100] z1 = np.dot(standardized_X_train, W1) + b1 print (f"z1: {z1.shape}") ``` Next we apply the non-linear activation function, ReLU ($max(0,z)$) in this case. * $a_1 = f(z_1)$ ``` # Apply activation function a1 = np.maximum(0, z1) # ReLU print (f"a_1: {a1.shape}") ``` We pass the activations to the second layer to get our logits. * $z_2 = a_1W_2$ ``` # Initialize second layer's weights W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES) b2 = np.zeros((1, NUM_CLASSES)) print (f"W2: {W2.shape}") print (f"b2: {b2.shape}") # z2 = logits = [NX100] · [100X3] + [1X3] = [NX3] logits = np.dot(a1, W2) + b2 print (f"logits: {logits.shape}") print (f"sample: {logits[0]}") ``` We'll apply the softmax function to normalize the logits and btain class probabilities. 
* $\hat{y} = softmax(z_2)$ ``` # Normalization via softmax to obtain class probabilities exp_logits = np.exp(logits) y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True) print (f"y_hat: {y_hat.shape}") print (f"sample: {y_hat[0]}") ``` 3. Compare the predictions $\hat{y}$ (ex. [0.3, 0.3, 0.4]]) with the actual target values $y$ (ex. class 2 would look like [0, 0, 1]) with the objective (cost) function to determine loss $J$. A common objective function for classification tasks is cross-entropy loss. * $J(\theta) = - \sum_i ln(\hat{y_i}) = - \sum_i ln (\frac{e^{X_iW_y}}{\sum_j e^{X_iW}}) $ ``` # Loss correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train]) loss = np.sum(correct_class_logprobs) / len(y_train) ``` 4. Calculate the gradient of loss $J(\theta)$ w.r.t to the model weights. The gradient of the loss w.r.t to W2 is the same as the gradients from logistic regression since $\hat{y} = softmax(z_2)$. * $\frac{\partial{J}}{\partial{W_{2j}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2j}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}0 - e^{a_1W_{2y}}e^{a_1W_{2j}}a_1}{(\sum_j e^{a_1W})^2} = \frac{a_1e^{a_1W_{2j}}}{\sum_j e^{a_1W}} = a_1\hat{y}$ * $\frac{\partial{J}}{\partial{W_{2y}}} = \frac{\partial{J}}{\partial{\hat{y}}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\hat{y}}\frac{\partial{\hat{y}}}{\partial{W_{2y}}} = - \frac{1}{\frac{e^{W_{2y}a_1}}{\sum_j e^{a_1W}}}\frac{\sum_j e^{a_1W}e^{a_1W_{2y}}a_1 - e^{a_1W_{2y}}e^{a_1W_{2y}}a_1}{(\sum_j e^{a_1W})^2} = \frac{1}{\hat{y}}(a_1\hat{y} - a_1\hat{y}^2) = a_1(\hat{y}-1)$ The gradient of the loss w.r.t W1 is a bit trickier since we have to backpropagate through two sets of weights. 
* $ \frac{\partial{J}}{\partial{W_1}} = \frac{\partial{J}}{\partial{\hat{y}}} \frac{\partial{\hat{y}}}{\partial{a_1}} \frac{\partial{a_1}}{\partial{z_1}} \frac{\partial{z_1}}{\partial{W_1}} = W_2(\partial{scores})(\partial{ReLU})X $ ``` # dJ/dW2 dscores = y_hat dscores[range(len(y_hat)), y_train] -= 1 dscores /= len(y_train) dW2 = np.dot(a1.T, dscores) db2 = np.sum(dscores, axis=0, keepdims=True) # dJ/dW1 dhidden = np.dot(dscores, W2.T) dhidden[a1 <= 0] = 0 # ReLu backprop dW1 = np.dot(standardized_X_train.T, dhidden) db1 = np.sum(dhidden, axis=0, keepdims=True) ``` 5. Update the weights $W$ using a small learning rate $\alpha$. The updates will penalize the probabiltiy for the incorrect classes (j) and encourage a higher probability for the correct class (y). * $W_i = W_i - \alpha\frac{\partial{J}}{\partial{W_i}}$ ``` # Update weights W1 += -LEARNING_RATE * dW1 b1 += -LEARNING_RATE * db1 W2 += -LEARNING_RATE * dW2 b2 += -LEARNING_RATE * db2 ``` 6. Repeat steps 2 - 4 until model performs well. 
``` # Initialize random weights W1 = 0.01 * np.random.randn(INPUT_DIM, HIDDEN_DIM) b1 = np.zeros((1, HIDDEN_DIM)) W2 = 0.01 * np.random.randn(HIDDEN_DIM, NUM_CLASSES) b2 = np.zeros((1, NUM_CLASSES)) # Training loop for epoch_num in range(1000): # First layer forward pass [NX2] · [2X100] = [NX100] z1 = np.dot(standardized_X_train, W1) + b1 # Apply activation function a1 = np.maximum(0, z1) # ReLU # z2 = logits = [NX100] · [100X3] = [NX3] logits = np.dot(a1, W2) + b2 # Normalization via softmax to obtain class probabilities exp_logits = np.exp(logits) y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True) # Loss correct_class_logprobs = -np.log(y_hat[range(len(y_hat)), y_train]) loss = np.sum(correct_class_logprobs) / len(y_train) # show progress if epoch_num%100 == 0: # Accuracy y_pred = np.argmax(logits, axis=1) accuracy = np.mean(np.equal(y_train, y_pred)) print (f"Epoch: {epoch_num}, loss: {loss:.3f}, accuracy: {accuracy:.3f}") # dJ/dW2 dscores = y_hat dscores[range(len(y_hat)), y_train] -= 1 dscores /= len(y_train) dW2 = np.dot(a1.T, dscores) db2 = np.sum(dscores, axis=0, keepdims=True) # dJ/dW1 dhidden = np.dot(dscores, W2.T) dhidden[a1 <= 0] = 0 # ReLu backprop dW1 = np.dot(standardized_X_train.T, dhidden) db1 = np.sum(dhidden, axis=0, keepdims=True) # Update weights W1 += -1e0 * dW1 b1 += -1e0 * db1 W2 += -1e0 * dW2 b2 += -1e0 * db2 class MLPFromScratch(): def predict(self, x): z1 = np.dot(x, W1) + b1 a1 = np.maximum(0, z1) logits = np.dot(a1, W2) + b2 exp_logits = np.exp(logits) y_hat = exp_logits / np.sum(exp_logits, axis=1, keepdims=True) return y_hat # Evaluation model = MLPFromScratch() logits_train = model.predict(standardized_X_train) pred_train = np.argmax(logits_train, axis=1) logits_test = model.predict(standardized_X_test) pred_test = np.argmax(logits_test, axis=1) # Training and test accuracy train_acc = np.mean(np.equal(y_train, pred_train)) test_acc = np.mean(np.equal(y_test, pred_test)) print (f"train acc: {train_acc:.2f}, test acc: 
{test_acc:.2f}") # Visualize the decision boundary plt.figure(figsize=(12,5)) plt.subplot(1, 2, 1) plt.title("Train") plot_multiclass_decision_boundary(model=model, X=standardized_X_train, y=y_train) plt.subplot(1, 2, 2) plt.title("Test") plot_multiclass_decision_boundary(model=model, X=standardized_X_test, y=y_test) plt.show() ``` Credit for the plotting functions and the intuition behind all this is due to [CS231n](http://cs231n.github.io/neural-networks-case-study/), one of the best courses for machine learning. Now let's implement the MLP with TensorFlow + Keras. # TensorFlow + Keras ### Components ``` # MLP class MLP(Model): def __init__(self, hidden_dim, num_classes): super(MLP, self).__init__() self.fc1 = Dense(units=hidden_dim, activation='relu') # replaced linear with relu self.fc2 = Dense(units=num_classes, activation='softmax') def call(self, x_in, training=False): """Forward pass.""" z = self.fc1(x_in) y_pred = self.fc2(z) return y_pred def sample(self, input_shape): x_in = Input(shape=input_shape) return Model(inputs=x_in, outputs=self.call(x_in)).summary() ``` ### Operations ``` # Initialize the model model = MLP(hidden_dim=HIDDEN_DIM, num_classes=NUM_CLASSES) model.sample(input_shape=(INPUT_DIM,)) # Compile optimizer = Adam(lr=LEARNING_RATE) model.compile(optimizer=optimizer, loss=SparseCategoricalCrossentropy(), metrics=['accuracy']) # Training model.fit(x=standardized_X_train, y=y_train, validation_data=(standardized_X_val, y_val), epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, class_weight=class_weights, shuffle=False, verbose=1) # Predictions pred_train = model.predict(standardized_X_train) pred_test = model.predict(standardized_X_test) print (f"sample probability: {pred_test[0]}") pred_train = np.argmax(pred_train, axis=1) pred_test = np.argmax(pred_test, axis=1) print (f"sample class: {pred_test[0]}") # Accuracy train_acc = accuracy_score(y_train, pred_train) test_acc = accuracy_score(y_test, pred_test) print (f"train acc: {train_acc:.2f}, test acc: 
{test_acc:.2f}") # Metrics plot_confusion_matrix(y_test, pred_test, classes=classes) print (classification_report(y_test, pred_test)) # Visualize the decision boundary plt.figure(figsize=(12,5)) plt.subplot(1, 2, 1) plt.title("Train") plot_multiclass_decision_boundary(model=model, X=standardized_X_train, y=y_train) plt.subplot(1, 2, 2) plt.title("Test") plot_multiclass_decision_boundary(model=model, X=standardized_X_test, y=y_test) plt.show() ``` # Inference ``` # Inputs for inference X_infer = pd.DataFrame([{'X1': 0.1, 'X2': 0.1}]) X_infer.head() # Standardize standardized_X_infer = X_scaler.transform(X_infer) print (standardized_X_infer) # Predict y_infer = model.predict(standardized_X_infer) _class = np.argmax(y_infer) print (f"The probability that you have a class {classes[_class]} is {y_infer[0][_class]*100.0:.0f}%") ``` # Initializing weights So far we have been initializing weights with small random values and this isn't optimal for convergence during training. The objective is to have weights that are able to produce outputs that follow a similar distribution across all neurons. We can do this by enforcing weights to have unit variance prior the affine and non-linear operations. <img height="45" src="http://bestanimations.com/HomeOffice/Lights/Bulbs/animated-light-bulb-gif-29.gif" align="left" vspace="20px" hspace="10px"> A popular method is to apply [xavier initialization](http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization), which essentially initializes the weights to allow the signal from the data to reach deep into the network. You may be wondering why we don't do this for every forward pass and that's a great question. We'll look at more advanced strategies that help with optimization like batch/layer normalization, etc. in future lessons. Meanwhile you can check out other initializers [here](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/initializers). 
``` from tensorflow.keras.initializers import glorot_normal # MLP class MLP(Model): def __init__(self, hidden_dim, num_classes): super(MLP, self).__init__() xavier_initializer = glorot_normal() # xavier glorot initiailization self.fc1 = Dense(units=hidden_dim, kernel_initializer=xavier_initializer, activation='relu') self.fc2 = Dense(units=num_classes, activation='softmax') def call(self, x_in, training=False): """Forward pass.""" z = self.fc1(x_in) y_pred = self.fc2(z) return y_pred def sample(self, input_shape): x_in = Input(shape=input_shape) return Model(inputs=x_in, outputs=self.call(x_in)).summary() ``` # Dropout A great technique to overcome overfitting is to increase the size of your data but this isn't always an option. Fortuntely, there are methods like regularization and dropout that can help create a more robust model. Dropout is a technique (used only during training) that allows us to zero the outputs of neurons. We do this for `dropout_p`% of the total neurons in each layer and it changes every batch. Dropout prevents units from co-adapting too much to the data and acts as a sampling strategy since we drop a different set of neurons each time. 
<img src="https://raw.githubusercontent.com/practicalAI/images/master/basic_ml/06_Multilayer_Perceptron/dropout.png" width="350"> * [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf) ``` from tensorflow.keras.layers import Dropout from tensorflow.keras.regularizers import l2 ``` ### Components ``` # MLP class MLP(Model): def __init__(self, hidden_dim, lambda_l2, dropout_p, num_classes): super(MLP, self).__init__() self.fc1 = Dense(units=hidden_dim, kernel_regularizer=l2(lambda_l2), # adding L2 regularization activation='relu') self.dropout = Dropout(rate=dropout_p) self.fc2 = Dense(units=num_classes, activation='softmax') def call(self, x_in, training=False): """Forward pass.""" z = self.fc1(x_in) if training: z = self.dropout(z, training=training) # adding dropout y_pred = self.fc2(z) return y_pred def sample(self, input_shape): x_in = Input(shape=input_shape) return Model(inputs=x_in, outputs=self.call(x_in)).summary() ``` ### Operations ``` # Arguments DROPOUT_P = 0.1 # % of the neurons that are dropped each pass LAMBDA_L2 = 1e-4 # L2 regularization # Initialize the model model = MLP(hidden_dim=HIDDEN_DIM, lambda_l2=LAMBDA_L2, dropout_p=DROPOUT_P, num_classes=NUM_CLASSES) model.sample(input_shape=(INPUT_DIM,)) ``` # Overfitting Though neural networks are great at capturing non-linear relationships they are highly susceptible to overfitting to the training data and failing to generalize on test data. Just take a look at the example below where we generate completely random data and are able to fit a model with [$2*N*C + D$](https://arxiv.org/abs/1611.03530) hidden units. The training performance is good (~70%) but the overfitting leads to very poor test performance. We'll be covering strategies to tackle overfitting in future lessons. 
``` # Arguments NUM_EPOCHS = 500 NUM_SAMPLES_PER_CLASS = 50 LEARNING_RATE = 1e-1 HIDDEN_DIM = 2 * NUM_SAMPLES_PER_CLASS * NUM_CLASSES + INPUT_DIM # 2*N*C + D # Generate random data X = np.random.rand(NUM_SAMPLES_PER_CLASS * NUM_CLASSES, INPUT_DIM) y = np.array([[i]*NUM_SAMPLES_PER_CLASS for i in range(NUM_CLASSES)]).reshape(-1) print ("X: ", format(np.shape(X))) print ("y: ", format(np.shape(y))) # Create data splits X_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split( X, y, val_size=VAL_SIZE, test_size=TEST_SIZE, shuffle=SHUFFLE) print ("X_train:", X_train.shape) print ("y_train:", y_train.shape) print ("X_val:", X_val.shape) print ("y_val:", y_val.shape) print ("X_test:", X_test.shape) print ("y_test:", y_test.shape) # Standardize the inputs (mean=0, std=1) using training data X_scaler = StandardScaler().fit(X_train) # Apply scaler on training and test data (don't standardize outputs for classification) standardized_X_train = X_scaler.transform(X_train) standardized_X_val = X_scaler.transform(X_val) standardized_X_test = X_scaler.transform(X_test) # Initialize the model model = MLP(hidden_dim=HIDDEN_DIM, lambda_l2=0.0, dropout_p=0.0, num_classes=NUM_CLASSES) model.sample(input_shape=(INPUT_DIM,)) # Compile optimizer = Adam(lr=LEARNING_RATE) model.compile(optimizer=optimizer, loss=SparseCategoricalCrossentropy(), metrics=['accuracy']) # Training model.fit(x=standardized_X_train, y=y_train, validation_data=(standardized_X_val, y_val), epochs=NUM_EPOCHS, batch_size=BATCH_SIZE, class_weight=class_weights, shuffle=False, verbose=1) # Predictions pred_train = model.predict(standardized_X_train) pred_test = model.predict(standardized_X_test) print (f"sample probability: {pred_test[0]}") pred_train = np.argmax(pred_train, axis=1) pred_test = np.argmax(pred_test, axis=1) print (f"sample class: {pred_test[0]}") # Accuracy train_acc = accuracy_score(y_train, pred_train) test_acc = accuracy_score(y_test, pred_test) print (f"train acc: {train_acc:.2f}, test 
acc: {test_acc:.2f}") # Classification report plot_confusion_matrix(y_true=y_test, y_pred=pred_test, classes=classes) print (classification_report(y_test, pred_test)) # Visualize the decision boundary plt.figure(figsize=(12,5)) plt.subplot(1, 2, 1) plt.title("Train") plot_multiclass_decision_boundary(model=model, X=standardized_X_train, y=y_train) plt.subplot(1, 2, 2) plt.title("Test") plot_multiclass_decision_boundary(model=model, X=standardized_X_test, y=y_test) plt.show() ``` It's important that we experiment, starting with simple models that underfit (high bias) and improve it towards a good fit. Starting with simple models (linear/logistic regression) let's us catch errors without the added complexity of more sophisticated models (neural networks). <img src="https://raw.githubusercontent.com/practicalAI/images/master/basic_ml/06_Multilayer_Perceptron/fit.png" width="700"> --- <div align="center"> Subscribe to our <a href="https://practicalai.me/#newsletter">newsletter</a> and follow us on social media to get the latest updates! <a class="ai-header-badge" target="_blank" href="https://github.com/practicalAI/practicalAI"> <img src="https://img.shields.io/github/stars/practicalAI/practicalAI.svg?style=social&label=Star"></a>&nbsp; <a class="ai-header-badge" target="_blank" href="https://www.linkedin.com/company/practicalai-me"> <img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>&nbsp; <a class="ai-header-badge" target="_blank" href="https://twitter.com/practicalAIme"> <img src="https://img.shields.io/twitter/follow/practicalAIme.svg?label=Follow&style=social"> </a> </div> </div>
github_jupyter
``` import pandas as pd import swifter import numpy as np df = pd.read_csv("data/gc-1m.csv", sep=";")#,nrows=1000) df.columns=["Date","Time", "Open", "High", "Low", "Close", "Volume"] #Wilder’s Smoothing function def Wilder(data, periods): start = np.where(~np.isnan(data))[0][0] #Check if nans present in beginning Wilder = np.array([np.nan]*len(data)) Wilder[start+periods-1] = data[start:(start+periods)].mean() #Simple Moving Average for i in range(start+periods,len(data)): Wilder[i] = (Wilder[i-1]*(periods-1) + data[i])/periods #Wilder Smoothing return(Wilder) dt=(df['Date'] + ' ' + df['Time']).swifter.apply(pd.to_datetime) df["Time"]=dt df.drop(columns=["Date","Volume"],inplace=True) df.set_index(["Time"],inplace=True) #Simple Moving Average (SMA) df['SMA_5'] = df['Close'].transform(lambda x: x.rolling(window = 5).mean()) df['SMA_15'] = df['Close'].transform(lambda x: x.rolling(window = 15).mean()) df['SMA_ratio'] = df['SMA_15'] / df['SMA_5'] #Average True Range (ATR) df['prev_close'] = df['Close'].shift(1) df['TR'] = np.maximum((df['High'] - df['Low']), np.maximum(abs(df['High'] - df['prev_close']), abs(df['prev_close'] - df['Low']))) TR_data = df.copy() df['ATR_5'] = Wilder(TR_data['TR'], 5) df['ATR_15'] = Wilder(TR_data['TR'], 15) df['ATR_Ratio'] = df['ATR_5'] / df['ATR_15'] #Average Directional Index (ADX) df['prev_high'] = df['High'].shift(1) df['prev_low'] = df['Low'].shift(1) df['+DM'] = np.where(~np.isnan(df.prev_high), np.where((df['High'] > df['prev_high']) & (((df['High'] - df['prev_high']) > (df['prev_low'] - df['Low']))), df['High'] - df['prev_high'], 0),np.nan) df['-DM'] = np.where(~np.isnan(df.prev_low), np.where((df['prev_low'] > df['Low']) & (((df['prev_low'] - df['Low']) > (df['High'] - df['prev_high']))), df['prev_low'] - df['Low'], 0),np.nan) ADX_data = df.copy() df['+DM_5'] = Wilder(ADX_data['+DM'], 5) df['-DM_5'] = Wilder(ADX_data['-DM'], 5) df['+DM_15'] = Wilder(ADX_data['+DM'], 15) df['-DM_15'] = Wilder(ADX_data['-DM'], 15) df['+DI_5'] = 
(df['+DM_5']/df['ATR_5'])*100 df['-DI_5'] = (df['-DM_5']/df['ATR_5'])*100 df['+DI_15'] = (df['+DM_15']/df['ATR_15'])*100 df['-DI_15'] = (df['-DM_15']/df['ATR_15'])*100 df['DX_5'] = (np.round(abs(df['+DI_5'] - df['-DI_5'])/(df['+DI_5'] + df['-DI_5']) * 100)) df['DX_15'] = (np.round(abs(df['+DI_15'] - df['-DI_15'])/(df['+DI_15'] + df['-DI_15']) * 100)) ADX_data = df.copy() df['ADX_5'] = Wilder(ADX_data['DX_5'], 5) df['ADX_15'] = Wilder(ADX_data['DX_15'], 15) #Stochastic Oscillators df['Lowest_5D'] = df['Low'].transform(lambda x: x.rolling(window = 5).min()) df['High_5D'] = df['High'].transform(lambda x: x.rolling(window = 5).max()) df['Lowest_15D'] = df['Low'].transform(lambda x: x.rolling(window = 15).min()) df['High_15D'] = df['High'].transform(lambda x: x.rolling(window = 15).max()) df['Stochastic_5'] = ((df['Close'] - df['Lowest_5D'])/(df['High_5D'] - df['Lowest_5D']))*100 df['Stochastic_15'] = ((df['Close'] - df['Lowest_15D'])/(df['High_15D'] - df['Lowest_15D']))*100 df['Stochastic_%D_5'] = df['Stochastic_5'].rolling(window = 5).mean() df['Stochastic_%D_15'] = df['Stochastic_5'].rolling(window = 15).mean() df['Stochastic_Ratio'] = df['Stochastic_%D_5']/df['Stochastic_%D_15'] #Relative Strength Index (RSI) df['Diff'] = df['Close'].transform(lambda x: x.diff()) df['Up'] = df['Diff'] df.loc[(df['Up']<0), 'Up'] = 0 df['Down'] = df['Diff'] df.loc[(df['Down']>0), 'Down'] = 0 df['Down'] = abs(df['Down']) df['avg_5up'] = df['Up'].transform(lambda x: x.rolling(window=5).mean()) df['avg_5down'] = df['Down'].transform(lambda x: x.rolling(window=5).mean()) df['avg_15up'] = df['Up'].transform(lambda x: x.rolling(window=15).mean()) df['avg_15down'] = df['Down'].transform(lambda x: x.rolling(window=15).mean()) df['RS_5'] = df['avg_5up'] / df['avg_5down'] df['RS_15'] = df['avg_15up'] / df['avg_15down'] df['RSI_5'] = 100 - (100/(1+df['RS_5'])) df['RSI_15'] = 100 - (100/(1+df['RS_15'])) df['RSI_ratio'] = df['RSI_5']/df['RSI_15'] #Moving Average Convergence Divergence (MACD) 
df['12Ewm'] = df['Close'].transform(lambda x: x.ewm(span=12, adjust=False).mean()) df['26Ewm'] = df['Close'].transform(lambda x: x.ewm(span=26, adjust=False).mean()) df['MACD'] = df['26Ewm'] - df['12Ewm'] #Bollinger Bands df['15MA'] = df['Close'].transform(lambda x: x.rolling(window=15).mean()) df['SD'] = df['Close'].transform(lambda x: x.rolling(window=15).std()) df['upperband'] = df['15MA'] + 2*df['SD'] df['lowerband'] = df['15MA'] - 2*df['SD'] #Rate of Change df['RC'] = df['Close'].transform(lambda x: x.pct_change(periods = 15)) df.interpolate() df.dropna() df.to_csv("data/gc-1m_all.csv") ```
github_jupyter
<a href="https://colab.research.google.com/github/aayush9628/cs480student/blob/main/06/Copy_of_CS480_Assignment_6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ![CS480_w.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAoAAAADtCAYAAAAvOMSOAAAf83pUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjarZtpklu5lYX/YxW9BFzMWA7GCO+gl9/fASmVVC7b5YhWhnJgknx4dzjDBdKd//3Hdf/Dv5pCcynXVnopnn+ppx4G3zT/+dffZ/PpfX7/+FX4Pvrb4+7s74sCD0W+xs+PdXy+2uDx/NsbfR6fvz/u2vc3oX3f6PuLH28YdWWtYf+6SB4Pn8ctfd+on883pbf661Ln5wb8+j7xLeX738JnYeH7rvrZ/fpAqkRpZy4UQzjRon+f22cFUf8tjvdVnyvPs1j53mJw70v7roSA/HZ7P756/2uAfgvyj+/cn6Ofz18HP4zvM+KfYlm+MeKbv/yF5b8O/gvxLxeOP1cUfv/F2iT2z7fz/X/vbveez92NVIho+VaUdz+io9fwRN4kxfeywkflf+b7+j46H80Pv0j59stPPpZ1C2TlOku2bdi1874uWywxhRPISQhhkSg91shRDysqT0kfdkONPe7YyOEKx8XIw+HnWuxdt7/rLWtceRtPDcabkeR//eH+3S//mw9371KITMEk9fZJcFBdswxlTp95Fgmx+81bfgH+8fFNv/+lsChVMphfmBs3OPz8vMXM9kdtxZfnyPMyXz8tZK7u7xsQIq6dWYxFMuCLxWzFfA2hmhHHRoIGKw8xhUkGLOewWWRIMZbgamhB1+Y11d5zQw4l6GGwiUTkWOinRoYGyUopUz81NWpo5JhTzrnkmpvLPY8SSyq5lFKLQG7UWFPNtdRaW+11tNhSy6202lrrbfTQIxiYe+m1t977GMENLjR4r8HzB4/MMONMM88y62yzz7Eon5VWXmXV1VZfY4cdNzCxy6677b7HMXdAipNOPuXU004/41JrN9508y233nb7HT+z9s3qP338F1mzb9bCy5SeV39mjUddrT/ewgQnWTkjYyEZGa/KAAUdlDPfLKWgzClnvgeaIgcWmZUbt00ZI4XpWMjXfubuj8z9rby53P5W3sJ/ypxT6v4/MudI3T/n7S+ytsVz62Xs04WKqY90H88ZMC3/vefTODWuYifWbXVBFtXPsfOowfaNJyff1+J+7LR4ByTHvcPS6+5wzwaPwKR1ez5gT9mTHOZzrj+pXl7f1uZua7N7QjotnzgJaVnG+uZqu8JfpcfiY3N2BvdKKFj6WWXMzS3tUXovNeSTb16pllvI7pqL6HmyZDePafmEfSf11GftztAQN3UDKFnN3lyXmzrc2fB3nhhK83GDr+22Bh7nzduuTCy58z3SrbxbLdH52/guLa5B01AXeYKuZeVzaw5ztkJeejxcLEzW1cnC2JXbauTGH5upEa7pNqVZ+im2Ey9JviTwZ/GNAdxpGvmst+9ErdZB7eR9jTeJccU92lpFpdKCuVHhluZzg//vIVz+k5MxSuPOxvWLEhxnKW518GWrnGibQM6ogbPi3OU4wrKXUdmVi+21AEDeP4Ub4iS/PlKhNA50VVfgVne8OVMhOV1qnlA2qGtTkKzOSiWdhHFOf9eqffFZC6s5Xpszbyqfq3MLbQajfsEHyj5nKpUADx+Td7bXaVz09HpYRChz7VC0njHptmidNidFlC2JI1Y5Uj6LsNq6fqfTfRl3TJdnHmtR0GGVcy+NvqudnlM9RpgaQb2XNkgshiUXSJTrpntrv6fEu85cfS547djdca4
6L5RKFHlZF7lXUIDFrAq3i76bP5m+uIVf2qFayhm9xEFNpVo3Wcust+YyQyVmJZS9Rpm+0Mkh3hbPFiDteOquh247hIak9R5bppRDHejieV3zJOiglFk/qLokMfpJjUqzNkqdSIWFfLG5Ew0NnlbeirKOasGU5rxNxeJuo6nARJKau+3Dy6bd7sMcA+kctwdSuiprJ95+Uj+8CVkrVGwyyiX0MpvxRuFyIavEZtzUWDsBRW3SioB3OaQf9PQjPuK/i1o6Q3Ja2PPHV/fnB/7jV8q93DbjOBT47EJ5UtzdSBD4nqlf7jAnyiPnqsKk0m9Ju65OVQ7KOlPInlYtANZdvcZicHDt29Ly1S0a/O6TQ4VkCEdTODq3Q2cC21QigVq5Kts8SPfUpkzvFjfXlYa7Db3hGh2QL41HiPYhkaqlAiCU6wkPBRbGmlWNcXLj6sBBuhPgqfOlYGSbFxYxmhQgpXsmt0zSWHYroDYci1ZG6Ojr9qRmdyqVrI12GokLvCAeAJwind7BWxFuA/wy3AMieJafjT41gdg6QQjZKcgTAPrZ4JUGiO2z1Q5llnEmoOlKHrBSp6DPMVFo3I1yAZciZcVVLxcmNy0Q+A4W7lhhJZgT6WSdtgmgOuB/fFswR6rjxn52A7d0Scjuwv2g08pQDNXUJZkjKhuYm8QS3xLoAnpW8Bddp8RYPElk6USD1K3Qd73p6Nrcb4ZoMlRDxecEqlYSv9AbgTiOw03gsk51kCDdMsA83qCAhAQ67ENCS9gFoD636J6rTagjT6EDhVNJ4iWy17LWPIqDWvzLP9B+W4G8KuawZwqiIxfI7sX2XLkRIT/PaUIqmDyTbIq2NuAoTkeoW55Af1gVyNwo1As/0w0NxRAQtTKhiIDG1TJygpdCHbvq1dwvYYhhde9ufwIrqv72pRm4XBDCV0H+KtAQtUX3CplzQWwNAGyIz1k2igsaL4CwQ5ycCUsZ/VTbalxuzsFyKViS1NATq+wxUqfr4X2Ye+QVLvwNY66KskMN0WuN2lC2qEsiO2HYuhLckyLgxJVCAuwM4UC50yaXknl5DqSIMgKegUAQEeWvmoVdNUgIAPqG++n6fbhZgB+PBCbOjrJCpOiSq491uDeExWujVNoYAJsYgqqictS5xjv6abN0lU6CpHvZzTJsh8aZGw14STh5b5vmRhRE0deKVHaFDY4ZcBlRfwdKG9xPiWJcYAIK3iBMo7Ey3T6RYf6tpAfkREFH1Uyno7NvKsAPFE4Etj8AUQfn9kbUIq9IOwV5wB9UASE4IHHuUb3HXSEWBqk6frtCO1CYKueNMQLmOvdDzYCBJSpCRCdGiAYfJJlBm8DeUBqsMQc6IKM623GdBMG4+YxEhp+eJksgTcdJ0FjQuqmeCWE1JADQQLHtshblhMBsbV3FxTXTKAKb0Ez6DLGwaHTe5iCbAREwYJKXmmba1AwkWNRy6GzBLiIN1VZ7305GOEMD3PKGgi7hhmAn2h3wKggzgHoAViuOQZzoM+gY/cblNMhBD6iCLbvM4uCphmSrIlGgvKguuW30iS0gblZkMt3bseVVvoLajgUgg4wAqAydtuzwK7wKiqrjPN0J69PW5cXSVgz4j3qrNK2wkIx6FC3LC1pH3gTldvjAGUVOt031OW7nqMa6vwUxBgmjV/AEdAxYnPESu1BDFakhrckT0Z5ZXzNZQ0NiiVO2v/M1Bvo20ku8GRImQXpGOHlIxo/Md+GMuI92pr+oVTgxDslulGRH56HlDekBrgvkCuQ15kE5reIBoII8xqa9SqbJ+Ibkz45lxznhJ+CHgLW6NJekuCQWPX1F34cGN0QuOv9ACMHRoRurAg1fqXSQjDeLr6vXABgAQXB5TKEVhqlhu0KnGKjHASdQoAVhRrBHoloCvi/es84Wc17FdAviOhVN/1A/hiCa9cpnqHaNoPXVomQXVpTX4kUgKV/vEc/Ygzj6mozR6tl31lmFtKlXlMFIlPTKME3k8YwhGOBZoP2
Kk2Y5Ha7wSPCBl1r6odGlwECiWmH/UzIduMFowAhhjQrmPogy3A0MAeQDTzspoui5EAZINYt2v8NA9PEihUnbR+Oa4fterNwHtNRRMcC9M0p2YA+CW5Sx6YKJ7sUBI2fCQjKDG7pjxFVUfUs/8I/bTLgaowlNXU9GAQ+wZznuAUtEcUXafdKVtwl6LsC/QRJ0Al8gzgZUBr5c6ofK91QQOpkgtqIenw4PcAkqJr3LC1NSBx5E2ybIBfNDiT4lIsNE4+TjdcMAaUOPYKfT3IZp2C7QYiSlDUFhw8kAmVfDAEwkJoOcY/wA/631k/0wWL/U3lhGSVc1rd+3uUV/4JOsoNQO/GezY0x0W82zVD4v3FohhkiddvE4KPOXBmCEItF3dHxxe2MFuL1ZyRIfdDOOK0rViKS4IwCGJpTPmqgjxM6jqtypyLHe3UC/w+Ght/QbbLQNQKOJPD2hSgIxMPSGqQib6kUaoBoOxkXjCLipJWkPeYU7sqsVzbsKZEOPIpLRYdUQ1Ej9Qu2UjsAdcqBjz9NfvC5aDNhUh364Fwt33DAYEy6iDJsWhQyBlhYqirbDkcek8kaWoCoG+hhfjRmH0ODPasSXMriBXoOAYXk8TkS/nQ5QgO68ACu8H9zSrdzqFMhHnNxKAwVI8fuxCPwyDTG4oKNaplpPnq6HG1EQqFIgPZA4rBEBxFGi+lA3l5tflPbSzWPkk4YapGDQgk7rIyD+7ShouLVkXGKR9jjcSYFoz5hEiubsoyUsOiQwqJ+IZUGkA2GUhBtAFMwJ3Im/unRByAc3XEf3WBFk40QVooMB1It+xX9YfJKCxFCgvZ09QnFTLjp3j5akb5GE5DbKTw2wDBnI3dGmvGq3KU4H3eD3epDTWRKzU8B0s0SEACaAHLBbU17IMjXHjyNBiHRLoESJB4uRBwclduL3ZrGK3/Dq2FtzWbIRnAU7rzqZAAbcBatAHaGWsvjd1mqatyB8d4c3AIj2mAAj1ijzg2IzrZvUYue485VgdDi4AwieRZDNjiELmhb1K8mnkRatgnOBb3UNRIRHVDveFm2J8uv9I+Su0cebCEH3tDGMC+thwuh73CZSmBbmJzzmiCA31TVAzOz2s1GkdiyVNFg8kWfYFhAkvTEAUH/x9zRaxw0USS4AGwAkmARyUvvUgqbHGnICShK9OPmcIOSGfUAzQI/cBuIWQsBTRrlt9B/Rhao1gfM+YgIPOOXwgtlLYjeBiz38wyZDvDi7LnbD6cYlRYsqjajJTAtpF0uC4I+vrhIUmuTp1NVSlEa+VC588QHWfH484eevaYNfnqDs0f2aHOI9aDAeO0PSqNcFX4FOGDB8ikgYRRCL2gvi920/XPQ0RuHHKOT0DqjnlZAkuQGKW08dkEEJ1p4hGL1FjWgUNaomRZKlUAyNu6i8iz56nJdcRmWwlCmTia4wVH/AwWJCEZO4Lg26FmsoCI+CTM3vPyiKtTzVXtKB5OgmIoA0b01Gcnt2o6hnJbnnaY9vUAqJmFKA8SEicuSC6C3JMRoxASpcPRAU4CzZlIgMCE7NU3iwXJG6b5d2ReQQ8qx5nmamsusT3gYUp6aT9Q4nJY77wBGapnKUCr4okCQMbiIulOk+kg9dlw+AjQwFZY8lQRQggWhhTIirBee8KdyMq0URNbgaS4epoc5QBzIy0fAYTZJdYyfsB4SI4wdOqYuyaLuA78eA0CoeGQ2kp1o1MJgNK4/8eeVJiuTs7quf+30Mr6kp3VB/FYkl1+7V1P15/k0wSSkdH6ZmNJrM0aLoJUNC+ZddT7fkF3LQJGCcgPeOt3BiDzxjHYgylApQiPk6KvervZ8PJXsIkWoBrDVakj8nPlTB0cCUyEHnjshBkfj0jiyQE5O2RxVpSK7dCRZWR9WEndeMBboSJQ2mPNhoGs8SHDSI09BlbfRsNhCgor9SlAIGkqRZ+33DoIv+wvKhZgtGsOPsm2+jlATgWsNhRGwWmAa2yjFkxap5HCq9CakALxu
tPFrQoB1/plbFs/gOoIIRhrHoaBleWtwdZwPWyOC61dz8DK1i94emDLgC00y203zg6KtpRJRqItAq2gTlXWSjoGykSK8RXRdhFVoL8F6jd2JGp0Ju4sAJ1vHshB19F81S1dzqTECqIHk5uIqSn5bKWKJ3cE/qtVNHNDpFz/OkK6OkEFqO68LubcesCfmA5w4rzNERTHKfCxlESh30931zoSNeHzsj+gNYj2KAj4LOKOTsIUCI7cZNBSCfyehy1PymIqrEDZGUPkN78Q4AFeookiC0PnpSU78kw4stlWRZW24AiKJmWbtgpHN1GZD92A03R7ASJowKbUFD6KjZjKcygM6KR/8YDSqfip1EDPRZ7tAMmBeijQKRm4OB8Gg45REBY0g14QseLVAILFR9Coq8rYRDRiYiBeHkpPS5IxQluo+Sh9yvXAP3oxFpLxoVzPHGnyAWAoGkkIGmSYKmtJXyhFgd9Z/LGVrwLhdIuev1FxIBTMaxD+6M6HGn5F+0FQJaI1AfafYs0bW1t+cuWZiqKMApAsOmERZM/VI2ROV81QzebmmX92jYHkgdTI8XzCNxpkmB80pu1ZCis6g3J6XWsJWy+tDBwqkSfpzM6lYpR7RGAGdk7dHFVg4mIJXGrWmn5CZWjVLogh/ellIGbbAY2GCxNtgCiaPXrJ6J2GOFiG8oKS5tDJY6HXZSEnCV0MpGLqJr4DTDT0h61Pm22D6vIRjUStWksWFboECNxkHjmEbB+GmHKQ9B0VoYqoVN0BaBJADusqlPkjahkG9kEKKCBMFbND7Uw1egC6HpVsEEYXOBaopckp/U4VWXfo+AB4XWuPhhSMe8uGHBLEQb/1y7tgc+I05XM+9NCyBngV7Ci8URtChAQB7QQdqo0I2Eabqz8YobywpJocAN+Y3Ub67jxydPEQCi/Kge1KynEEte+o56832dMNFEIYL1B5MgRSczrD0YhIq2PRyxycTUNFPlUqtRrHxCDk3KVDmmjqh+7Td+BgtWKJzeUY81o8VyMO7Xo/w9ziLxIm0IaoaI5ntTXXQdXmf3tGvYfmn3C5/ew+Iz7++jmGGjIFkuqhbNSJ66trkurIDSrXJUfHATr56BoIaqpfRAtP06/5BzeIKqAtqTuiq6bZjrq4tGbhFGQ6NLneToEUvLayyuIc6nlD2GAnCbQk8CjTWFFLTZHR2e+8hX0cUo8jCM9QeRztICREYaTPQ1NfUR6TeQCnCv5VH4UGFbSEBtKT6RgQ7uiV/82zm4eZwhL0ipNk2UjvZyCJSoGpcBGK6aDSxAfj34x0Eu4Jwn4gV1TWxmRHePI+BjeX1InlO1F76VpsyzNfwb2qQBoOdzLsywovXJh6qNWaTflsR4eiMAulZr9dp31+ZgNCCYSk+roTG6th8mulF7o+QK7g+aYGhcywMikKXRpXQuLiyFlKZppyPF3aQZsQewX3lb8mPSC40Ig1hYUe4Rj+kBUBQ6nK9hS9BeMy+Tv/5DlP87Te7+tihXQ7xB1UWxmLbVEJe0wQXZLOMgCS4ClbRqhgza+amNooX9hAM0bjxWABJ4Bei/UdOehozG4byxDpIiJtbhuvafr584PLTCBJia9hMyrkxDi/jhpaGzOWQSQUcrBC1atIg7xSNRs2RNzdlQcIjoBl5I+i7YCTb7qKuLTICTpg5MABnAukQkq8RLgpRTw2T6ZGFFgdAOrHnNq3RoDtyChmglBM7VWGfnugx0DFSW4F3bniyYUsGrweFmcw0H5mU0JCIE3YMCGJQIr9b0e0h0XWz60PwYv5q0dO36csNIz03AsSoTqwpBxltZCs0M5eLdNCbiaUjjgiVBqZUR0Hc0Y6k5mk5paLaHqYifUxQoRXRxRLE9Vv1U93twaJMW3T8Olfl+O5LcqHYkyy/5oIDe/iQYAFZ4B8rz8x8ZaTppIntDE0T8FLLtsxtJ/TxvmwauRxOmBV9py5TF7lhd0imRZ77BAm1yaudYOU2IDEFR4DHCoxHpV9x
HleLv0r7qIBMlQU9WORFkZX8WMslUor4BemIkjoUVNPcvp2jTd8Q3P0Crwn0l+Ellc/cIbTBCGwUV9DgYId14LNom96iG25OOAoCiYNXoMh80hdeuI6wiRK7Dpf1G/ryNdl9OWHuT5M3iC/izNBw+2nt+jVp1woJvazmfTZ+tkXqIqCdX4KalM1KIL50LaVdnXAj41s3nO8dnw26hC/CCkfhxTeI5Iq8MmkVPQBw6QiTZnppZYBW0pam5NME5q2AFCWgfMjW4tKMBhzYOI8HBJAJ9epNWtavkEB1oIiA8xEBln3/1fixR7sw+UnRlXIoGj/A0WEF/ulgJMRyRwLCuEYumrUhpdHuBudB7lOU2HO/2OnqjCZSAyutQEOhY8Lt8gzs6YvnUEhSRZIGxiwDVkuB6jpqyb1oS7f7WhQ4Ruzfsc4vokK5HNc8WhGYcEHQ55PhgekBMmz06dDSoVegmICNbx6IhAN8h3+d8kfFzq8h5KzcDtZ4h8F4RBwAuin4gPA7GnHaGlpEkmuXAnzKa+Ca7474u1zEPar5qLOs2GUmII5k8AAelgxrVjn3FKaUA9TY4ovqF74aljygfkofpgKy09fKiYDgdaSPUkosqFa/DN10nSgG7CAXmt3cpxDioCvSMpO4jAXFG7trKMQrPAZTNuqQwDgKshzwQpJiArsNBOrCUKSPNxLkU1LZRuZSzUVJ+8ib9M8xrMK0GOxk7l5pP9nYj0QgdMugRM5d1MK5RMsmifK28OOXmU9VUk46H3oHU4Lgo3S2ITxpxIbL0QdfBtKwMm4BP2xgs7brYm93YG1MfCjBqBoK2lxfh1iQqACcEaJG19BJ1Z3NBjVpEB8Ch13FrQF+YJGVgGkOh7DXan8Y9OnERMlbHJNAf7TM31dmaFnWmq2lgWqUFkaLrA73gBj21S9HZH+0g4iCOyynhMOghndTBcyC1dc5Fh+OQyFHHE6pm2nCDgd1TWDZLfDM+DbaztTd5dpJEVBJgjSCRhFcfFVpqAYOIrByW3L3w62LZMSr9db+CbZDNQFohh5p7942OfOOwKjBqB4EJOuoMxMZF1cRtdQ3dAEXCizdCjJ4gFTzim5b4W7EQmGOZBcReDtzNngnXT2N0QkqhahqFErwaXjUV0W0BW+dJro5MRJ1BUWUvZTFy6xbwQAdU2dp97p9DcYhUpO/UTsXNKi4a50j5Y6W15RyW7CAiFum35dHpWRZ6NdrT0Wt0tX+nELXjDQ/LT7HwBeIjQVOMUTynMwQyYJrBX1ZENWkbg1ZAWQIKWafOLsBRtywc6aeyAbf9jhXRr5oyalNLvlsKx2C+66A0fM2iOlfqkp7YMNsAQqALpHKE7iOs0exGFQqe4bMT9DUaMQJKY7iMzJSIR1zjrDCWwAZgoAMY8KHmBvHClYt6aAJ/FnmPzshC7+AUMYzam+iOiE1YFcc8pLz6G+4/Jq6laQhAYYd+oPUi6fGxwzDubdiCZHQJkaBvERHkVNQS6cJ3jhMZ/dmMA8VNgiTzXLxgEQHjWsj4pGaR+Aq8RjHUNA5yvkNBOLxKMor2e0j7FSoJzTWfaOgb2JK0a7MGJB8sgyd0qAKPr7mN366mhLLioVVQQvLQrzEzPbXf1LcCSiiWCwvQhrzD7BgRnU4F0zrNRgE02+9PGC66SWOXraEtt/6ZM9Zh4lnK/tDBAHwAvxDDEtYzSl8LgZF7pkMSmBoN4nW2RRNRIPveiVqoBdiEJa/OR0DNsjZbG4s1UElD0wAEW40K3cA3XUeHQnrveGT9zM2jobAWLFzQmiu+I5IBH4YWvtpzRldoiL0gxwi6wQFwW3Xh4Ajq0HbDpoqG79g4fITXZmEbHUQHD5YOp0UNnkGHiyLAGGqCCqPWjxZwtGH/OpbnCYEVRFV99Jw0q/8cU9aBS+pQs7e3bmT+85lj6RQ1XOkG/BMwpohS7YMg5Bcij06emkCajqBunSCoOuBDJ38kiA4iaFL6Dj8W1EZxucH4KSUdXmk
sXGc6n3qXuGm9T7TrE3tAkE4weERyScSfoKbhaXxb9Nh0OgwlY/cQMkuE1bd47QiPq3TyPiizob9DAMtBR5KnmaN2B0mrdlKlIW/JFCb+IIywdSIOTfz+FqGDvpVQJEgYs8xrMKoZe/gcs0cbLR27MslhDLPDHRBsgD7g8LSnUSp2vki1d2rqm6f6F3nKHwXwcjOvu5/UqJJfamSLxzyrfYoFDyMZTCFb0AFsgFcDL1ngonMB2j3zAeZ2mgfwL4vRDHJFbuKQgC48nfYUqKalDRR4AeOtv7gAy1Cx71gMCscbTIzTc/5dGch826kT04Iv1y5k0k5hvHhRAAzo1TSA29bRBB30kFbB++eJAdtRbxQo8CDfL4/SSdNB0dIhKIfzLhKOwDIP3h61qqVDHOU5DC4Wtf8BtLtMtBOv1581HB2g29ql1Vng7uHZi681GlVbkpvk42/vRNikohHvmw4PXS64FrUhDHdh+OjMWV5jzxmwJDrV2jX3pHWBfkABzvUKn44Gag8Tzeznw1OHNEKiDhk+HIIsRJcJ35/YBY0p4PT6djC1s6kN4NMLkiFglQwxRMkBcQ5aRUkdzbU0qoQcIVKNWXFMMIemrdI0ptm6/p4haTqDHjhNO5FAxWDZgKQTcGmnyPj11q8pJB2PfBtMmjjprDxcj1pGicHc6m80kTZQjYqJgAx3yq0diPEUep2XaRMDZxHkTWB8ygNzGezpoZDQ6yjwtwFPBb397tF1hAcgSe7Kz3dcPxSeCjGRj+f2gMmI+gHvTtRBfB2Dh0GvTs12RB6dR7XQnnctnaV1ZIQVTh3/PcC/zn++dDbgagQdyDjJC4jnkBO6Oi0wtT1ZkFkeNU792MrJoUAFNyDueWcQlvaN6bT6TgxxEW3NXumYt0dbyrulXBA1YvCM35o60emS9sSRT7ED9KCQprw0Q5DxBIKwKAOQuIOSTFofKmlTIqNG7LeOPAgasBhOk0/79BtikiIznZqjU27gx6n4I4Bb1NC1FZ/hAsLq9ccWKOmIYkF9lZ0drYzlx0V1noYx1p/yDFkamKNI+GovmybWJYgLfJMQrKsvHYk/UCqLQjYcJ2GBgChIthcm8u83xQiMqRJ0ZoEGRF2WfAMCiiJIhS5UCyS1bEAomP7y4GiegRmc79RsmUbc21YFavRJILBvKIWa6UMdAl1Uc3zgwv0hBJYGIXu6zB3SNheQ1V+RafQE4VOp/wdi1canzzj4UAAAAYNpQ0NQSUNDIHByb2ZpbGUAAHicfZE9SMNAHMVfU6VSKg4WEXHIUJ0siIo4ahWKUCHUCq06mFz6BU0akhQXR8G14ODHYtXBxVlXB1dBEPwAcXNzUnSREv+XFFrEeHDcj3f3HnfvAKFRYZrVNQ5oum2mkwkxm1sVQ68QEMIAIgjLzDLmJCkF3/F1jwBf7+I8y//cn6NXzVsMCIjEs8wwbeIN4ulN2+C8TxxlJVklPiceM+mCxI9cVzx+41x0WeCZUTOTnieOEovFDlY6mJVMjXiKOKZqOuULWY9VzluctUqNte7JXxjJ6yvLXKc5jCQWsQQJIhTUUEYFNuK06qRYSNN+wsc/5PolcinkKoORYwFVaJBdP/gf/O7WKkxOeEmRBND94jgfI0BoF2jWHef72HGaJ0DwGbjS2/5qA5j5JL3e1mJHQN82cHHd1pQ94HIHGHwyZFN2pSBNoVAA3s/om3JA/y0QXvN6a+3j9AHIUFepG+DgEBgtUva6z7t7Onv790yrvx/xInJz/ZaLfwAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAATrwAAE68BY+aOwwAAAAd0SU1FB+UCBxYME13qmlgAACAASURBVHja7J13eBRV+/e/W7PpBQLplFCSQCAgHUMRCFWkCYIYioCCBRF5kEceiCAiRYooiBKKIEhTeqSFAAm9EwhFSiCV9Lqbbef9wzf5EXZ2dzbZTb0/1zUX4cyZM2fOnNn5zjnnvm8BY4yBIAiCIAi
CqDUIqQkIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBEECkCAIgiAIgiABSBAEQRAEQZAAJAiCIAiCIEgAEgRBEARBECQACYIgCIIgCBKABEEQBEEQBAlAgiAIgiAIggQgQRAEQRBErUVMTUBUNtnZ2Th+/DgiIyORmpqKlJQUJCcnQyAQwM3NDfXr14enpydCQkLwxhtvwM7OrsLqlpCQgD179uD+/ftIS0tDcnIykpKSIBQK4enpCXd3d7i5uWHAgAHo0aMHrKysKrTdIiMjeeXt0KEDvLy8LFIPrVaLq1evYvfu3Xj+/DkSExPx7NkzWFlZoUGDBvDw8IC/vz9Gjx4NHx8fi7ZJUVERzp07h8OHD+Off/5BcnIykpOTIZVK4eHhATc3N7Rt2xaDBg1CQEAAhEL6BiYIopbCCKKSOHnyJOvTpw8TiUQMAK9NIpGwt956i924ccNi9VIoFGzlypWsS5cuvOsFgDk4OLCxY8ey2NjYCmm/devW8a7boUOHzH7+wsJCtnDhQtaoUSNedRAIBOz1119nf/zxB9NqtWatS0pKCvv888+Zra0t7zZp2LAhW7t2LSsqKqKHkSCIWgcJQKLCuXbtGuvdu7dJ4opLTIwZM4alpaWZtW7Hjx9nfn5+5aqbUChk77//PktKSrJoOw4bNoxXfaytrVlhYaFZz33s2DHWpEmTMrdRv3792JMnT8pdD7VazRYtWsSsra3LXBdvb2925MgRejAJgiABSBCWYtu2bUwmk5VLYL28NW7cmN2+fbvc9VKpVOyDDz4wW70AMGdnZ3b06FGLtKNCoeA92vXWW2+Z9dwLFiwwS/vY2tqyU6dOlbkemZmZrH///mapi1AoZAsXLmQajYYeUoIgSAAShDmZPXu2WQVW8ebo6MiioqLKJf7GjBljkbqJRCL2ww8/mL0tT548ybsO69atM9t5Fy9ebNb2sbOzY9HR0SbXIy8vj7Vu3drs9+uTTz6hB5UgCBKABGEu1q5daxGBVby5urqyZ8+emVwvrVZrMfH38vbjjz+atT1nzJjB+9yJiYlmOefGjRst0jYODg7s3r17Jgn2AQMGWOxerVmzhh5YgiBqPALGGCNTGMKSREVFoU+fPlCr1RY9T/v27REdHQ2pVMr7mC1btmD8+PGWN7cXi3H8+HH06NHDLOUFBAQgLi7OaL527drh8uXL5T7fs2fP0LJlS+Tl5VmkfTp27Ijo6GiIxcYdEyxduhSzZ8+22L0SiUS4cuUKgoKC6OElCKLGQj4QCIuiVqvx4YcfWlz8AcDly5exevVq3vkTEhIwffr0CmuHUaNGITs7u9xlxcfH8xJ/ADBgwACz1H/KlCkWE38AcPHiRaxYscJovhcvXmDRokUWvVcajQYzZ86kh5cgCBKABFFWtm7divv371fY+ZYtW4aCggJeeWfNmoWcnJwKq5u5xMvBgwd55x06dKhZhPXRo0ct3j6LFy82eu8WLlyI3Nxci9clMjISERER9AATBEECkCBMRaPRYMGCBRV6zrS0NKxZs8ZovqSkJOzZs6fC2+THH3/E8+fPy1XG8ePHeeXz8PBAq1atyl1nPu1pDrKzsxEeHq53v0qlwvbt2yvsXv3+++/0EBMEQQKQIEzlxo0bePr0Ke/8Li4umDlzJo4dO4Zbt27h0qVLWL16NVq0aGHSebdt22Y0T3h4uEnT0l5eXggLC8OJEydw+/ZtnD9/HuvWrUNISIhJdVMoFNi0aVOZ21Qul/MWgIMGDSp3pIucnBzs3LnTpHZatmwZzp49i8OHD2PmzJmQSCS8j//ll1/07jt//jwyMzN5l9W8eXOsWbMGMTEx+PvvvzFv3jyTosgcPHgQSqWSHmSCIGomZAdDWIqwsDDelpetW7dmycnJnOUUFRWxqVOnmmTJacgiWKvVMh8fH95ljRo1iuXn5+stb//+/SY5Iu7YsWOZ2/To0aMVGv3j8OH
DJjl3zs7O1inj1q1bzNvbm3c5+vrBrFmzeJcxevRoTufXjx8/ZgH+AbzLOXv2LD3IBEHUSGgEkLAYfOPUAsAff/wBNzc3zn1SqRRr1qzBm2++ybu8Q4cO6d337NkzPHv2jFc5/fr1w++//w5bW1u9eQYPHozNmzfzrtulS5fw4sWLMrWpoet6GWtra7zxxhvlvoeXLl3iPfK3Y8cOODo66uwLDAzEpk2bIBAIeJV1584dzvSkpCRexzdq1AgbNmyAtbU1576NmzbyHhl98uQJPcgEQdRISAASFuPRo0e88gUHB8PPz89gHpFIhLVr1/KeTtQnIgDg3LlzvMoQi8VYt24dRCKR0bwjRoxA69at+Y66Iz4+vkxtytcwISQkhFMAmQrfen744YdwcnLSu79Xr17o3r07r7L++ecfzvSUlBRex0+ePBk2NjZ693fs2BE9e/bkVVZ512sSBEFUVcTUBOWHMQaVSgW1Wo2CggLcuXMHt27dwv379xEXF4fnz58jJycHOTk5EEAAewd7ODs7w8vLC/7+/vDz80Pr1q3h5+cHOzs7iMViiMXicq/fqkw0Gg3vF/bIkSN55fPy8sKwYcN4rUlLT08vtwAcOHAgGjZsyO9LSijEZ599hgkTJvDKz3c061VhpE8cvUq/fv3Mch/5jlR26tTJaJ7WrVsjKiqqzOfk25/atWtnNE9AQABOnjxpNF9GRgb9wBEEQQKQKI1KpYJKpUJGRgaioqJw6NAhnD93Hrl5uVCpVNBqtVCr1WD/RlxBsc/tzIxMZGVl4enTpzh37lyJ4HNwcECXLl0waNAg9OjRA3Xq1IFEIuHlHLeqkZubC41GwytvcHAw73J79OjBSwCmpqbq3cd3ZJLviFUxo0aNwvvvvw+tVms0b3Jyssltaor7l8GDB5vlPmZlZfHKp2/6/mVcnF14laXPFYxCoeB1vIeHh9E8devW5f2MEwRBkAAkSl4KRUVFuHv3LjZt2oRDhw4hOzsbSqUSKpUKxoKrMDBAgxKBVFRUVCKa/vzzTxw+fBhOTk4YPHgwJkyYAH9/f1hZWVUrIWiK9WS9evV45zU2VVyMoRFAvqM6zZo1M+mara2t4e3tzWvatCyOsU+cOMErX7t27XiJoPKIMUs/XwRBEAQJwCqDVquFQqFAbGwsfvjhBxw9ehR5eXlQKpVGRR8fiqeSVSoVCgoKEB4ejl27dqF///749NNP4e/vD2tr62o9NcyFi4sL77wODg6875U+0tLSeJVRv359k6+ladOmZV7fZ0yI8ZmyBMwX/cNUIW/O54wgCIIgAVglUCqVSE1Nxbp167Bx40ZkZWVZ9OXIGENRURGUSiV27tyJY8eOYfLkyfjggw9Qv359k3yrVWVkMhmsrKx45+drSWoIvlOJrq6uJpdtrpG3Vzl9+nTJSLExhgwZYrbzVsbHBt+lAwRBEEQ5ft+pCYwLsYKCAkRHR2PIkCFYvXo1UlNTK2xkhDEGpVKJFy9e4Pvvv8eQIUMQExODwsJCujllhG9MW1OEaTF8LIbLAl/3Lx4eHrytkflQGdOx5hD5BEEQhGFoBNCI+MrNzcXGjRuxZMkSpKenV9roBGMMcrkcN27cwOjRozFnzhyMGzcODg4OteqFWa9ePXz55Ze88unD3t4e+fn5Rssoi8jv1q0br9FZU6KbMMZ4u38xR/SPUl+IlTACWNOWOBAEQZAArEZotVpkZGRg0aJF2LJlC3Kyc/413qhkNBoNUlNTMX/+fMTHx+O///0vnJ2da81L09PTE4sXLy5XGfb29ryscHNyckwue/z48Rg/frxZr/nBgwe8Q+qZ4iybD5Ya0SQBSBAEUbnQL60e8ffixQvMmjULGzZsQHZ2dpUQf8UwxpCTk4NffvkFs2bNQlpamlmMUGoLfK2O+bqLsTR83b+YK/rHy8hksor/KhXTdylBEAQJwEoQf1lZWfjqq6+wZ8+eSnGDwVcEFhQUYNeuXZg3b96/IpVEIC+aNGn
CK58poewsybFjx3jl69Onj8EIGNVFAEqlUuqkBEEQJAArVlTl5+fju+++w+7du6us+Hu5voWFhdi+fTuWLFnCa10bAfj4+PDKt23btkqPBJGXl8cregZgvugfL8PXRQ+ftbEarcas5yQIgiBIAJoFuVyOPXv2IDw8nLelaFUQgQUFBfj111/x559/Qi6X0400QteuXXnly8rKwpgxYyq1TSMjI3lb4por+sfL8PWFePfuXaN5+PpH9Pb2pk5KEARBArBiUKlUiI2NRVhYGLKzs6tV3RljyMrKwvz583Hnzp0yRZmoTXTp0oW3H8Vjx44hODgYV69erZS6Hj58mFe+1157DZ6enmY/v6+vLz+hetLwdLlSqcTZs2d5lWWKhTRBEARBArBcAio7OxtffvklkpOTq+VaOsYYEhMTMWfOHOTk5NB6QAPY2dmhZ8+evPNfvXoVHTp0wPDhwxEdHV1hbavVanm7fxk4cKBF6tCjRw9e+cI3hhsUeN988w0eP35stBxHR0eTQ/ARBEEQJADLhEKhwM6dO3HlypVqPXqm0Whw8eJF7Nmzh3fUiNrKxIkTTRZjf/75J4KDg9GqVSusWrUKmZmZFq1jXFwcEhISeOU1Z/SPl2nfvj0aNGjAq32GDRuGP/74Q+fZmj17NhYtWsTrfKNGjaoU1zMEQRAkAGsZGo0GiYmJWLlyZbU3oig2Ylm+fDmSk5MppqoBhg4dWuawbbGxsZgxYwY8PT0RGhpqsVHBAwcO8Mrn7u5u1ugfLyMWi/HFF1/wypueno7Ro0ejVatWmDBhAkaOHAlfX18sXbqUV1+USCSYMWMGdU6CIAgSgJZHpVJh69at1Xbql0sEJiUmYceOHRUWrq46IpVKsXTp0nKVoVAosHXrVgQHByMgIAArVqxAVlaW2ep4/PhxXvlejf4xZswYNGvWzOh2/vx5XuV/+OGH6Ny5M+963759G5s3b8bu3buRlJTE+7j//e9/8PPzo85JEARBAtCyFEfV+O2336BQKGrMdckVcmzcuBFpaWk0CmiA0aNHY+jQoWYp6969e5g5cya8vb0xffp03hav+sjOzsaZM2d45X3V+vf58+d4+PCh0Y2vdbNYLMb+/ft5+08sC++88w6++uor6pQEQRAkAC2PWq3GgQMHalwkDcYYUlJScOjQId4uRGpl5xcK8fvvv6NLly5mK7OgoAA//PADfH198dlnn5XZhcyJEyd4+daTyWRmj/7BhaurK06dOmURC92xY8fit99+oxBwBEEQJAArhqKiIvzxxx81avSvmGLDFpoGNoy1tTWOHz+Ot956y6zlajQarF69GkFBQbh48aLJx/O1/g0JCTF79A99eHl5YdiwYWYvd+rUqbzd8hAEQRAkAMuFSqXCvXv3cO/ePV4jLdUNrVaL27dv48GDB+QX0Ag2NjbYu3cv1v601uxi6sGDB+jduzdvH3jF4vHIkSO88loi+oc+vvzySyxcuNDs5fbu3Rt///03dUSCIAgSgBUjkM6ePVtjI2cwxqBQKHD27NkaKXDNjUgkwtRpU/HgwQNMmDDBrK5I8vPzMXDgQN7OpGNjY5GSksIrryWif3Dxww8/YMmSJRYpWy6X4+233640Z9sEQRAkAGsRGo0GZ06fqdFr5FQqFc6cOUOGICbg6emJjRs34vHjx5gxYwacnJzMUm5eXh7Gjx/Pa0r+4MGDvMq0VPSPV3nw4AFvVzDlEckTJ06k0WqCIAgSgJajOH7u5SuXa/QLR61W49KlS8jPz6fIICbi4+ODFStWIDExEb/++ivatWtX7jJjY2N5jaIdO3aMV3kDBgyokLaYP38+7w+l1q1bY+/evYiPj8edO3ewZs0aODs78zr21q1b2Lp1K3U+giCICkBcGy+62PlzYWFhrRC6ycnJcHZ2hlgsph5vIjY2Npg0aRImTZqEK1euYO3atdixY0eZDYfWrVuHOXPm6L0XGRkZiImJ4VWWpaJ/vExOTg7++usvXnn
HjRuHX375BVKptCQtICAAI0aMQO/evXHnzh2jZezYsQMTJkyosf0pPj4eW7ZswYULF5CUlAShUAgvLy907doVkyZNQp06dYyWcffuXWzbtg3Xr19HSkoKRCIRvL290b17d0yYMAGOjo4m1enZs2f47rvvSqUJBAKsXr2as5/m5ORg27ZtOHnyJJ49ewYAaNCgAfr27Yt3330Xtra2Osds3LgRV65cMale3bp1wzvvvEM/QgRhQZFQ6ygqKmL79u1jdnZ2DECN3uzt7dmBAweYUqms0DZOSUnhVT+ZTFbt+k9mZib7/vvvWaNGjcp0T44fP6637B07dvAqw93dnanVas4yXn/9dV5lnDx50ui1HjlyhFdZ/v7+rLCwUG85169fZxKJxGg5YrGYKRQKzjJ8fX151SU2NtbodX399de8yvrkk0/M0mfUajWbN2+ewTZwcHBg27dvN/i7NXXqVCYQCPSW4eXlxSIiIkyq27x583TK6dmzJ2feY8eOMTc3N73n9/HxYWfPntU5bvTo0SY/J2PGjGEEQViOWjsFHB8fXyuMI7QaLeLj42kdoBlxdnbG559/jvv372Pbtm1o3LixSccbWuPH1/3LoEGDKiRm7uXLl3nlmzhxIqytrfXuDwoKQq9evYyWo1arcf/+/Rr3ezN58mQsWLDA4FR6bm4uxowZwzkNrtFo8Pbbb2PdunUGl3MkJCSgf//+CAsL4z0bwnW+8ePH66SdP38egwYNMmig9OzZM/Tr1w83btygHwqCqOLUWgGYlZVVK9bFaZkWmZmZ1NMtgEQiwbvvvovbt29j1KhRvI+7efOm3pcxXwH45ptvVsg1Pn/+nFe+jh07Gs0TEBDAq6x//vmnRvWTXbt2YdOmTbzzf/DBB0hISCiVtnbtWt6xoQHg66+/5pU/OjoaT548KZXm6OiIt99+u1SaSqVCaGgoLyOmgoIChIaGkvcBgiABWDXJy8urFQKQMUZGIBbGxsamJCYwHy5fvsz5crxx4wbS0tKMHi+TyXiNppmDjIwMXvlcXFyM5nF04Lc2ja8LnOrCt99+y5nu5uYGe3t7nXS5XI7vv/++5P9KpRKLFi3iLKN+/fp6fVd++eWXRp/7zZs366SNHj1aZzT39OnTnMK8Tp06nOsWb9++jaioKL3n9fb2RqdOnUq2Vq1a0Q8JQZAArBhRJJfLa40ArOnGLlUBiUSCuXPn8spbWFjIKaz4jvD06dOnwqJ/VMboMR8RXF14/vw5bt26VSrN1tYWkZGRSE5ORmZmJj777DOd4/bv31/y96lTp5Camlpqv4eHB65cuYKUlBTk5ORg1apVOmXExcXh3r17euuWn5+P3bt366RPmTJFJ+3cuXM6aZMmTUJycjKSkpLw/vvv6+w/efKk3nN/+OGHOH/+fMm2cuVKnTweHh70w0IQJADNj1QqhUAgqDXXSliebt268V6XxyVy+Lp/qcjoH5URJrGoqKjG9InHjx/rpI0aNQo9e/YEAIjFYixbtgzNmzcvlefJkyfIz88HAJw5c0anjKVLl+K1114rKWP69OkYNGgQpwjUx549e1BQUFAqrU2bNmjTpo1O3hcvXuikLVmyBBKJBFKplNO9UU5ODu924loXW1HLHAiCBGAtQiAQwM7OrlYIQIFAAHt7+1ojdisTmUwGPz8/XnlfneZMS0vjHTPY3HGLq5oYq0lrx7KysnTS2rdvX+r/YrEYb7zxhl4BlZiYyEsccfULQyKMy/hDnwuevLw8nbSXp/25lgCYMvPw6tpXOzs7dOrUiX5UCMKC1FrHcA4ODrVKABIVg5ubGy9/d9nZ2aX+//fff/NaktC2bdsKif5RDJ9F/+ampkcDqV+/Pufv0asUW+7n5uaWSpdIJJz5GzZsyLsOT58+xanIU6XSrKysMHbsWLPdk5eP+fzzz0sZSnXo0KHk78ePH+tYfvft25dmLgiCBKBlRJGnp2eFuNGobIqdxNIIYMXAN+rFq1OrR48e5XV
cRUX/eFWE1PRzViRCoe7Ey8CBA1GvXr1SacVhCO3s7EqlazQaMMZ0nukmTZpg/vz5pdKCgoI467BlyxYwlP7gGDFihN7+W5b10i/fx3bt2umNpsM1/VtRRk4EQQKwliEUCtGwYcNaIwB9fHw4XzqE+eE7avGyAFSr1Thy5Aiv44YOHVrjxVhttFgPDg7Wa0Vet25dnXvyzz//oGnTpqXSGzZsyMv/n0ajwW+//aaTPmnSJIMfzeYQulxwrX3lWs9IEAQJQLMIQC8vL0gkkhp/rRKJBF5eXjVGAGZmZmLdunVG83l6enI6s3327BkvP3MeHh681/OVeqB4htt7eW3drVu3ONeKcYn5mzdvIjY21mC+Vy1G9b54jx7T8TcXGhpqOQHIU0OQy6LSNGjQQCdt+vTp2LdvX5mmSc+dO6djnOLr64tu3boZ7Htl+e0xRkFBgY61cIsWLeDt7U03niBIAFpGALq4uKBJkybIzMyssVNOIpEIzZs3h7Ozc40RgPHx8bzcrQQEBHAKwF27dmHWrFlGj58yZQrWr19vcv34ipeX/ay9usZLHxqNBhMnTjRbWy5Zqmu5+aoANCuk68rEsGHDdFzFREREoFmzZpg9ezbee+89nWliQ3A5pR43bhyUSiUkEgmn2OMSmkqlsiSda60onzpFRUXpGBqFhITQTSeIitBCtfGiBQIBxGIxunXrVqNHASUSCbp37857VIooP3wXyzs6OlaL66mMvkPLFUrj7e3NaSUcHx+PadOmwc3NDZMnT8bVq1eNllVQUMDp+2/hwoWwtraGtbU13njjDZw6VdpAxNbWVueYv//+u5QgfRVXV1ej9Tl8+LBOGk3/EgQJQIu/ZLp16wYrK6sae41SqRSvv/56lV7rWFWn+8paL76uL6qLAKyMvlMb1uaayoYNG0qMQrhE3YYNG9CuXTsEBwcjOjpabzl79uwp8S/4MsUxilUqFU6dOoXevXtjx44dJfu5nDKPHj0aM2bMwIwZMzBmzBid/a+//rrRZ+xVAWhjY4OuXbvSDScIEoCWQyKRoF27dqhXr16NtJAVCoVwc3NDu3btqvQIYFFREecLSR9c/shMbRc+JCUllal8vs5vudx4VEVkMlmFn5NGrHVp1KgRoqOjjcZTjo6ORvfu3bFlyxbO/du2beN1Pq1Wi6lTp5YsT+DyyVdYWIhVq1Zh1apVkMvlpfbZ29ujS5cuBs8RFxeHZ8+elUrr06dPjf4oJwgSgFUAgUAAR0dHDB06tEb6m5JKpRg6dGilObw2pU1Nif3KN68+gcXlg40LPr78uDAUeutlKtKXX3moDB+Sr8ahJf6lRYsWuHHjBjZu3Ah/f3+D4m3KlCmIiYkplZ6cnGwwPBvXx0zxNG/Xrl3RokUL3seOGDHCqJAj9y8EQQKw0hCJRBgxYoRJC6irC3Z2dhgxYkSlrXE0ZeTo7t27vPOePXuWVz53d3fOdC8vL17Hx8fH4/nz5yZdc3JyMi+Bam9vDzc3t2rRj/iOVPIZMU3PSOdVlj5fdHyF4auWzVzwjTdc1T4OJRIJJkyYgNjYWJw6dQojR47kHDFVKpVYvHhxqbQTJ07oLG0ICgpCTEwMYmJiOH0GFq8rFIvF2LNnD6dFMhf6Ioq8DJfvSwr/RhAkACsEsViMgIAAhISE1ChjEKlUin79+sHPz6/SptNkMhlv8RAeHs4rn1wux59//skrr76RPn3C8FUYY1i4cKFJ18xlXclFUFBQtTF04LOQHwAuXLhgNM+NGzd4laVvdJTvvbty5YrRPMZc6RRTt07dqvnDLRSiR48e2LlzJx49eoR33nlHJ09kZGQpf5Nc17xjxw506dIFXbp0wfbt23X2v7w8w8/PD7du3cKKFSswevRoDB48GN27d9c5pnHjxkbX8WVnZ+vEOG7evLlJ0UwIgiABWGYEAgGsrKzw4YcfVptF+Xyuyd7eHh9++GGlrN96uR58/egdOnQIDx8+NJpv8eLFvNfm6Zs
ia9iwIe9oHZs2beItFB49eoTly5eXq27VWQD+9NNPSE/XP8IXERHBe/RWnw84vtP3P//8s8G1omfOnMHpqNO8yvLy9qry98jHxwe///47Bg4cqPPB9PLoOtf61GbNmnH+XcyrEWscHBwwY8YMbN++Hfv370eTJk10jgkNDTX6gXP8+HGdmM/k/oUgSABWKGKxGG3btq0xawGlUinefvtttGrVqtIX0+sLQ/UqWq0Ww4cPR3JyMud+tVqNJUuWmDQiN2zYML3tM3jwYF5lqNVq9OvXD5cvXzaY79q1axgwYAAvZ85V+UXHNQrOV6ympqZixIgRnFOrFy9eNMl/YePGjTnT+a6bTEhIQGhoKKcIjIuLw/vvv68TBk0fvr6+1eOHXCjknD7NzMws+ZvLQv1locYl2oqtg7nIz8/Hzp07dT78uPxvcn0QvMqrApYgCAvrn9reAAKBANbW1pg5cyZOnjyJJ0+eVNtIBMURTj7//HPY2NhUen169OiBX375hVfe27dvw8/PD6GhoXjjjTfg5uaGxMRE3LhxA7t27eI1QlhMu3btDK71Gzp0qF4ryVdJTExEx44d8eabb6JPnz7w9/eHvb09MjMz8ejRIxw9ehQRERG8/f/Z2Nigf//+pdI6d+6sV/yWhcGDBxsVrcC/8WBfFqNcArB9+/a8z3v69Gm0aNECH330Edq3b4+8vDxERkZiw4YNvJ2t+/j46IQ+e/m6lixZwqucffv2ITAwEFOmTEHbtm2Rl5eH8+fPY+3atTqOh/Xh8Hyl7gAAIABJREFU7OysN35tVYRr+jQ7O7vk71dH3Ph+nOnjjz/+0LHg79mzp9F1ghqNRsf9i0wmMxiJhCAIEoAWE04NGjTAnDlzMGvWrFI/mtVJyDo4OGDu3Lnw8fGpEr7U3nzzTdjZ2fF285Kbm4sff/wRP/74Y7nOO3XqVIP7+/fvjyZNmvAKCQf8ux7wwIEDOHDggFna5FVxbmVlZVajEL5uNLy8vIyet1mzZnB3d+ctUNPS0njFo9VHnz599O7r0KEDXF1deRtwxMfH46uvviqXkK6otcG//fabzvrWTZs2wdnZGdOnT0dBQUFJeseOHTF58mSdMrjq+vKoX1k+bA0ds3nzZp20sWPHGi3z5s2bePHiRam0Xr16kfU3QVS09qEm+L+1gCNHjsTIkSMrde1cWZHJZHjnnXcwbNiwKuNHy87Ozqyhy/jg7+9vNJyZVCrVsZCsKN5///1q1a9EIhGmTJlSYeczdC6xWIz33nuvwupiqXNxjardvXsX+/fvL7UVfzht3boV4eHhJdv+/fs5y311vR5Q2nCmLIZH+j4k4+LidNzM2NraYuTIkUbLJPcvBEECsMqJQDs7O8ydOxcdOnSoVlbBEokEHTt2xNy5c6ucS5uvv/6a9+J9c7B48WJeax+HDRtmNFKBuSmeRq5uzJw5k7cFbnl4++230aFDB4N55s6dy9uIpzz079/fLKKE63eEy5CJa4S1WHy9+kH39OlTznM9fvxYJ+3lpRBcdXlZjHIJU32/g7/++ivn/eMKGfcqL4eQe/nZIAiCBGDlNcb/j56xbt06tGzZslpEJBCJRAgMDMTatWtRv379KudexMnJCd9//32FnOujjz7CW2+9xfte79ixo8L88UmlUqxYsaJaPhf29vZYtmyZRc9ha2vLq584OzuXa4qZD2KxmLdFtzHq1aunk3b8+PFS/8/Pz0dkZKTOB2mx0H3VKvrOnTu4ePFiqTSNRoN9+/bpnOvl/s0lnO/fv1/yN5cTcy5H4EqlElu3btVJ5+P778WLFzp1b9y4Mac1MUEQFoYRpdBqtayoqIhdvnyZBQQEMJFIxABUyU0sFrMWLVqwK1eusKKioirbphqNho0dO9aibREcHMyUSqXJdYuJiWFWVlYWrZtAIGDh4eEV1t6vv/46r3qdPHnSpOdiypQpFmufv/76i3dd1Go1Gz58uMXu18aNG812L9LS0phQKNQ5x6x
Zs9iNGzdYVFQU6969u87+zp07l5Tx+eef6+x3dXVlmzZtYnfv3mVXr15l77zzjk6eBg0aMK1WW1LOqlWrdPK0atWKnT59mp0+fZq1atVKZ/93332nc0179uzRydeoUSOm0WiMtseWLVt0jp06dSq9eAiiEiABqOdlp1Ao2PXr11n79u2ZVCqtcuJPKpWyjh07suvXrzOFQlHl21SlUlnspd2tWzeWlZVV5rpFREQwJycni92rtWvXVmhbW0IAMsaYUqlkvXv3Nnv7LF++3ORrLCwsZB06dDB7XebMmWP2+9GrVy+T6/Hrr7+WHB8XF1emD9FZs2aVqsedO3dMLiM2NlbnegYMGKCTb968ebzaYvTo0TrHHjp0iF46BEECsGqJQKVSyeLi4lhISAizsbGpEsJPIBAwW1tb1r9/f/bgwYMqPfL3KkVFRWzixIlmbY8xY8aYpQ3u3bvH/Pz8zFo3Gxsbtn79+gpvZ0sJQMYYy8vLY6NGjTJL+0gkErZq1aoyX2deXh4bMWKE2UbTV61aVWrEzFycO3eOCQQC3nVp3749U6lUpcqYPXu2Sdfj5OTE0tPTderCNVKob3vrrbd0jn/+/DnntTx+/JjXB4Sjo6POh2x+fj69cAiCBGDVE4EqlYqlpKSw//znP6xOnTqc0zkVtQmFQla3Tl02Z84clpqaytRqdbVs1yNHjjAvL69ytYW3tzfbvn07r2knUwRFWFgYs7e3L/e9GjhwIHvy5EmltK8lBWDxc/HLL78wmUxW5vZp3Lgxu3TpUrmvVaPRsNWrV+sIC1O25s2bs9OnT1v0nvzwww+865KQkMA5gs5XvMlkMnbixAnOeuTm5rLAwECjZTRp0oRlZmbqHL9gwQKdvD179uTVBjExMTrH9u3bl140BEECsOqi0WhYbm4u27dvH3vttdcqfDRQIBAwGxsb1q5dO3bw4EGWl5dnVuFTGRQUFLBNmzZxrn8ytDVt0pQtWrSIFRYWWqxuGRkZbM6cOczX19fkUZf33nuPRUREVGrbWloAFvPo0SM2ceJEZiXlv4ayfv367LvvvjP7qE9WVhYLCwtjderU4V2Xli1bsq1bt+qMtlmKAwcOsCZNmuj9uJs4caLBpQwajYb9/PPPzN3d3eBa2Bs3bhisR25uLhs/fjzntLJQKGTvvPMOp/hTq9WsUaNGOsfwXd/KNYq5dOlSesEQRCUhYKyahr0om8ELNBoNGGPQarUlTk6L/xUIBBCLxZzWv4wxqFQqpKenIzw8HL/++ivS09Mhl8stVl+BQACZTAbXuq6Y8sEUTJw4ES4uLnr9/KnV6lKhmwQCQYnFa/G/IpGoJL2qkJSUhMuXL+PChQt49OgRMjMzkZGRARsbGzg4OMDd3R0+Pj4YNmwYAgMDK6z+jDE8fPgQBw4cwMOHD5GWloaUlBTk5OSgbt26qFevHlxdXeHi4oIePXqgW7duVSKc4M2bN3k5S+7YsSOnlaep5Ofn4++//8b58+fx8OFDpKenIycnB2KxGA4ODvD09ETz5s3Ru3dvdO7c2aLW9Wq1Gjdu3MDhw4fx8OFDpKamIjU1FVKpFPXr10f9+vXRtm1bDBo0iDNyhqXRaDSIjY1FXFwc7t+/D5VKhUaNGmHgwIG8LdKVSiUuXbqECxcuIC0tDTY2NvD29kbPnj3RqFEj3nVJTk7G0aNHS6IfNWrUCCEhIXpD7mk0GkRFRek4h+7atSsvJ85xcXFITEwslda2bVu4uLiQNSZBVAI1WgAyxqBWq6HVaqHRaJCXl4eEhAQ8f/4cCQkJSEpKQl5eHgoKCiCXy2FlZYWpU6eiVatWen/QtFotioqKkJiYiG3btmHz5s3ISM+AokjBOxyYMSQSCaysrFC3bl1MmDABY8eOhbu7O6ysrDjdvDDGUFRUhLt37+Knn35CYWEhZDIZ7OzsYGdnBw8PD/j4+MDT0xM+Pj6wtbWFSCSCSCSqVv4OCYIgCIIgAagXlUoFtVqNgoIC3Lp
1C9HR0Th//jxu3rwJuVwOjUYDjUZTMgpYvAkEAnh5emHpsqXo27cvbG1tOUebikcQVSoVMjIyEBkZid27d+PChQuQy+VQq9Ulm9EbIBBAKBRCLBZDKpVCJpOha9euGDFiBLp37446depAKpXq9cjPGENhYSFOnjyJmTNn4vnz59BqtRAIBCVb8cifUCiEtbU1AgMD0bVrV3Tp0gVBQUGws7ODRCKpFn4PCYIgCIIgAVhKCKnVaigUCty8eRN//fUXDh06hNTUVBQVFZkkyFxcXPDxxx/jo48+gpOTk8FRMo1GUzL1+uLFC9y8eRPnzp3DlStX8PDBQxTKC0sJzeJzFAszW1tbNGnSBO3atUOXLl3Qpk0buLq6lggyQ46d1Wo1srOz8csvv2DlypXIzMw0GLz9ZcEplUohkUhQr149DBgwAEOHDkWbNm1gbW1dJaYxCYIgCIIgAWhQ+KlUKuTm5uLEiRMIDw/H1atXIZfLoVQqjQoifSLJxsYGXbt2xaJFi+Dv7w9ra2uDYqx4VPDl0UWlUon09HRkZWWhsLAQBQUFYIzB1tYWtra2cHFxQd26dSGVSiEUCktGAo1F82CMQS6X4/79+/jf//6H06dPl8QNLcu1SqVS2NjYoHXr1nj//fcREhICJycnEoIEQRAEQQKw6lE8zXvy5EksX74cd+7cQWFhoVnW4gkEAohEIri6umLatGmYMGEC6tatq9cAw5AoLBahL48AAv9nlGGq2M3IyMCWLVvw048/IfVFainDj/Jcb/GIpL+/P2bOnImQkBDY2trS1DBBEARBkACsfF4eAfv6669x+vRp5OXlQaPRmCTuii1iBQIBBBAAAu681tbW8PX1xaeffooBAwbA0dGxwi1ptVptibXl6lWr8eDhAygUCr0jnMVTzsUC9GUhagyRSAQ7Ozt07doVX3/9NVq0aMHLyo8gCIIgCBKAFkGj0SAnJwe///47li1bhpSUFKMjYMXTnMXr3lxdXeHr64tGjRqhTp06sLW1hUwmMyjqioWgt7c3OnfubBb3GaYI3oKCAly8eBHx8fGQy+UGxVyxpXJhYSEyMjLw9OlTPHr0CC9evChZD2lserxYJNerVw8zZ87EuHHj4OTkZNKIJUEQBEEQJADLjUqlQkJCAv475784EnEE+fn5ekXMy2vbfHx8MGDAAHTu3BkBAQGoV69eqdG/4vy8Guz/+wo0tk7PEiJQpVLBlNv1soWzRqNBWloa4uLicOHCBRw+fBhPnjyBXC5HUVGRweu1s7NDSEgIlixZAm9vb1obSBAEQRAkACsGhUKB2NhYfPLJJ7h+/bpe0VI8UlenTh0MHz4cI0eOhL+/P2QyGW8ji5rIy9PBxSOAcXFx2Lt3L3bv2o209DTI5XJOgVkspgMDA/HDDz+gbdu2Jq2FJAiCIAiCBGCZxN/ly5cxefJkPHr0iNPIQyAQwMrKCu7u7vjwww8xatQo1KtXD2KxmJwd6xGExe5r0tPTsXv3bqxduxZJSUlQKBScx4hEIjRs2BDr169Hly5daF0gQRAEQZAAtJz4O3/+PCZPnoynT59yGnqIRCI4Oztj3LhxmDZtmsGoGYQuGo0GKpUKKSkpWL9+PcLDw5GZmcnZ1kKhED4+Pvj555/RvXt3yGQyakCCIAiCIAFoPpRKJa5fv46xY8fi8ePHnOv9rKysEBQUhEWLFqFjx46wtrYmQ4VyCEG5XI4rV67gq6++wrVr1zhHA4tF4NatW9GhQwdaE0gQBEEQJADNJ0b++ecfjBkzBjdv3tQZjSo2ThgyZAgWLlwINzc3WpdmBoqNTVJTUzF//nzs3bsXubm5OvlEIhFatGiBHTt2oFmzZuQrkCAIgiBIAJYPrVaLjIwMjB8/HidOnIBSqdQRf46Ojvjss8/w6aefwsHBgUb9LCDAc3NzsW7dOixfvhxZWVk6eSRiCbr36I6tW7eifv36Fe4bkSAIgiCIsiEKCwsLq2qVKiwsxJo1a7B9+3b
I5XId8efk6IS5c+fio48+gqOjI631swBCoRAymQxt2rSBs7MzLl68qHMvGGNITU2FUChEx44daSqYIAiCIKrLe76qVUilUuHixYtYs2aNTnxbgUAABwcHzPrPLEyZMgX29vY06mRBiqfZx48fjzlz5sDR0VFHABYWFmL9+vWIiYkxS0g6giAIgiBqmQBkjCEnJwcLFixAZmamzn5ra2uMHz8e06ZNg52dHYm/ChSBU6ZMwaRJk2BjY6Nzz7KysrBgwQJkZWWhGoeWJgiCIAgSgJWBUqnEn3/+iWvXrukYfYjFYnTq1AlffvklHBwcSPxVggicNWsWgoODdQw+tFotbt26hV27dhmMKkIQBEEQRBV5t1cVIxCtVouUlBT06tULDx48KOXyRSAQwNPTE/v370dgYCA5dq4k1Go17ty5g7feegvPnj0rNdonEAjg6+uLyMhIeHp60rpMgiAIgqjCVJm3tEqlwqFDh5CQkKDj78/GxgbTp0+Hn58fib9KRCQSoVmzZvjss89ga2tbah9jDMnJyfjrr79oLSBBEARBkAA0DmMMBQUF+PXXX1FYWKgjOvz9/fHee+9R6LFKRiAQQCaTYcyYMQgMDNQZ5ZPL5diwYQNyc3NpLSBBEARBkAA0jFqtxuXLl/Hw4UPO0b9PP/0UTk5OtO6viohAJycnfPrppzqjgFqtFs+ePcP58+c54zUTBEEQBFE1qBLhG9RqNQ4ePKgTdkwoFKJxo8bo168f+ZirQkgkEvTu3RvNmzfHtWvXSol2uVyOgwcPonfv3lVuul6lUmH+/Pk6jsW5sLW1hbu7O7y9vdG5c2e4uLiYdK6oqCgcOnSIc9/YsWMRFBREHamKcunSJezatYtz33vvvYfWrVvX2nu9evVqPH/+XHckQSjE0qVLLXbenJwcHDx4EH///Tfi4+ORlJSE3Nzckme0S5cuGDFiBJo3b07PZy3rG0Q5YJWMVqtlqamprHHjxkwgEDAAJZuNjQ1bsmQJk8vljKhaKBQKtmLFCmZtbV3qngkEAubt7c2Sk5OZVqutUnXOzs4uVVe+m0gkYj169GDbtm1jGo2G17mWLVumt7ydO3dSB6rCLFmyRO+92717d62+161ateK8TqlUapHzyeVyFhYWxqysrHg9qwMHDmR37tyh57MW9A2i/FT6FLBarcb9+/eRmZGps27M2toaAwYMoDizVXHoWCxGv379OI1BcnJyEBsbW2OmgTUaDaKiojB27Fh06tQJt2/fpg5AEBYmISEBbdu2RVhYGG/3UocPH0ZQUBA2bNhADUgQRqh0AajVanH16lUoVaWn5UQiEQICAtCgQQMSgFWx4wiF8Pb2RqtWrXTiMKtUKly5ckVnPWdN4PLly+jevTvOnDlDnYAgLERiYiK6d++OuLg4k49VqVSYPHkyFixYQA1JEFVdAF66dEnHdYhEIkFwcDCt/auiCAQCiMVidOvWTWetn1qtxoULF3ScedcUsrKy8Oabb+LJkyfUEQjCAu+E8ePH4/Hjx+UqJywsDHv27KEGJYiqLABv376tM10okUjQoUMHcihclTuPUIgOHTroCECNRoM7d+7UWAEIALm5uQgNDSV3NwRhZjZv3owTJ06UuxzGGKZOnYrc3FxqVIKwlABkjJVp02q1KCwsREpKis6LVCQSoWHDhuUqnzbLbkKhEA0aNNCZAtZqtcjKykJubi40Gk2Zy69IbGxs0KxZMzRr1gwNGjTQiXnMRXR0NGJiYuhXhCDMBGMMK1euNJrP0dERrq6uRvOlp6djxYoV1LAEwYG4rA+pUqmEVquFUqmEQqEo0wtbq9Xi6dOnUCl1I0cUTzFmZGTQXarKXxBCIecorVqtxoMHDyAUCsvkv1EoFEImk0EsFpdslhwN7t69O44cOVKqbz5+/BgbN27E8uXL9UY3Wb9+PV5//XWd9HHjxqFr166cx7Ro0YI6Tg2C7rX5uHDhAmJjY/Xuf+2117B582a0bNkSAJCSkoK
lS5caFI07duxAWFgY3TOCKI8AZIxBpVIhLS0NR48exYEDB3D16lXI5fIyf+1pNBrk5ukO0WdnZyM4OJimgKs4Wq0W2dnZOum5ubkYMmRIuXwB2traok2bNhgwYAAGDBgAV1dXWFlZVZiwbdKkCb799lu89tprGDFiBGe+kydPcqa7urryGqEgqj90r83H33//rXdfmzZtEBkZCQcHh5I0Nzc3rFixAmq1GmvWrOE87sGDB3j+/Dm8vb3pnhFEWQQgYwyFhYWIiIhAWFgY4uPjoVAoSqb4zA1jDFlZWXSHqimMsXKvvcnMzERycjJOnjyJ1atX4+uvv0b//v11XM9YmqFDh6J9+/a4fPmyzr7k5GQkJSXBw8ODbjpBlJMrV67o3Tdz5sxS4u9l5syZg59//lnvSH1CQkIpAUgQBM81gMXiLzw8HNOmTUNcXBzy8/OhVqtpETxhURGpUqmQn5+Pe/fuYerUqdi4cSMKCgoq9iERCjFp0iS9+5OSkuhmEYQZuHv3Lme6RCLBsGHD9B7n7u6Ovn376t2flpZGjUsQZRGAKpUKUVFRWLRoEdLT0mukfzeiaqPVapGRkYFvFn6DU6dO6f3StxR+fn5692VmZtINIggz8OLFC850Dw8PWFtbGzy22GhQ38ckQRAmCsDiqbwFCxYgPT0dDPQgEZUDYwwZmRn4+uuvkZ2dXaE/6l5eXnr3lXUNLEEQ/4dSqURhYSHnPjc3N6PH05o+gjANo2sAVSoVzp8/j/v379PIX1kUtlAIe3t7HUtYxhjy8/NrtK88S6DRaPDw4UOcP38e/fv3L5eRiSmYGtbu2rVrOHv2LOe+AQMGoGnTpkbLKCoqwr59+3D+/HmkpKQgISEBVlZW8Pb2hpubG4YPH4727dubtT7Xr1/Hpk2b8OzZM6SkpMDDwwNNmzbFtGnT0KBBA51jHz16hE2bNuH+/ftISEiAUCiEp6cnOnbsiNDQ0DK9lLmuWywWo379+mjevDmGDBmCoKAgkw3E1Go19u3bh4iICDx9+hT5+flwc3ND48aNMX78eLRu3bpMfcMc9xoAFAoF9uzZgytXriA5ObnkftevXx9+fn4YOnQoAgMDTbaqz83NxdGjRxEZGYnExESkpaVBJpPB1dUVAQEB6NevHzp27Fgma31zIpFIIJPJoFAoOH9HjdG0aVMEBgZy7rO3tzfbPXu1HxUUFKB+/frw8fHB2LFj0bFjRwD/Gp9ERERwljFw4EA0adKk3M9ocnIyXFxc0KJFC4SGhqJVq1Ymt3t16R+EZUZVjAbjnjVrFu9g3LT93yYUCpm/vz+7ePEiu3XrVqnt4sWLzN/fnwmFQmorEzeZTMa++OILJpfLTQp8nZ2drbfM/v37Gzw2KipK77ERERFmDTavUqnYwoULmaurq9G2aNOmDTt06JDRazdWn8LCQjZq1Ci9eSQSCfvmm2+YVqtljDGmVqvZ7NmzmUAg0HuMtbU1+/HHH0uOMYYp192hQwcWFRXF+94nJiayLl26GCxzzJgxLD8/ny1ZskRvnt27d5v1XjPGWFFREZs7dy5zcXExet1dunRhMTExvK65sLCQLViwgDk4OBgtNygoiB08eNBoma1ateI8XiqVmiU4va+vL2f5TZs2ZeakrPeMTz8aMmQIy8vLM3s/MvaMAmCTJk1iRUVFldI/LN03CPNjVAAWFBSwIUOGMLFYTOLDxE0kErH27duznJwcplKpSm05OTmsXbt2JADLsEkkEjZ06FBWWFhYYQJw3bp1eo+Njo422wsmLS2N9ejRw6T2EAgELCwsjGk0mjK98LZs2WL0pVa8TZ48mRUUFLBhw4bxrt93331n9N6U9bq//fZbowIzNzeXBQQE8Cqzffv2LCwsrMIEYFJSEuvatavJH5YrV640WO6zZ89YmzZtTH62Zs6cabAfWfol361bN733OikpqVIFoCn9KCgoiH311Vdm60emPKPvvvuu0eu3RP8gAVhDBWDfvn1JAJZ
DAObn5+u0a35+Pmvfvj0JwDJsYrGY9e3blxUUFFSYAOzUqZPeYxMSEszygikqKmIdOnQoc7t88803ZXrheXl5mXSeOnXqmCzUrl+/bnAErDzXvXz5coP37j//+Y9J5VlbW1eIAJTL5ax169Zlvu61a9dylpuVlcWaNm1qkX5k6Ze8oRGuWbNmVaoANLUfiUQis/UjU5/R/fv36712S/UPEoDVD95uYMiKiqitnDhxAhcuXODc16xZM7i7u5vlPPPmzcOlS5fKfPz8+fP1riEyREJCgkn5TY3OwxjDwoULLXbdc+bMwY0bN/Su11q/fr1J5VWUUc8XX3yBmzdvlvn4zz//HA8ePNBJHz9+PB4+fFiufnjt2rVKedZ69Oihd9+yZcuwYMGCCvcAUNZ+ZM713aY+o6tWrdK7rzr3D8K8UJgNgtBDUlISli5diqFDhxr8MTVHtJqkpCR8//335SpDo9Fg7ty5VbItIyIiOBf3m+O6VSqVXoH54MED5OTkVLn2ePLkCdauXVuuMhQKBRYvXlwqLSoqCvv37y9XuVqtFvPmzauUdhk+fLjB52n+/Pnw8/PDqlWrKjRQQFXtR/o4c+YMpyP+6t4/CBKAOggEAshkMtjY2EAqlZa5HIlEAmtr63KVUVnXb21tDVtbW8hksjJZawkEAlhZWcHGxgYymazWheCLiIhAs2bN0KxZMzRu3BgODg7w9PTE7NmzkZ+fz3mMl5cXpkyZYpbzb9u2zWRLY30//E+fPq1y7SuXy3H//n2LXfdff/2F5ORknfRHjx5Vyf62ZcsWs8yqbN++vZQw+fHHH81SvyNHjuj1yWdJXF1d0adPH4N5Hj9+jBkzZsDDwwOhoaGIjo62+AxVVe1Hhj4GuUaHq3v/IEgAlkIkEqFJkyb4z3/+g5UrV2LSpElwdXU1SQQJBAI4OTlh6NChmDt3LiZOnAgPD49qIQRFIhEaN26MOXPm4JdffsHs2bPRoEEDkwScQCBA3bp1MWnSJKxcuRKzZs1C48aNIRKJatXD8PDhQzx8+BBPnjxBXl6ewbw2Njb466+/UKdOHbOc++LFiwZfijt27EBGRgaeP3+O2bNnG+zfZ86cMfn8ISEhiI2NRXZ2Ni5evIg2bdoYPaZv376IjY1FTk4OLl68aNSNClc0BnNdN2MMkZGROunGnHQPHjwYd+/eRXZ2NiIjI9G8efMK6WuGrtvd3R179uxBVlYW4uPjMX36dL15lUplyfIEuVyOI0eO6M3brFkz7Nu3D9nZ2YiPj0dYWJjevIwxHDt2rFKew//+97+88ikUCmzduhXBwcFo2bIlVq1apfdjrbwY60ft2rXDtWvXkJmZiaNHj1ok7Jypz2h6errOR1hN6B+EGeFjBBISEmJwQSsq0ciiVatW7M6dO6ywsJApFAqWm5vLDh8+bNJCdWdnZ7Zz506Wk5PD5HI5y8/PZ7du3WLvvvsuc3Z2LrMBjCWNQAQCAZPJZOy1115jN2/eZAUFBayoqIjl5+ezS5cuMR8fH4MuOl4up27duuzIkSMsLy+PKRQKVlBQwGJjY1lgYGCVNFKxhBGIKVtAQAC7ceOGWReZt23b1qQF3aGhoXrz/+9//zOpPu7u7jquI9LS0pidnZ3eYzw9PZlSqSx1zIsXL5itra3eY/bu3WvR654xY4ZO/p9++klvfl9fX6ZSqUrlT0hIYPb29hY3AmnevLneY47j7ijwAAAgAElEQVQdO6aT35DV9dy5cxljjF2/ft2gEQGXFe3EiRMNWnxW1kL/RYsWlenZdHZ2ZgsXLtTpm+W9Z4b6kYODA8vOzi6VPy4uzuBvp6n9qCzP6KvnsHT/ICOQGmoEUlWRyWT4+OOP0ahRI1hbW8PKygp2dnYIDg7GoEGDeDkJlkqlGDhwIPr37w8HB4eSqWR/f3+sW7cO27dvR8+ePeHo6AixWFzp1ywQCCCRSFCnTh2MGzcOu3btgp+fX8n0t42NDQICAvDuu+/yGsGUSCQ
YNGgQXn/9ddjZ2cHKygrW1tbw9fXFxx9/DJlMRl9JrzBw4EAEBASYtcxXv9aLcXJywuDBg3XSDcUmNjZ6+SojRozQ6St169bFiBEj9B4zfPhwnefL1dUVw4cPr7Tr5loTplQqDV73q8+0p6cnRo4cafE+pC82raenJ+cU6Pvvv6+3rOzs7JIRbH188sknnMZKoaGhRsutDIpnNKysrEw6LisrC//73//QoUMHxMfHm60+hvrRqFGj4OjoWCrNz88PQ4YMMdv5y/KMcs1w1JT+QZgHcbWuvFiMgICAUi8igUAAqVSKwMBAiMVioxZjYrEYQUFBpR4ugUAAsVgMOzs79OrVC507d0Z0dDQ2bNiA6Oho5OfnQ6lUVmhkFIlEAqlECnsHe/Tr1w+TJ09Gq1atYG1tXWqqtrjuTZo0gVgsRlFRkdHrDwwM1Ll+kUgEf3//KiF6qxrLli1DVFQUTp06BVtbW4sKgmbNmnGmG4p7aupi9UaNGnGm+/j46D2mcePGJh9j6evmWvRuyBJT7zkaNLRo/1Gr1XoNGPRFoGjevLneKT8nJycAQL9+/XDu3DnOPC1btjS5PSvzBS8QCDB58mS0a9cOX375pcnTjTdu3EDPnj0RHR0NDw+PctenLP2IbwQYSz2jr1KT+gdBAhCA/hBBUqmU1zpAjUaDpKQkqNVqna/N4tE2BwcHhISEoHv37nj06BEOHDiAvXv3Ij4+HkqlEkql0iwL2YsRCUUQioQQi8UQi8WwtbFFx04dMXDgQLzxxhtwc3ODVCrVO8JZHC6NrxsCfeVIJBIK/6OHy5cvY/To0di3b1+5DWaKior0uh6xtrbmTDc0MsslhAyhL86qIXGr7xgbG5tKu24uDBkH2NnZcf92WFl27W9hYaHeeum7bl9fX6OuN+zt7dG5c2eT6mLoflUF119t2rTB0aNHERMTg2XLluHgwYO8P7yfPHmCiRMnIiIioty/Y4bawtPTkzPdxcXFbO1Qlme0NvQPopYLwPI+2EqlEnv37sW4cePQrFkzzhdMsRAsHnH08/PDp59+igcPHiAmJgbR0dG4evUqcnJyoNFooFarodVqS0bj9NWxeMqaaRmEQiGEIiEkEgm8vb3RokULtG7dGq1bt4a/vz8cHR0hkUggEokMGmcolUrcu3cP27dvNzr6R/wfwcHBCA8PLxmhycnJwc2bN7Fnzx6cOHGC85iDBw8iIiICAwcOtNjLpSLKK4uxjzkMhGrrC4RenKbTtWtXdO3aFc+fP8f69esRHh6OlJQUo8cdPXoUZ86cQffu3S1Wt4owlqttBnkECcAK+zFOSEjA2LFj8e2336JHjx4606qvCkHg3xHGoKAgtGrVCh988AGUSiWSk5Px7NkzJCYmIjU1FVlZWXBxceEsSywWY9q0aVAqlXB0dETdunVLNplMBpFIVDIVKxKJjI4yMcYgl8sRGxuLDz74AImJifSiMQE7OzudKZtOnTrhgw8+wA8//KDXEnP37t3lFoAEYWlUKhXS09ORl5eHnJwcqNVqODk5mdVZcUXg7e2Nb775BvPmzcNff/2F1atX4/z58waP2bZ1m0UFIPUPggRgNUaj0eDu3bsYN24chg0bhs8++wyNGjWClZWV3i+vl8Ug8O/UlJ2dHXx9fUumKIoFGJcxhlQqxbBhw0rKKt6KhZ+pD29+fj7+/PNPLFiwAImJifTgmpFPPvkEv/76K2JjY3X27du3DyqVipfBEcGfM2fOcK6tMudSi5pObm4ufv75Zxw/fhwxMTEVFuGkIpBKpRg1ahRGjhyJAwcOYOzYsXpdwJw5e4Y6Qy3rHwQJQJNFYFZWFn777TccOXIEoaGhGDduHHx8fCCVSo0aQ7w8WseHYsfLZYUxBrVaDYVCgWvXrmHZsmU4e/Ys8vPzK9Q4pTYgEAgwevRofPXVVzr7cnJykJ6ebrZwcMT/9e/yhKuq7Wzfvh3Tpk2rVtEryvpsvvXWWzhy5Ai6devGmefhw4f
Iy8uDvb09dYxa1j8Iw1AouFdeOsVTuatWrULPnj3xxRdf4MKFC8jJyTG7sUdZ6qdSqaBQKJCRkYGIiAiEhoZi2LBhOHr0KHJzc0n8WYhevXrp3afPlQlBVAbr16/Hu+++W6te7sHBwejatave302KWlG7+wfBDY0A6vnBUCgUSE1NxcaNG/HHH38gwD8Aw0cMR69evdCoUaNSBhmWCptWPMqn1Wqh0WhQWFiI2NhYHDt2DPv37UdCYgLkcnmlBEevbdSvX1/vPnq5EFWFhw8f4rPPPquV1x4YGIiYmBi9z6ivry/1j1rcPwgSgCYLsKKion/DLV28gOs3ruObb75B06ZN0bNnT3Tt2hX+/v6oV69eibWvUCgstaav+P9caDSaUusFX940Gg0yMzPx6NEj3L59GxcuXEBMTAxycnJK6kSjfRWHIfcjlgo/RRCmsmzZMigUilp57a86Y36ZwsJC6hy1vH8QJADLLASLR+DkcjmuXLmCmzdv4qeffoJEIoG7uzsCAwPh7+8PHx8fuLu7w9XVFXZ2dnBwcEC9evV0RgnVajUePXqEtLQ05OTkICsrC8nJyUhISMDTp09x7949ZGRkQKVSQa1WQ61WQ6VSkWVvJUFiu2Lx9vbGmDFjTDrG3NFZqhsqlQp79+41mOe1117DpEmT0LJlSzg5OSEuLq5CIp9UBIbWVJPoof5BkAA0mxjUaDQlPyrZ2dl48OABxGLxv/78hMKS6eGgoCAcOHBAx6FmUVERpk6diuvXr0Oj0YAxBq1WC61WWzLtS6Kj6kAjCBVL48aN8d1331FDmMCjR4+QmZmpd394eDgmTpxYKs3Q0oaKJjo6Gh988AHnvs8//9xgODzAsE9Y+nCu/v2DIAFYJSkWa6+uxROJRMjNzdX745Ofn0+GGzVAAJorHBxBlIf79+/r3TdkyBCdl3tVIyUlBXfv3uXcl5GRYfR4Q47vHRwcqH9U8/5BkAC0GCKRCDY2NhCLxSWje4YCgFclJBIJbGxsIBKJSqaqyTDE/F/P+nB2di5X2eY2IqouUQMsZTzFF33PiKVHiyx13Y8fP9a7b9CgQdX6+cvLyzOax9DvNQnAmt0/CBKA5RJQnTp1wuzZs+Hn54fk5GSEh4fjwIEDJeHdqiICgQC2trYYNmwYpk+fDk9PTzx9+hTLly/HwYMHKRScGdEXDs4cAlAqlcLOzo7TmERfXN+CggK95RlaDF+VMHTd+p65vLw87N69m3Ofv7+/TqxTQ9OC8fHxnOmWtuq2sbGBUCjkHPnXNxuQnZ2NP//8k3Nfy5Yt0aFDB4MiycPDgzPdXG6tzCGaDT1HT548MXp8YmKi3n3lnco01I+ysrI406vaspHK6h80/U4CsEqPQjRu3BgbN26Et7c3xGIxfHx80Lp1awwfPhyLFy/G7du3UVBQUKWmakUiEZycnPDee+/hq6++gpOTE0QiEVxcXLB69WokJyfjwoULFBHETF/Ov/32G+c+W1tb+Pj4lPscrq6unELowYMH0Gg0OqN6hpwkV6fRDn3X/c8//3DmP336tN61YKGhoToCkCsKTzF37tzhTL99+7bFn906deogLS1NZ5++keajR4/qve4pU6agQ4cOBgVHQkICZ/rz589N/r3kQqVSobCwUGetsykYcqZ+4MABZGRkoE6dOpz7FQoFjh8/rveDyM3NrdwfK/rQN21d1RyZW7p/WLJvEBbSP7W9AaRSKYYPHw5PT8+S0G8SiQR2dnYICQnBvn37sHr1agQFBcHBwcFoRBBLI5FI4OTohM6dO2Pz5s1YuHAhXFxcStzQSCQSuLi4YPjw4QZ/tAh+XL16FQMGDNDr6qVLly5m6ROurq6c6QUFBdi8ebPOF/Xq1av1llWdIh7ou+6UlBQcOXKkVJpWq8XatWtNGkEy5L5n7969OqOAZ8+e1SskKuK6Hz16hFOnTpVK02g0+Pnnn/WW5eTk9O/XvIF+ePLkSc70yMhIk+ptaHQ5Li6uXG3
SqFEjvSIhLy8PH3/8Mec0L2MMc+bM0Wvg0LZtW5PDa5rSj3bu3Knz+xAbG6t3pLrSRnss3D8s2TcIC/WJWq+AhUI4OzvrfL0IBAJIpVLUrVsXY8aMwaBBgxAVFYXNmzfj0qVLKCgogFKptPgIm0AggFgsLpku69atG959910EBwfDzs6OU+QJBALY2NiU+0evNnHv3j3897//LXnhZmdn4/r167h8+bLB40JCQsxy/iZNmuDSpUuc+z766CMkJSVhwIAByMvLw+rVq3H48GG9ZbVo0aLatLuh6x49ejTm/b/2zjwsqvL9/+9ZmI19RxQREdTUEBJMrVzSBHJBMyvNrbS6KjOXxMqPueZWfV1aTDOz/GqJuRGamYiSmi1gfsR9AUQQFYad2Z/fH/1mviJzhplhZhjgfl3XXMpZnnPOc9/znPc8y30vWIBBgwahvLwcX331FQ4ePMhZVlRUVL1tPj4+nMcrlUr0798fy5cvR+fOnXHixAmDD9ibyMhIoz1HjDE888wz+OCDD9C/f3+Ulpbiiy++QEZGBmdZjzzyCADAz8+P85iUlBRMnToVgwcPrtNDtXr1aovu29Qw7RtvvIFt27ahU6dOVtWJVCrF8OHD8cMPPxjd//333+P69euYNm0aevbsCZlMhosXLzboF0OHDm20vUz5UXFxMfr3748FCxYgNDQUv//+O95//32nm4dtb/+wp28QJADtglqtxokTJzBt2jSjcaT0OXv9/PwwYsQIxMfHIzc3F4cOHUJaWhrOnTsHhUJhiNWnD+lijdC7P3yMUCiEUCiEv78/+vXrh0GDBqFv374IDAyEi4sLXFxcjAo8fRaTo0eP0kIQC7hx4waWL19u8QurodAU5tK3b19s376dU6gsWLAACxYsMMuPuHKiOiOmnruiogJz5swx+/tj7EUfEhJi8ry8vDyL4w3a6rn37t1rdJ9cLjc7W4NAIDCkKWzXrh3ncTqdDgkJCZgyZQq6dOmCvLw8fP311xYHMW/bti3nvtOnTyMiIgIdOnRAZmamyfvh4pVXXuEUgADwxx9/cP5g4MIWcewa8qOsrCwkJSU59XfN3v5hb98gSADaHI1Gg/T0dGzduhUTJ06Eu7u70VWUeiEoEonQtWtXREZG4rXXXkNxcTH++9//Ijs7G//88w/Onz+P0tJSaLVawyINY0KNx+PBzc3N8MvSz88P7dq1Q3h4OMLDwxEZGYmIiAgEBwdDJBIZBKGpFYSMMVRXV2PXrl04dOgQCUA789ZbbzV6AYie8ePHY+7cuY2eOD5ixIhmFbvLVs/97LPPGp3IHhkZCR6P53QT0SdNmoT58+c3OtLAhAkTDPPiHnvssQbbuk2bNjXqegMGDMBnn31m8pjc3FzU1tZaVf6gQYPw9NNPm+zhtlT8hYWFNbocZ/UjS7C3f9jbNwgSgDaHMYbKykr85z//wR9//IH33nsPHTp0gEQiMSq29EOy+mHZDh06ICQkBPHx8YZ4gFVVVZDL5aioqDAc9yAikQibN2+GUCiETCaDWCyuk0ZOH1Da3PllGo0GlZWV2Lx5M1asWEGJvu1MdHQ0Fi9ebLPyvLy8sHz5csyYMcPqMlxdXZtd8GRbPLebmxuWLFnCWX5cXBxOnz7tVM8dEBCARYsW4d1337W6DG9vbyxcuNDwd1BQEGJjYxucttAYhgwZAk9PT7u2Lxs3bkRcXJzJVb3mIJPJLB7iNuWnzuhHlmBv/3CEbxC2hU9V8H8iMCUlBfHx8Vi7di1u3bqF2tpakyt/eTweBAIBRCIRJBIJZDIZ3NzcEBgYiMjISMTExODhhx82KuIEAgHat2+Ptm3bwsfHB25ubpDJZJBKpZBIJIZev4bQx/07d+4cpk6dikWLFqGkpISW3tuRqKgopKam2nyRzZtvvonhw4dbff5nn32GLl26NLv6bMxzCwQCbNu2DZGRkZzHvPbaaxYLSkfwzjvv1JlzZdEvd6EQO3bsQGhoaJ3t5kwTuJ/
ExESLjvf09GyUaDWH4OBg7Nmzx+SctYYQi8XYs2ePTVboW+tHplY1NxX29A9H+AZBAtBuIlClUqGgoABLly7FwIEDsXbtWuTm5qKmpsaieEh6Yaifq2fqGH2vn6X3qlarUV1djatXr2Lx4sVITExEamqqxXN6CPMRiUR4/fXX8dtvv5mc72L1l5HPx86dOy2OyC+TybBt2zZMmjSpeTZCVj63u7s79u/fj5EjR5o8bsKECUhISDCrzGHDhiE5Odkhzy0QCLBv3z6L5yB6eXnh4MGDRuc8Dhs2DLNmzTKrnMFPDsbGjRstvu85c+bY3ddiY2Pxxx9/oGfPnhaf27ZtW+zfv99mC7Ss8aOkpCRMnz7d6b5r9vYPR/gGQQLQrkKwpqYG169fx5IlS/D4449jzpw5OH78OORyuWHBR1Pcl1qtRm1tLeRyOTIyMjBz5kwMGDDAEPeP5vzZ5yXdv39/LFq0CJcvX8Znn31m1x4iiUSCzZs3Y/v27YiNjW2wF2js2LH4/fffMX78+GZdz5Y8t0AgwKRJk5CTk2NWD4VAIMCuXbswZcoUk+I+OTkZe/bscWj4JL1437p1K6Kjoxu099SpU3H+/HmTPYerVq3CmjVrOEOq8Hg8TJkyBWkH0qx6VoFAgM2bN2PTxk1GV17birCwMPz555/YsmULIiIiGjw+ICAAc+bMwYULF2wu/sz1I7FYjEWLFmHXrl1Om5HHnv7hKN8gbAOPNTBWWFNTg1GjRuHIkSNOF1TY09MTP//8M3r16lVnuFSpVGLjxo2YN29eoyeX68PBSCQStG/fHsOHD8eTTz6J7t27w83NzdCLp//XFmi1Wuh0OjDGoNPpoNFoUFpain/++QdHjx7FwYMHUVRUhNra2kZPIpfJZFixYgVeeeWVOqug1Wo1/v77b8THxzvdnA6hUIgnn3wSu3fvtii4qFqtxrFjx8xqxNzd3REcHAx/f3/OXlxTFBUVcQYajomJMRlW4n6ys7Px22+/4ebNm7hz5w7EYjECAwPRsWNHJCQkmL3gw5r7yc3N5QzI3KNHD6PXNnXOww8/jICAAKueWx/fMioqCvHx8WbX34PcuHEDO3fuREFBAeRyOTw9PdGpUye88MILhmDBlj63rWzNGENWVhZOnjyJ/Px83L17FyKRCN7e3oiJiUF8fLxFWV7KysqQlpaGnJwc3Lp1CxKJBB06dEBSUhK6du1qaGsyMjKMThnx9/c36yV+7tw5nDhxAleuXEFlZSVUKhU++eQTmy2Q0tfNpUuXcPDgQRQUFKC0tBR8Ph8+Pj7w9/fHY489ht69e5stuhprs/v9qLy8HL6+voiIiMDzzz9vOHfVqlWcvckpKSkYM2ZMk3xHHekfjvANggSg3QTgg8LAxcUFYrEYHh4e6N27N2JjY9GzZ0+EhYUhICDAEJ7l/o+xX1H6IWfGmOHLpdPpoFarUVJSgps3b+L69ev4559/8Pfff+PSpUtQKpVQq9VQqVQ2y0rSmgQgQRCEI3vaLBGABOHwdylVgWU9c1qtFgqFAhUVFbh9+zbS0tIMq4I9PDwQFhZmWNzh7e2Njh07Yvjw4fVEoEqlwnfffYfLly+jpKQEJSUlyM/PR1FREVQqFTQajaH3T/8hCIIgCIIgAdiE6Ofk3T/vrqysDAUFBXWGhWNiYpCQkFBPAGo0GmzduhVZWVkGsaf/EARBEM6DXC5Hfn6+0X0hISFGh2dpTjZBArAZoV+Za60QY4wZegmBf4eMFQoFZ0gWpVIJhUJhE9GnzyCiF5MEQRCEbUhLS8OECROM7nvvvfewbNmyetuLioo4y6M87QQJQCcSfhKJBG3atEFERAQKCwuRl5uHquoqpxdTPB4PHh4eiI6ORnh4OC5evIizZ8+isrKSDEsA+Dd91s6dO43umzBhgk1W6y1fvhwlJSX1tkulUs4gzUTLR61W44MPPjBrsZpIJIKPjw+Cg4PRp08fdOjQwWnymevzLRtj8+bNmDNnTp2
FDUqlEnv27OE8x9zFUETjyMjIwE8//WR034svvmhVmCESgC0MmUyG6dOn46233oKnpyeUSiV++eUXLFu2DNeuXbPpQhJbwufzERAQgE8++QSJiYkQiURQKpVISUnB3LlzUVZWRsYlkJGRgY8//tjovkcffdQmAnDz5s24du1ave0+Pj4kAFsxNTU1FufY1tOuXTu89dZbeP311+Hq6tqkzxEeHg6pVGo0jVlxcTEef/xxrF+/Hj169MCdO3cwe/ZsFBYWcv5o79y5MzmHA/jrr7842764uLhWLwBbfRxAoVCIPn364J133kFgYCBkMhm8vLyQlJSEAwcO4N1330X79u0hlUqd5p71OYajoqKwZcsWjBw5Ep6enpBKpfD09MRzzz2HkSNHWhW+hCAIwhkoKCjA3LlzERkZ2eQp2EQiEcaOHcu5PycnB4MGDYK/vz+6deuGn3/+mfPYmJgYCoNCkAB0BlxcXDB48GC4uroa4vjxeDyIxWIEBwdj9uzZOHLkCJKTkxEWFgZXV1ez8/Pa417d3NzQqVMnfPDBB0hNTcXAgQPrhELR33u/fv1onglBEM2ewsJCDBo0CAcPHmzS+3j11VdtUs60adPIqAQJQGeBK+cvn8+HRCJBWFgYkpOTkZmZifXr12PgwIHw9fWFTCazqxgUCAQQi8Vwd3dHYGAgRo4ciS+//BLHjx/Hm2++iTZt2tSJ3aeHMYaysjJaDEIQRIugpqYG48aNQ15eXpPdQ58+fSxO2/cgXbt2pVRphNPQ6ucAqlQqpKamYtq0aQgKCqoXSV6/MlggEKBNmzYYN24cxowZg5s3b+LYsWM4cuQITp8+jaqqKkO8Pv0q4vuDQXMJPJFIBMYY+Hy+4SMUCiGVSvHQQw8hLi4OvXv3RnR0NHx8fODi4gKhUMiZdUSn06GkpAQpKSlQKpXk4QRBtAjKysowYcIEHD9+vMnu4dNPP0VWVhYuXrxo8bkeHh7YsWMHJBIJGZMgAegMaLVa5OTkYMaMGVi2bBlCQ0M5v6B8Ph9isRgikQidO3dGeHg4Jk+ejKqqKuTn5+Py5cu4evUq8vLyUFhYiNLSUoSGhhoVgHw+H7169UJYWBi8vb3Rpk0btGnTBu3atUO7du0QFBQEqVRaRxQ2lGpOo9Hgzp07mDt3Ls6ePUs9gARBOC0ymQzt2rUz/F1aWoqSkhKYSk6VmZmJv/76C7169WqSe/b29saxY8eQkJCArKwss8/z9/dHWloa5cclSAA6G/pewH/++Qdz587F6NGj4e7uzjmH7v5eQeDfRPZeXl7o3r27IbWb/qMPMfMgEokEq1atMhxz/0cfRNrcEAiMMSgUCly8eBHz5s1DZmYm9f4RBOHU9O/fHwcOHKizrba2FqmpqZg1axZu3bpl9LwNGzbgq6++arL7DggIwMmTJ7F48WKsW7cOVVVVnMfy+Xy89NJL+PDDD+Hv709GdzCTJk1Cv379jO7r1q0bCUBykf/Ly3vt2jXMnj0b3333HWbOnIkBAwYYFn2Y6n3j8XgWr7jl8XiNXlnMGINSqYRcLsf27duxZs0aFN8uhlrT8iPQN9QbShBE80MqlWLs2LGIi4vDww8/bDSe6aFDh5r8PsViMZYtW4bk5GTs2bMHp06dQkFBASorK+Hr6wt/f3/06tULSUlJJPyaEH9/f6p/EoDmC6qqqiqcPHkSZ8+eRY8ePfDKK69gyJAh8PT0NMy/a+p71Gg0UKvVKCkpQWpqKjZs2IDr169zLmYhCIJoTnTo0AFvvPEGVqxYUW9fQUEBSktLjaZfczQeHh6YNGkSLewgSAC2FBGo1WpRUVGBU6dO4cyZMwgMDMTo0aMxfPhwdO/e3TA3z5x5eba6H51OB61Wi6qqKmRnZ2Pfvn1ITU1FaWkpCT+CIFocI0aMMCoAAeDmzZtOIQAJggRgCxaCVVVVqK6uxvr167Fp0ya0bdsW8fHxeOKJJxAVFQVfX1+DELx/Dp8lwpAxBp1OZ5g
3qP+/TqdDbW0t8vLycObMGWRmZuLYsWMoKSmBUqmEWqWGjpHwIwii5dGhQwfOfXfu3KEKIggSgI4RgwqFAgqFAhUVFbh69So2btwIkUiEjh07IjY2Fg899BA6deqE4OBg+Pn5wc3NzbCYQywW11vQoS9Tq9WipqYGlZWVuHfvHoqLi3Hz5k1cuXIFFy5cQE5ODqqrq6HRaKBSqQxhZgiCIFoypuZIy+VyqiCCIAHoWHQ6HZRKJZRKJXg8HsrKynD27FlDL6D+X5lMBl9fX8TGxmLt2rX1GrPa2lpMnjwZJ0+ehEKhqDfUq9PpoNFooNVqTYZFIBpHRUUFDh06hPT0dNy6dQt3796FRCKBv78/HnroIcTHx6N3795mrcjOyspCZmam0X2JiYmIiIgAAGRnZ2PLli3Iz89HUVERfHx80K1bN0ycOBEPP/yw2feu0Wiwd+9eHDx4ELm5uaiqqkJQUBA6duyIyZMnO3XICWvq6vbt2wgODkZERARef/11hIaG1jv32rVr2LJlCy5duoSCggLw+Xy0bdsWvXv3xsSJE62aEG5LH+GyXXV1NQIDA9G+fXu8+OKL6N27NwDg8uXLnBkwnn76aXTq1KnBaymVSuzduxenTp3C7du3UVBQAKFQiMDAQHTu3BlJSdhyo+IAACAASURBVEno2bOn0y2sMjXf+sEoB+b6EwAoFAr89NNPSE9Px507d3D79m1kZmYatZ+xuhOLxQgJCUFQUBCeeeYZxMbGNuo5FQoFdu3ahb/++gtFRUWGawQGBqJLly4YNWoUevToYZF/OcL2xcXF2L9/P06ePImioiKUlZXB19cXbdu2xZAhQ5CQkAA3Nzer6sRWZVviF87QhtuzHeDq3TJJdXU1e+qpp5hAIGAAnOrj6enJTp06xdRqdZ17VigUbN26dUwmkzXZvfF4PObi4sJ69+7Nqqqq6tVrVVUVi4uLa/J6lclkbN26dUyhUNS5P5VKxU6dOsU8PT2dzu5CoZAlJCSw6upqZi01NTVs8eLFzMPDo8Hr9ezZk6WmpjZY5urVqznL+OGHH1hNTQ177rnnTF5r6tSpTKlUNnitW7dusb59+5osa9y4cayqqoqtXLmS85iUlBRmC8LDw42W7+PjY5e6cnFxYUuXLmU6nY4xxphGo2HJycmMx+NxniOVStmnn35qOKcpfMRc2yUlJbHKyspG2U6tVrMlS5Ywf3//Bu8/Li6OZWRkMFtTVlbGec2EhIQG3z1c53799dcW+RNjjCmVSrZs2TKj9tRqtVbXXXR0NPvpp58srhulUsnmz5/PfHx8GrxG37592YkTJ8wu2562l8vl7M0332Qikchkub6+vuyTTz5hGo2myco2xy+cuQ23RTtgChKADwgLqVTKJBIJ4/P5jb4/gUDAYmNjOQVgbGysTa7D4/GYVCplMpmswS8OCUDG8vPzWXR0tMXXnT17dr0XhbmNx9atWxv8sus/48ePN3n/FRUV7KGHHjKrrNjYWLZw4cJmJQAtqatp06ax6upqNnr0aLPtuGLFiibzEUts17NnT/b+++9bZbu7d++yAQMGWNyOfPjhh2YLZHsLwPLycs5zt2/fbtGL++zZs6xbt26cx9xvM2vrbuHChSZtfz+FhYWsX79+Fl2Dz+ez//mf/2mwbHva/sKFCywyMtKisocNG2ZWW22Psm0pAJuqDW9MO0AC0Ezn9/DwYMOGDWPLli1js2bNYuHh4UwsFju9AHRxcWHdunVjixYtYp9//jmbOHEi8/LyIgFo4hdmRESE1ddeunSpVY1Nu3btLLrOvn37OK8zd+5ci8qSSqXNSgBaWle+vr4Wv+yys7ObxEcstZ2pdpfLdkqlksXFxVl9/x999JFTCMC7d+9ynrt7926z/WnUqFHMzc3N5DPrhVtj686U7fXU1tayqKgoq6/x+eefm+xVtJftCwsLWUhIiFXljhkzxqS4tFfZthSATdmGW9MOkAA0UwC6u7uzdevWsbKyMlZ
bW8uqq6vZlStX2PTp05mfn5/FvWqOEIA8Ho+5urqy+Ph4dvXqVVZTU8MUCgUrLy9n33zzjVnDVq1RAI4cObJR1+bz+ezvv/+2uLGx9DNw4EDOoR1b2sQZBaAjPqNHj3a4jzjKdsnJyY3+UWlKIDtKAObl5XGe+8svv9jUn/QCsLF1JxAI2PHjx00+1xtvvNGoa0gkEnbp0iWH2l6n01ncq/jg55tvvjF6z/Ys25YCsCW24a0+nYJAIEB0dDRefPFFeHh4QCKRQCqVIiwsDB9++CEOHDiAiRMnIjAw0BD/rynh8XiGHJrvvvsutmzZgtDQUEilUojFYri7uyMpKQmPPvpokwetdjYyMjKwb9++Ri8CWrBggd3v9fjx46ioqKi3/fLlyygvLydjNpKDBw9CoVA41EccYbvCwkJ8/PHHjSpDrVZjyZIlTW6ja9euce5r27atU9adVqvF/PnzOfffuHEDn3/+eaMXjSxfvtyhtt+3bx8yMjIaVfbixYuhVqsdWnZT0hza8FYvAIVCIXr27AmpVGpYZaXP9evq6oqePXtizZo1OHr0KObPn48ePXrAy8sLEonEYWJQIBBAKpHC09MT3bp1w/z585Geno5Zs2YhICCgjtDj8XgQiUTo0aOHIVcx8S+ffvqpTco5cOCA3eOQabVaXL582aKXImE+tbW1uHTpkkN9xBG227ZtGzQaTaPL2bNnD4qKiprURrt37+bcFxwc7LR1d/z4ceTm5hrdt3XrVptEddi+fXs9EWFP25vKvSwUCjF48GA8++yzCAoK4jzu+vXrRlfY2rPspqQ5tOGtXgDqdDoUFhYa/eLoc/zKZDJERkZi9uzZOHr0KFJTUzFv3jz06dPHEPNPIpFAJBI1ShTqrycWi+Hq6goPdw/4+fnhscceQ/K8ZOzfvx/Hjx/HzJkzERYWxtkjqdFocP36dWi1WnrT3/fCfzDx/P1ERkZi7969KCsrQ15eHhYuXMh5LGMMv/zyi8X38NRTT+HcuXMoKyvD6dOnER0dbfL4e/fu1dtWWlpq8pwRI0bg/PnzKCsrQ3p6Ojp37tws7WVpXQHA0KFDce7cOZSXl+P06dMNhsG5e/euQ32kIdv16tULWVlZKC0txaFDhxASEmJxvZ0+fZpzn7+/P3bs2IGSkhLcvHkTycnJnKFFGGNIT09vMvtnZWVh06ZNRvd17doVXl5eVpXbvn17rFy5En/++Sfkcjny8/Oxf/9+8Pl8m9WdXgRaap82bdpg165dkMvlyMvLw4wZMziPValU+P333x1i+6qqKhw+fNjosd7e3sjMzMThw4exc+dOXL16Fc899xznffz00091/rZn2c7QLlnThtuiHTCb1j4HkMfjMV9fX5aRkWHWnDKdTsdUKhVTKBSssrKSXb9+naWlpbGVK1eyiRMnsri4OBYUFMR8fHyYr68vGzBggNFyq6ur2cCBA5mPjw/z9vZmQUFBLCoqio1KGsVmz57NvvzyS3bs2DFWUFDAqqqqWG1tLVOpVCYn0up0OlZTU8MOHz7MvL29aQ7gfWRnZ5uc3FtYWFjvnJdeesnkak9L5o+0adOmXmiAu3fvmpycbmxux2effcZ5fHh4eL3vQkFBAXN3d29WcwCtqau2bdsylUpV55w7d+4wV1dXznN+/PFHh/qIKdt5eHiwsrKyeqsiTc0RNma7mJgYiyalT5w4kfP4mTNnOnQOoEajYXl5eWzVqlUmbT19+nSr5m6NHj2aVVRUcN6rLevuP//5j9FrdO7c2ex5jYwxk6vb58+f7xDbX7p0ifO41atXG323BQQEmDX31p5l23oOoCPacFu1AzQH0HwBDLlcjpemvIQff/wR5eXlJrvRH+yla9++PYYMGYIZM2Zgw4YNSE9Px4ULF5CVlYWMjAysW7cOIpGoXjkikQjr16/HiRMncPbsWVy8eBEnTpzAtv/dhqVLl2LSpEno06cP2rRpA1dXV0gkEri4uJj81VZTU4OjR4/i1Vd
fRVlZGXX73ceVK1c4902fPh1t2rSpt33ixImc51hav2PGjKnnB35+fhgzZoxF5ahUKpPXeHDeZ9u2bTF27NhmZStr6uqZZ56Bi4tLvV6PZ555xml8xJTtnnvuOXh6etbZ1qVLFyQlJVlUd8Z6HADAy8sLI0aMqLd96tSpnGXZO9vGr7/+Cn9/f/j7+8PX1xcuLi4IDQ3F3LlzUVVVxdn+vv766xZfa/Dgwfj+++/h7u7ukLqrrKw0q9f5/u/pkCFD6m1/+eWXzfYve9m+uLiY87gnn3yy3jaZTIahQ4fCx8en3ufBUSl7lu0M7ZKlbbit2gFzoVUC+HcY+EbuDUyfPh07d+7E7Nmz8cgjjxhElykxKBAI6s21Y4zB3d3dMNfD2GIMgUBgGJ6zNHfwg9dSqVS4d+8eNmzYgA0bNqC0tJSyhzxAfHw8Tp48aXRf9+7djW43lYvUUgEYFhbGOSRlCaYaucjISOPPEdqhWdnKmrrq2LFjo+vX3j5ije0ezFTQEFwCg9M3TNy/sQnstkStVnOKFi6ef/55dOnSxbKXnFCIL7/80mRbbuu6MzbJX6PRcIpqLjt37tyZc5jxwWFwe9neVL1x2e/bb781yzb2LNsZ2iVbteGWtgMkAK0QUvqUTydPnsTAgQMxbdo0PProo5BKpYYUb+agF4YNHdOYVbparRZqtRrl5eVITU3F2rVrcf36ddTU1JAxjeDu7o4+ffpYdI5MJjPpL5bANYHZ1dXVYj/lgis1kkgsala2sqauuM4xZUNH+4gp23GtavXx8TH7XpRKJWpra43u48qrK5FImtUPA2sW6QwePJjzB4K96s6YeK6pqeH0Aa5rhIeHIysrq0ltHxAQwLlvxYoVGDBgQIPiuinKdoZ2ydI23BbtgCW0+iHgBw2jVqshl8uxf/9+PP/880hMTMTGjRtx7do1VFdXQ6VS2WSllbX3plQqUVlZibNnz2LVqlXo378/Zs6cifPnz5P4c2JoRbZ966q5168t7r8l9/q7u7sjJSXFqhehPp+qI+vOWHn2tI89yw4NDeVcdJOeno4BAwbg8OHDVt2DPctujm24o9sx6gE0Ibb0qwnPnDmDxYsXo2/fvkhMTES/fv0QEhJiWPV7/8cWaLVa6HQ6/P9A3dBqtaisrMTFixeRmZmJQ4cO4fz586itrYVSqYROpyOjNQL9UFRlZaVhDqiXlxetoibIR5yA6OhofP/995zDYw3Rrl07qsRGipIxY8Zwhms5efIknnrqKXTv3h1Tp07FpEmTzF6lbc+yCRKAjRaCWq0WNTU1hhARv/76K8RiMYKDg/H4448jOjoaXbt2Rfv27eHt7Q2BQGAQgjwezxCXz1jZKpXKIPKAf+ciarValJSU4NatW7hx4wZycnJw5swZnD17FtXV1VCr1VCr1U3SC9mSqKiowIYNG3D48GGcOHGCc/iEIB9pCT5y/PhxowLKmduR2NhYTJ48GS+//DLEYrHV5Xh4eLRqP7aF7efMmYNvv/3W5AKGc+fO4e2338a8efPw3HPPYc6cOZxzZx1VNkEC0GZiUC++qqurUV5ejitXrkAoFBo+Pj4+CAsLQ7t27RAYGAhvb2+EhYVhxIgR9USgSqXC1q1bcfnyZZSWluLevXsoKChAUVGRYZhZp9NBo9EYPoRt2L59O15//XXKqEG0Gh9hjJlc5dwUdOrUCS+88ILhb4FAgLZt2yIiIgKdOnWyS7aP1vruaqztO3fujKVLl2Lu3LkNHqtQKLB161Z8++23GDlyJNavX2+yF9aeZRMkAO2CVquFVquFUqk0bCstLcWNGzcMw8ECgQAxMTFITEysJwA1Gg2+++47ZGVlGcSe/kPYjy+//BKvvfYaVQRBPtLEREREYPHixVQRzYTZs2dDpVKZTHX3oPDcu3cvMjMzsWPHDqNhbhxRNsENLQKx8S8tjUYDlUoFhUJh+HBNYFUqlVAoFHV6/Aj7ceXKFbz99ttUEQT5CEFYKhb4fLz
//vv44Ycf4Ovra/Z5JSUlGDNmDC5cuNAkZRMtWAByiSvG2L9xsolG1WFLWlm4evVqKBQKMjhBPkIQVjJ27Fjk5uZi+fLl8PPzM+uciooKjB8/vsH3iT3LJurTrIeAmY6hvLzcaLytkpIS6Bj1qJkj/kpLS432Psrl8hbzpVKr1fjxxx9NHvPII49g6tSp6N69O7y8vHDhwoVml0WDIB8xRkhICMaNG2fROQ899BA5RQvAHrZ3c3PDvHnzMHPmTBw4cADbtm1Damoq1Go15znZ2dk4deoU+vbt22RlEy1IACqUCuzevRuPPvooPDw8wOfzodFoUFJSgtTUVJOrioh/UalU+Omnn/DKK69AKBTCxcUFOp0ONTU12L17d505js2Za9eumUzCvXnzZrz00kt1tgUGBpKDtCJaso907NgRK1asICO3Quxpe7FYjFGjRmHUqFG4ffs2PvroI6xbt45TrG3YsMFskWbPsol/adZDwGq1Gj/88AM++eQT5Ofno7y8HDk5OYbAyDSnrmG0Wi1ycnIwa9YsXLx4EeXl5cjPz8dHH32ElJSUFiOiL126xLkvKSmp3oudaH2QjxCE9QQFBeGjjz7CgQMHOLN3/P77705XdmumeQ8BM4bKykqsXr0aX3zxBaRSKaqqqlBVVWWyu9iR6EO5PHg/Go3GaYZXlUol9uzZg0OHDsHd3R3V1dWora1tUbHxrl+/zrlv2LBhLeIZuXye5sa0Dh+xVSD6VtkTYuO6M5bRwZ72cSbbDx48GJMnT8amTZvq7btx4wY0Go3VaVDtWTYJwGYqAvWrbXk8nlO97PRzEdPS0urlY1QoFLh3955T3K8+KLVKpTI6p7IlUFlZybkvODjY6HZnjL3I4/E49+Xl5RndfufOHWrpnMBHTNlOLpcb3W5JekeRSAQ3NzdUVVXV28eVsaSyshIpKSlG93Xt2tXi3MjNFVN1ZyyvLwBUV1dzlufp6Vlvm0wmA5/PNzoyxTVaVVZWht27dxvd1717d8TFxdnd9qdOnTK6ytbd3R3PPvus0fOfeOIJoyJNP0VLP3XCnmU7K/ZuB1qVAHTmng6dToebN2/i1VdfrWd0xpjJ5OBUh7bF1BeooKDA6PabN2865YuKi5ycHKPb//vf/5K6cwIfMWW78+fPG91uaQBff39/oyLg6tWrRo8/duwYXn75ZaP7Jk6c2GoEoKm6u3z5MrRabb1ePVO2MZZ9RCAQwNfXF3fv3q2379q1a0bLOXToEKd9XnnlFYMAtKft9+7di1WrVhl9nqFDhxp91nv37nHWzf1ZXexZtjP/2LB3O2AuNGZgZ7RaLaqqqlBZWVnnU1VVRXMUHYipYYEjR44Y3Z6enu50zyGRSDj3/fjjj/V6ATMzM3H48GFyACfwEVO2++GHH+q9vM+dO8fZQ2NKxBjj9u3bOHDgQL0fqJ9//jlnWd7e3q3K/lx1V11djW+++abeD+W1a9dyluXu7m7RNa5du4ajR4/We3ds2LCB8xoP5sS1l+3bt2/P+W77+uuv621Xq9XYuXOn0XNcXFzqiDp7lu2sOKIdIAFIEPdhKqZUSkoKfv3113q/uFavXu10z+Hj48O5T6lUon///tixYweysrKwfv16JCYmkvGdxEdM2a64uBj9+/fHvn37cObMGWzYsAH9+/e3eC5zp06dOPe98MIL+Pjjj5GdnY2MjAxMnDgRBw8e5Dw+KiqqVdnfVN298cYbWLJkCf7++29kZGRg9OjRSEtL4zy+W7duRrcby8mrF5TPPPMM1q5dizNnziA9PR3PP/88MjIyOK/xyCOPOMT2vXr14jxu4cKF+OWXXwx/37lzBy+++CJOnTpl9Pj+/fvXma9oz7KdFUe0A2b/6KVmn2gNmMoXqdPpkJCQgClTpqBLly7Iy8vD119/bXQ4pakJCQkxuT8vL8/imF+EY3ykIdtlZWUhKSmpUc/Qt29fbN++3ei+iooKzJkzx6xyeDwehg4d2qrsb6rulEo
lFixYgAULFphVd0888QTnNfbu3Wt0n1wuNzsLjUAgwJNPPukQ2/fq1Qtt2rRBUVFRvWPLy8sxdOhQBAcHw8/PDxcuXDApVp5++uk6f9uzbGfFEe2AuVAPoAXweDxDnt+m/BCW89hjj5ncr9FosGnTJsyePRvr1q1zSvGn70EwNYmYcF4fcYTtxo8fD5lM1uhynn32Wc6FLy0VW9XdiBEjOBciTJo0yeQcMHOZMGFCvZRp9rK9QCDAG2+8YfKcwsJCnD171qRACwwMxLRp0+oJWXuV7aw4UxtOPYAWiD93N3eEdwpvsmXmjDHI5XLcuHGD5g9aSFBQEGJjY/Hnn3826+fw8vJCXFwcTp8+TUZtZj7iCNt5eXlh+fLlmDFjhtVluLm5YcmSJa3O/raoO1dXV5NBlwMCArBo0SK8++67Vl/D29sbCxcudKjtZ82aha+++gq5ublWl71y5Uq4uro6tGxqw0kA2gxvH2/87//+L9q1a9ckPXFKpRIbNmzA0qVLW1SMPkexYMECDB8+3OzjExMT602edgZee+01ixoPrvAQhON9xFLbcQ2PmeLNN9/Er7/+itTUVIufXyAQYNu2bZxz1Vo6jak7APjss8/QpUsXk8e88847OHLkSL05pWa9sIVC7NixA6GhoQ61vVQqxd69e/HEE09whsVpyO8nTZpkdJ89y3ZWHNEOmAONJ5oJYwzFxcXYunUr+Hw+XF1dHfqRSqVQKBTYtm1bi0nP5miGDRuGWbNmmXXs4CcHY+PGjU75HBMmTEBCQoLZz5ycnEzGdxIfscR2SUlJmD59uuWNOp+PnTt3Wpy5xN3dHfv378fIkSNbrf2trTuZTIZt27aZJUQEAgH27dtn8VxdLy8vHDx40OTcTHvaPioqCr/88gvnyl0u5s2bh08//dTkMfYsu7m34da2AyQAbYxSqcSWLVtw7tw5hwcJVqlU+PHHH5Gfn0/Dv41g1apVWLNmDedcGR6PhylTpiDtQJpN5urYA4FAgF27dmHKlCmcx4hEIiQnJ2PPnj1O+xyt0UfMsZ1YLMaiRYuwa9cuoxklzEEikWDz5s3Yvn07YmNjG7ynSZMmIScnh1aNW1h3QqEQY8eOxe+//47x48dbLBi3bt2K6OjoBq8xdepUnD9/HoMHD25S2/fu3RvZ2dl4++236yU3eJA+ffrg+PHjWL58uVl+bM+ym2Mbbot2oCF4rIHIvzU1NRg1ahSOHDnCGVG8NeHi4oL4+Hh8++239eIw2QudTofbt29j8ODBuHTpUqsXgEKhEEOGDMGuXbusnvRcVlaGtLQ05OTk4NatW5BIJOjQoQOSkpLQtWtXAP/GosrIyDAaHNvf379emIyioiLOYMwxMTFGl//n5uZyBmrt0aNHg1Htb9y4gZ07d6KgoAByuRyenp7o1KkTXnjhBQQFBdnkGubw559/ory83OiLzliCdkfVlalzHn74YQQEBDjUR7hsV15eDl9fX0REROD55583PP+qVas4e3BTUlIwZswYs+yTnZ2N3377DTdv3sSdO3fg4uICHx8fREVFIT4+3mRoisagVqtx7Ngxo/t8fX0bFD/mYo0/mcuDdScWixEYGIiOHTsiISGh0d8fxhiysrJw8uRJ5Ofn4+7duxCJRPD29kZMTAzi4+ONZhax9v5tZfuysjIcO3YMJ0+eRFlZGfh8PsRiMdq3b4+RI0ciPDzc6nu2VdmW+kVTt+H2bgeMOZ9Jqqur2VNPPcUEAgED0Oo/PB6PeXp6sm3btjGFQsEcQW1tLVu3bh1zdXUlGwBMKBSyhIQEVl1dzQiiJbNy5UrO70FKSgpVEEFQO2B1uTQEbMWvtcrKSqxYvgLFxcV2743T6XQoLS3FF198QQs/CIIgCIKwzWgaVYF1ouza9Wv48ssv8d5779l1+TnN/SOIloFcLkd+fr7RfSEhIUaHl+yVAYAgCGoHSABaiUKhwObNm5GUlITo6Gi7xAak3j+CaDmkpaVhwoQJRve
99957WLZsWb3tpkI/0OIegqB2oDHtgNlDwJR9oC6MMZSWluLDDz+0W4w16v0z4bh8Pvkk0ax4MHfr/WzevBlyubzONqVSiT179nCeY2oBC0EQ1A7YRABKJBKymhE0Gg0yMjLw888/o7a2FhqNBlqt1iYfjUaDkpIS6v3j+DHSXKK+E4Se8PBwzvAWxcXFePzxx3H06FHcu3cP58+fR1JSEgoLCzm/A507d6ZKJQhqB6y+lwbHLXk8HsLCwiAQCBwe+87ZYYyhoqICS5cuRfv27eHh4WGzstVqNdLS0pCXl0e9fw8gEAgQGRlJeZGJZoVIJMLYsWOxdetWo/tzcnIwaNAgs8qKiYmBt7c3VSpBUDtgPwEoEAjwxBNPYMuWLZSBwgg6nQ4XL15EYmKizYcklUol9f4ZQSKRoF+/fiQAiWbHq6++ytnwW0JzSXxPEITztgMNBoJmjOHu3buIj4/HP//8Q71RRJMiEAjw6KOPIjU1lXpAiGbJ+PHjsX37dqvP79q1K7KysmhqDkFQO9CodqDBLhQejwcvLy/Mnz/fbtHiCcKsXys8Hnx9ffHBBx/QHECi2fLpp5+iS5cuVp3r4eGBHTt2kPgjCGoHGt0OmDWG5uLigqFDh+Ldd9+Fn58fDb0RDofP58PPzw+LFy9G3759KQQG0Wzx9vbGsWPHEBMTY9F5/v7++PXXX02mlyMIgtoBcxEsXLhwYUMH8Xg8uLi4ICoqCl26dMH5nPOoqamBTqejIWHCbuj9TiaTISIiAmvWrEFSUhL1/hHNHldXV0yePBk6nQ7Z2dlQqVQmf/y8/PLL2L17NyIiIqjyCILaAdu8YxuaA3g/jDGo1WqUlJTg0KFDSEtLw99//43KykpYUAxH4Y09nTWJARljKC8vryeE9aFKGuqpUqlURuMI8vl8eHp6ttpYdzweD54enugZ3ROJiYlISEiAr68vxGIxtRpEi6KiogJ79uzBqVOnUFBQgMrKSvj6+sLf3x+9evVCUlIS/P39qaIIgtoB275nmRXKjTEGlUoFnU4HrVYLhULReAFoAyHWFOfW1tZi0KBByM3NrVOOm5sbvvrqKzz22GOcWUK0Wi1+++03vPzyy3VEII/HQ2hoKNLT0yGTyVqtAJTJZODz+RAKhRAKhTT1gCAIgiBshFX5y3g8Xp2emNY8JFddXW1UmPD5fPj4+MDf3x8uLi5Gz1Wr1fD29jbayycUCuHv79+q65YyfRAEQRCEEwlAelGbXy/6jzX1ZupcgiAIgiCIJhWALQGtVmvVULBWqzW5T6vVcoo4rVbLuYiGMQaNRmN19hWuYWeCIAiCIIhWrxK0Wi2qq6uRl5eHmpoai0VgTU2N0WwdGo0G586dg5ubGwQCgdFz9ccYE5G1tbXIysrizBloCjc3N4SEhJi8NkEQBEEQrRerFoG0FDQaDa5evYoZM2bgr7/+MtmbxwVjDFVVVUZXAUulUs75f3rUajVqa2rrrWLm8/lwc3OzaghYKBQiNjYWa9asQadOnUgEEgRBEARBAlBPTU0NJkyYgAMHDkChULQco/7/RTrDhg3DN998Q3HzCIIgCIKoQ6uNq6HT6SCXUJCkKQAAAORJREFUy/Hbb7+1KPEH/NsrqVQqcfz4cVRUVKAVa3yCIAiCIEgA/h88Hg8ikahFL5YQiUQ0/EsQBEEQBAnA+wWgu7s7xo0bBw8PjxYVZJjH48HDwwPjxo2Du7s7hZIhCIIgCKIOrXoVsEgkwty5c8Hj8fDtt9/izp07zX64lMfjISAgAC+99BJmzZoFiURCXk4QBEEQRF29wGiCGEEQBEEQRKuCkqsSBEEQBEGQACQIgiAIgiBIABIEQRAEQRAkAAmCIAiCIAgSgARBEARBEAQJQIIgCIIgCIIEIEEQBEEQBNGk/D88LwuWCBiSZgAAAABJRU5ErkJggg==) #Assignment 6 ``` # In this assignment, we will train a 
U-Net classifer to detect mitochondria # in electron microscopy images! ### IMPORTANT ### # # Activate GPU support: Runtime -> Change Runtime Type # Hardware Accelerator: GPU # ### IMPORTANT ### # load numpy and matplotlib %pylab inline # load the unet tensorflow code !pip install git+https://github.com/jakeret/unet.git # .. and use it! # Note: There are a ton of U-Net implementations but this one is easier to use! import unet ``` **Task 1**: Study the unet circle example and learn about mitochondria! [15 Points] ``` # The unet package allows to train a U-Net classifier with little code. # As discussed in class, the U-Net can be used to label/segment/annotate images. # TODO: Please study the Circles example # https://github.com/jakeret/unet/blob/master/notebooks/circles.ipynb # TODO: ... and look how the (artificial) dataset is generated # https://github.com/jakeret/unet/blob/master/src/unet/datasets/circles.py # 1) Question # TODO: Please describe what the U-Net is trained to do. # # TODO: YOUR ANSWER ``` U-Net is trained to predict the mask which will be used to segment the mitochondria in the input image. ``` # 2) Question # TODO: In circles.py, what does the following mean: # channels = 1 # classes = 2 # # TODO: YOUR ANSWER ``` In CNNs, channels are essentially concerned with capturing different features on the basis of which, distinction will be made and learned by the network between inputs. If the image is an RGB image, then the number of channels will be 3, one for each color. The variable "channels" has the value of 1 as grayscaled images are used. We have two classes one for foreground and one for background, so that's why the value of the "classes" variable is 2. ``` # 3) Question # TODO: What are mitochondria and what is their appearance in # electron microscopy data? # Hint: You can validate your answer in Task 4! 
# # TODO: YOUR ANSWER ``` Mitochondria is a cell organelle which is also known as the power house of the cell, is a bean shaped component of the cell. **Task 2**: Setup a datagenerator for mitochondria images! [45 Points] ``` # TODO: # Download https://cs480.org/data/mito.npz to your computer. # Then, please upload mito.npz using the file panel on the left. # The code below is similar to the circles.py file from Task 1. # We follow Tensorflow conventions to design a tf.data.Dataset for training # the U-Net. # # TODO: Please add four different data augmentation methods in the code block # below. (image normalization to 0..1, horizontal data flip, vertical data flip, # rotation by 90 degrees) # # Hint: https://github.com/jakeret/unet/blob/master/src/unet/datasets/oxford_iiit_pet.py#L25 import tensorflow as tf import tensorflow_datasets as tfds from tensorflow_datasets.core import DatasetInfo from typing import Tuple, List import numpy as np IMAGE_SIZE = (256, 256) channels = 1 classes = 2 def load_data(count:int, splits:Tuple[float]=(0.7, 0.2, 0.1), **kwargs) -> List[tf.data.Dataset]: return [tf.data.Dataset.from_tensor_slices(_build_samples(int(split * count), **kwargs)) for split in splits] # loaded = np.load('mito.npz') # loadedimages = loaded['arr_0'][0].copy() # loadedmasks = loaded['arr_0'][1].copy() # print(loadedimages) # print(loadedmasks) def _build_samples(sample_count:int, **kwargs) -> Tuple[np.array, np.array]: # here we load the mitochondria data loaded = np.load('mito.npz') loadedimages = loaded['arr_0'][0].copy() loadedmasks = loaded['arr_0'][1].copy() # now let's go to numpyland images = np.empty((sample_count, IMAGE_SIZE[0], IMAGE_SIZE[1], 1)) labels = np.empty((sample_count, IMAGE_SIZE[0], IMAGE_SIZE[1], 2)) for i in range(sample_count): image, mask = loadedimages[i], loadedmasks[i] image = image.reshape((IMAGE_SIZE[0], IMAGE_SIZE[1], 1)).astype(np.float) mask = mask.reshape((IMAGE_SIZE[0], IMAGE_SIZE[1], 1)) # # TODO: Normalize the image to 0..1 # 
# TODO: YOUR CODE image = tf.cast(image, tf.float32)/255.0 # # TODO: Use Tensorflow to flip the image horizontally # if tf.random.uniform(()) > 0.5: # # TODO: YOUR CODE # image = tf.image.flip_left_right(image) mask = tf.image.flip_left_right(mask) # # TODO: Use Tensorflow to flip the image vertically # if tf.random.uniform(()) > 0.5: # # TODO: YOUR CODE # image = tf.image.flip_up_down(image) mask = tf.image.flip_up_down(mask) # # TODO: Use Tensorflow to rotate the image 90 degrees # if tf.random.uniform(()) > 0.5: # # TODO: YOUR CODE # image = tf.image.rot90(image) mask = tf.image.rot90(mask) # augmentation done, let's store the image images[i] = image # here we split the mask to background and foreground fg = np.zeros((IMAGE_SIZE[0], IMAGE_SIZE[1], 1), dtype=np.bool) fg[mask == 255] = 1 bg = np.zeros((IMAGE_SIZE[0], IMAGE_SIZE[1], 1), dtype=np.bool) bg[mask == 0] = 1 labels[i, :, :, 0] = bg[:,:,0] labels[i, :, :, 1] = fg[:,:,0] return images, labels ``` **Task 3**: Let's train the U-Net! [25 Points] ``` # # We can now create our training, validation, and testing data by calling # our methods from Task 2. # train, val, test = load_data( 660, splits=(0.7, 0.2, 0.1) ) # 1) Question # TODO: We have in total 660 images + 660 masks. Based on the code above, # how many images are used for training alone? # # TODO: YOUR ANSWER ``` There are around 462 images used for training. ``` # # Let's setup the U-Net! # LEARNING_RATE = 1e-3 unet_model = unet.build_model(channels=channels, num_classes=classes, layer_depth=5, filters_root=64, padding="same") unet.finalize_model(unet_model, learning_rate=LEARNING_RATE) # # And, let's setup the trainer... # trainer = unet.Trainer(checkpoint_callback=False, learning_rate_scheduler=unet.SchedulerType.WARMUP_LINEAR_DECAY, warmup_proportion=0.1, learning_rate=LEARNING_RATE) # # ...and train the U-Net for 50 epochs with a batch_size of 10! # # TODO: Please complete the code below. # Hint: Don't forget to use training and validation data. 
# Hint 2: This will take roughly 30 minutes! # trainer.fit(unet_model, train, val, epochs=50, batch_size=10 ) ``` **Task 4**: Let's predict mitochondria in the testing data! [15 Points] ``` # # After training, let's try the U-Net on our testing data. # # The code below displays the first 10 input images, the original masks by experts, # and the predicted masks from the U-Net. rows = 10 fig, axs = plt.subplots(rows, 3, figsize=(8, 30)) for ax, (image, label) in zip(axs, test.take(rows).batch(1)): prediction = unet_model.predict(image) # print(prediction.shape) ax[0].matshow(image[0, :, :, 0]) ax[1].matshow(label[0, :, :, 1], cmap="gray") ax[2].matshow(prediction[0].argmax(axis=-1), cmap="gray") # 1) Question # TODO: Why do we use the prediction[0].argmax(axis=-1) command # to display the prediction? # # TODO: YOUR ANSWER ``` As the shape of the prediction array is (1, 256, 256, 2), therefore picking the first value will result in all the predicted masks which have the dimensions of the input images to the network. The prediction results in probabilities and we want to pick the highest probability made by the model as that's what the model primarily implies. Hence the argmax in the code <code>prediction[0].argmax(axis=-1)</code> is used to pick the highest prediction. ``` # 2) Question # TODO: Is the quality of the segmentation good and how could we improve it? # Hint: Think along the lines of some traditional image processing rather than # increasing the training data size. # # TODO: YOUR ANSWER ``` There are few ways we can do this: <ol> <li>We can set a threshold and eliminate all the predicted masks which are less than that.</li> <li>We can incorporate shrink and expand algorithm to get rid of extraneous predicted masks.</li> </ol> ``` ``` **Bonus**: Compare against the state-of-the-art literature! 
[33 Points] ``` # # The following paper also uses a 2D U-Net to detect mitochondria: # # https://danielhaehn.com/papers/?casser2020fast # # 1) Question # # TODO: How many learnable parameters does your trained unet_model have? # Hint: You can use TF's Model Summary function to answer this. # # TODO: YOUR CODE unet_model.summary() ``` There are 31,030,658 training parameters. ``` # 2) Question # # TODO: How many parameters do Casser et al. use? # # TODO: YOUR ANSWER ``` The total number of trainable parameters are 1,178,480 + 780,053 = 1,958,533. ``` # 3) Question # # TODO: How did Casser et al. reduce the parameters? # # TODO: YOUR ANSWER ``` Casser et al. reduce the parameters by: 1. Reducing number of convolutional filters throughout the network. 2. Replacing the transpose convolutions with light weight bilinear upsampling layers in decoder. ``` # 4) Question # # TODO: Why did Casser et al. reduce the parameters? # # TODO: YOUR ANSWER ``` Upon investigation, it was discovered by Casser et al. that around 33% of the ReLU activations are dead, meaning the network is too complex to the problem of mitochondria mask prediction. Therefore, the authors, decided to design a new modified version of the U-Net which utilizes 99.7% of the network. ``` # # You made it!! # # _ ___ /^^\ /^\ /^^\_ # _ _@)@) \ ,,/ '` ~ `'~~ ', `\. # _/o\_ _ _ _/~`.`...'~\ ./~~..,'`','',.,' ' ~: # / `,'.~,~.~ . , . , ~|, ,/ .,' , ,. .. ,,. `, ~\_ # ( ' _' _ '_` _ ' . , `\_/ .' ..' ' ` ` `.. `, \_ # ~V~ V~ V~ V~ ~\ ` ' . ' , ' .,.,''`.,.''`.,.``. ', \_ # _/\ /\ /\ /\_/, . ' , `_/~\_ .' .,. ,, , _/~\_ `. `. '., \_ # < ~ ~ '~`'~'`, ., . `_: ::: \_ ' `_/ ::: \_ `.,' . ', \_ # \ ' `_ '`_ _ ',/ _::_::_ \ _ _/ _::_::_ \ `.,'.,`., \-,-,-,_,_, # `'~~ `'~~ `'~~ `'~~ \(_)(_)(_)/ `~~' \(_)(_)(_)/ ~'`\_.._,._,'_;_;_;_;_; # ```
github_jupyter
![data-x-logo.png](attachment:data-x-logo.png) --- # Pandas Introduction **Author list:** Ikhlaq Sidhu & Alexander Fred Ojala **References / Sources:** Includes examples from Wes McKinney and the 10 min intro to Pandas **License Agreement:** Feel free to do whatever you want with this code ___ ### Topics: 1. Dataframe creation 2. Reading data in DataFrames 3. Data Manipulation ## Import package ``` # pandas import pandas as pd # Extra packages import numpy as np import matplotlib.pyplot as plt # for plotting # jupyter notebook magic to display plots in output %matplotlib inline plt.rcParams['figure.figsize'] = (10,6) # make the plots bigger ``` # Part 1: Creation of Pandas dataframes **Key Points:** Main data types in Pandas: * Series (similar to numpy arrays, but with index) * DataFrames (table or spreadsheet with Series in the columns) ### We use `pd.DataFrame()` and can insert almost any data type as an argument **Function:** `pd.DataFrame(data=None, index=None, columns=None, dtype=None, copy=False)` Input data can be a numpy ndarray (structured or homogeneous), dictionary, or DataFrame. 
### 1.1 Create Dataframe using an array ``` # Try it with an array np.random.seed(0) # set seed for reproducibility a1 = np.random.randn(3) a2 = np.random.randn(3) a3 = np.random.randn(3) print (a1) print (a2) print (a3) # Create our first DataFrame w/ an np.array - it becomes a column # Check type # DataFrame from list of np.arrays # notice that there is no column label, only integer values, # and the index is set automatically # We can set column and index names # Add more columns to dataframe, like a dictionary, dimensions must match # DataFrame from 2D np.array np.random.seed(0) array_2d = np.array(np.random.randn(9)).reshape(3,3) # Create df with labeled columns ``` ### 1.2 Create Dataframe using an dictionary ``` # DataFrame from a Dictionary dict1 = {'a1': a1, 'a2':a2, 'a3':a3} # Note that we now have columns without assignment # We can add a list with strings and ints as a column ``` ### Pandas Series object Every column is a Series. Like an np.array, but we can combine data types and it has its own index ``` # Check type # Dtype object # Create a Series from a Python list, automatic index # Specific index # We can add the Series s to the DataFrame above as column Series # Remember to match indices # We can also rename columns # We can delete columns # or drop columns, see axis = 1 # does not change df1 if we don't set inplace=True # Print df1 # Or drop rows ``` ### 1.3 Indexing / Slicing a Pandas Datframe ``` # Example: view only one column # Or view several column # Slice of the DataFrame returned # this slices the first three rows first followed by first 2 rows of the sliced frame # Lets print the five first 2 elements of column a1 # This is a new Series (like a new table) # Lets print the 2 column, and top 2 values- note the list of columns ``` ### Instead of double indexing, we can use loc, iloc ##### loc gets rows (or columns) with particular labels from the index. 
#### iloc gets rows (or columns) at particular positions in the index (so it only takes integers). ### .iloc() ``` # iloc # Slice # iloc will also accept 2 'lists' of position numbers # Data only from row with index value '1' ``` ### .loc() ``` # Usually we want to grab values by column names # Note: You have to know indices and columns # Boolean indexing # Return full rows where a2>0 # Return column a3 values where a2 >0 # If you want the values in an np array ``` ### More Basic Statistics ``` # Get basic statistics using .describe() # Get specific statistics # We can change the index sorting ``` #### For more functionalities check this notebook https://github.com/ikhlaqsidhu/data-x/blob/master/02b-tools-pandas_intro-mplib_afo/legacy/10-minutes-to-pandas-w-data-x.ipynb # Part 2: Reading data in pandas Dataframe ### Now, lets get some data in CSV format. #### Description: Aggregate data on applicants to graduate school at Berkeley for the six largest departments in 1973 classified by admission and sex. https://vincentarelbundock.github.io/Rdatasets/doc/datasets/UCBAdmissions.html ``` # Read in the file # Check statistics # Columns # Head # Tail # Groupby # Describe # Info # Unique # Total number of applicants to Dept A # Groupby # Plot using a bar graph ```
github_jupyter
<b>General Tips</b> <i> <li>When the best course for visualizing certain data is unclear, start with a blank piece of paper.</li> <li>Sketch out potential views to see them side‐by‐side and determine what will work best for getting your message across to your audience.</li> <li>Create a version of the graph (let’s call it A), then make a copy of it (B) and make a single change. Then determine which looks better—A or B. Often, the practice of seeing slight variations next to each other makes it quickly clear which view is superior.</li> <li>At any point, if the best path is unclear, seek feedback. The fresh set of eyes that a friend or colleague can bring to the data visualization endeavor is invaluable.</li> <li>One of my biggest tips for success in storytelling with data is to allow adequate time for it. If we don’t consciously recognize that this takes time to do well and budget accordingly, our time can be entirely eaten up by the other parts of the analytical process. <li>Imitation really is the best form of flattery. If you see a data visualization or example of storytelling with data that you like, consider how you might adapt the approach for your own use. Pause to reflect on what makes it effective. Make a copy of it and create a visual library that you can add to over time and refer to for inspiration.</li> <li>There are a number of great blogs and resources on the topic of data visualization and communicating with data that contain many good examples. Here are a few of my current personal favorites: eagereyes.org / fivethirtyeight.com/datalab / flowingdata.com / thefunctionalart.com / theguardian.com/data / HelpMeViz.com / junkcharts.typepad.com / makeapowerfulpoint.com / perceptualedge.com / visualisingdata.com / vizwiz.blogspot.com / storytellingwithdata.com / wtfviz.net</li> <li>To the extent that it makes sense given the task at hand, don’t be afraid to let your own style develop and creativity come through when you communicate with data. 
Company brand can also play a role in developing a data visualization style; consider your company’s brand and whether there are opportunities to fold that into how you visualize and communicate with data. Just make sure that your approach and stylistic elements are making the information easier—not more difficult—for your audience to consume.</li> </i> <b>Recap on this Book:</b> <i> <ol> <li>Understand the context. Build a clear understanding of who you are communicating to, what you need them to know or do, how you will communicate to them, and what data you have to back up your case. Employ concepts like the 3‐minute story, the Big Idea, and storyboarding to articulate your story and plan the desired content and flow.</li> <li>Choose an appropriate visual display. When highlighting a number or two, simple text is best. Line charts are usually best for continuous data. Bar charts work great for categorical data and must have a zero baseline. Let the relationship you want to show guide the type of chart you choose. Avoid pies, donuts, 3D, and secondary y‐axes due to difficulty of visual interpretation.</li> <li>Eliminate clutter. Identify elements that don’t add informative value and remove them from your visuals. Leverage the Gestalt principles to understand how people see and identify candidates for elimination. Use contrast strategically. Employ alignment of elements and maintain white space to help make the interpretation of your visuals a comfortable experience for your audience.</li> <li>Focus attention where you want it. Employ the power of preattentive attributes like color, size, and position to signal what’s important. Use these strategic attributes to draw attention to where you want your audience to look and guide your audience through your visual. Evaluate the effectiveness of preattentive attributes in your visual by applying the “where are your eyes drawn?” test.</li> <li>Think like a designer. 
Offer your audience visual affordances as cues for how to interact with your communication: highlight the important stuff, eliminate distractions, and create a visual hierarchy of information. Make your designs accessible by not overcomplicating and leveraging text to label and explain. Increase your audience’s tolerance of design issues by making your visuals aesthetically pleasing. Work to gain audience acceptance of your visual designs.</li> <li>Tell a story. Craft a story with clear beginning (plot), middle (twists), and end (call to action). Leverage conflict and tension to grab and maintain your audience’s attention. Consider the order and manner of your narrative. Utilize the power of repetition to help your stories stick. Employ tactics like vertical and horizontal logic, reverse storyboarding, and seeking a fresh perspective to ensure that your story comes across clearly in your communication.</li> </ol> </i>
github_jupyter
``` #used an environment with directml # with the help of https://www.youtube.com/watch?v=gjVFH7NHB9s #ref to choose the env in jupyter notebook: https://towardsdatascience.com/get-your-conda-environment-to-show-in-jupyter-notebooks-the-easy-way-17010b76e874 import tensorflow as tf from tensorflow import keras import matplotlib.pyplot as plt import numpy as np tf.config.experimental.list_physical_devices() tf.test.is_gpu_available() from tensorflow.python.client import device_lib device_lib.list_local_devices() (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data() x_train.shape x_train[0].shape y_train[0] def plot_sample(index): plt.figure(figsize=(10,1)) plt.imshow(x_train[index]) plot_sample(0) plot_sample(3) y_train[3] classes = ['airplane','automobile','bird','cat','deer','dog','frog','horse','ship','truck'] classes[y_train[3][0]] X_train_scaled = x_train/255 X_test_scaled = x_test/255 # do one hot encoding y_train_categorical = keras.utils.to_categorical( y_train,num_classes=10,dtype='float32' ) y_train_categorical[3] y_test_categorical = keras.utils.to_categorical( y_test,num_classes=10,dtype='float32' ) #model building model = keras.Sequential([ keras.layers.Flatten(input_shape=(32,32,3)), keras.layers.Dense(3000,activation='relu'), keras.layers.Dense(1000,activation='relu'), keras.layers.Dense(10,activation='sigmoid'), ]) model.compile( optimizer='SGD', loss='categorical_crossentropy', #if one hot encoded metrics=['accuracy'] ) model.fit(X_train_scaled,y_train_categorical,epochs=50) classes[y_test[1][0]] classes[np.argmax(model.predict(np.array([X_test_scaled[1]])))] def get_model(): model = keras.Sequential([ keras.layers.Flatten(input_shape=(32,32,3)), keras.layers.Dense(3000,activation='relu'), keras.layers.Dense(1000,activation='relu'), keras.layers.Dense(10,activation='sigmoid'), ]) model.compile( optimizer='SGD', loss='categorical_crossentropy', #if one hot encoded metrics=['accuracy'] ) return model %%timeit -n1 -r1 with 
tf.device('/DML:0'): cpu_model = get_model() cpu_model.fit(X_train_scaled, y_train_categorical, epochs=1) %%timeit -n1 -r1 with tf.device('/CPU:0'): cpu_model = get_model() cpu_model.fit(X_train_scaled, y_train_categorical, epochs=1) %%timeit -n1 -r1 with tf.device('/DML:1'): cpu_model = get_model() cpu_model.fit(X_train_scaled, y_train_categorical, epochs=1) ``` ![image.png](attachment:image.png) ``` fashion_mnist = keras.datasets.fashion_mnist (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data() class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] train_images.shape plt.imshow(train_images[0]) train_labels[0] class_names[train_labels[0]] plt.figure(figsize=(3,3)) for i in range(5): plt.imshow(train_images[i]) plt.xlabel(class_names[train_labels[i]]) plt.show() train_images_scaled = train_images / 255.0 test_images_scaled = test_images / 255.0 def get_model(hidden_layers=1): layers = [ keras.layers.Flatten(input_shape=(28,28)), keras.layers.Dense(hidden_layers,activation='relu'), keras.layers.Dense(10,activation='sigmoid') ] # Your code goes here-----------START # Create Flatten input layers # Create hidden layers that are equal to hidden_layers argument in this function # Create output # Your code goes here-----------END model = keras.Sequential(layers) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) return model model = get_model(1) model.fit(train_images_scaled, train_labels, epochs=5) np.argmax(model.predict(test_images_scaled)[2]) test_labels[2] tf.config.experimental.list_physical_devices() ``` 5 Epochs performance comparison for 1 hidden layer ``` %%timeit -n1 -r1 with tf.device('/CPU:0'): cpu_model = get_model(1) cpu_model.fit(train_images_scaled, train_labels, epochs=5) %%timeit -n1 -r1 with tf.device('/DML:0'): cpu_model = get_model(1) cpu_model.fit(train_images_scaled, train_labels, epochs=5) %%timeit -n1 -r1 
with tf.device('/DML:1'): cpu_model = get_model(1) cpu_model.fit(train_images_scaled, train_labels, epochs=5) ``` 5 Epocs performance comparison with 5 hidden layers ``` %%timeit -n1 -r1 with tf.device('/CPU:0'): cpu_model = get_model(5) cpu_model.fit(train_images_scaled, train_labels, epochs=5) %%timeit -n1 -r1 with tf.device('/DML:0'): cpu_model = get_model(5) cpu_model.fit(train_images_scaled, train_labels, epochs=5) %%timeit -n1 -r1 with tf.device('/DML:1'): cpu_model = get_model(5) cpu_model.fit(train_images_scaled, train_labels, epochs=5) ```
github_jupyter
``` import json import bz2 import regex from tqdm import tqdm from scipy import sparse import pandas as pd import numpy as np import nltk import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline %pylab inline responses = [] with bz2.BZ2File('banki_responses.json.bz2', 'r') as thefile: for row in tqdm(thefile): resp = json.loads(row) if not resp['rating_not_checked'] and (len(resp['text'].split()) > 0): responses.append(resp) ``` # Домашнее задание по NLP # 1 [100 баллов] ## Классификация по тональности В этом домашнем задании вам предстоит классифицировать по тональности отзывы на банки с сайта banki.ru. [Ссылка на данные](https://drive.google.com/open?id=1CPKtX5HcgGWRpzbWZ2fMCyqgHGgk21l2). Данные содержат непосредственно тексты отзывов, некоторую дополнительную информацию, а также оценку по шкале от 1 до 5. Тексты хранятся в json-ах в массиве responses. Посмотрим на пример отзыва: ``` responses[99] ``` ## Часть 1. Анализ текстов [40/100] 1. Посчитайте количество отзывов в разных городах и на разные банки 2. Постройте гистограмы длин слов в символах и в словах (не обязательно) 3. Найдите 10 самых частых: * слов * слов без стоп-слов * лемм * существительных 4. Постройте кривую Ципфа 5. Ответьте на следующие вопросы: * какое слово встречается чаще, "сотрудник" или "клиент"? * сколько раз встречается слова "мошенничество" и "доверие"? 6. В поле "rating_grade" записана оценка отзыва по шкале от 1 до 5. Используйте меру $tf-idf$, для того, чтобы найти ключевые слова и биграмы для положительных отзывов (с оценкой 5) и отрицательных отзывов (с оценкой 1) ## Часть 2. Тематическое моделирование [20/100] 1. Постройте несколько тематических моделей коллекции документов с разным числом тем. Приведите примеры понятных (интерпретируемых) тем. 2. Найдите темы, в которых упомянуты конкретные банки (Сбербанк, ВТБ, другой банк). Можете ли вы их прокомментировать / объяснить? Эта часть задания может быть сделана с использованием gensim. ## Часть 3. 
Классификация текстов [40/100] Сформулируем для простоты задачу бинарной классификации: будем классифицировать на два класса, то есть, различать резко отрицательные отзывы (с оценкой 1) и положительные отзывы (с оценкой 5). 1. Составьте обучающее и тестовое множество: выберите из всего набора данных N1 отзывов с оценкой 1 и N2 отзывов с оценкой 5 (значение N1 и N2 – на ваше усмотрение). Используйте ```sklearn.model_selection.train_test_split``` для разделения множества отобранных документов на обучающее и тестовое. 2. Используйте любой известный вам алгоритм классификации текстов для решения задачи и получите baseline. Сравните разные варианты векторизации текста: использование только униграм, пар или троек слов или с использованием символьных $n$-грам. 3. Сравните, как изменяется качество решения задачи при использовании скрытых тем в качестве признаков: * 1-ый вариант: $tf-idf$ преобразование (```sklearn.feature_extraction.text.TfidfTransformer```) и сингулярное разложение (оно же – латентый семантический анализ) (```sklearn.decomposition.TruncatedSVD```), * 2-ой вариант: тематические модели LDA (```sklearn.decomposition.LatentDirichletAllocation```). Используйте accuracy и F-measure для оценки качества классификации. Ниже написан примерный Pipeline для классификации текстов. Эта часть задания может быть сделана с использованием sklearn. ``` from sklearn.pipeline import Pipeline from sklearn.ensemble import RandomForestClassifier # !!! На каждом этапе Pipeline нужно указать свои параметры # 1-ый вариант: tf-idf + LSI # 2-ой вариант: LDA # clf = Pipeline([ # ('vect', CountVectorizer(analyzer = 'char', ngram_range={4,6})), # ('clf', RandomForestClassifier()), # ]) clf = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('tm', TruncatedSVD()), ('clf', RandomForestClassifier()) ]) ``` ## Бонус [20] Используйте для классификации эмбеддинги слов. Улучшилось ли качество?
github_jupyter
# Dive into Deep Learning, Classifying images The goal of this blog post is to explain the process of training a deep learning model to classify images (pixels) of insects: beetles, cockroaches, and dragonflies. The neural network (model) will be evaluated on how it classfied the images using Shapley Additive Explanations. The first step is to import all of the necessary libraries. This neural network will be using the tensorflow package and specifically, the keras module. ``` import necessary libraries import matplotlib.pyplot as plt import numpy as np import pandas as pd #import tensorflow as tf #from tensorflow import keras #import shap #from PIL import Image #import urllib import io import random import os from keras.preprocessing.image import ImageDataGenerator,load_img from keras.utils import to_categorical from sklearn.model_selection import train_test_split from tensorflow.keras import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization ``` The next step is to specify the image size because all the images need to be of the same size in the model and specify three color channels. ``` image specifications width=200 height=200 size=(width,height) channels = 3 ``` Now, we will create the training data by finding the file paths of each image. The file paths of the images will be added to a training file list and the corresponding image classifiation (type of insect) will be added to a categories list. These lists are appended to a training dataframe. 
``` # training data
``` # neural net model model=Sequential() model.add(Conv2D(32,(3,3),activation='relu',input_shape=(width,height,channels))) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3,3),activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(128,(3,3),activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(512,activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.2)) model.add(Dense(3,activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='rmsprop',metrics=['accuracy']) model.summary() ``` The training data will be split into training and validation sets to be used in the model. ``` train_df,validate_df = train_test_split(df_train,test_size=0.2, random_state=42) total_train=train_df.shape[0] total_validate=df_test.shape[0] batch_size = 10 train_df = train_df.reset_index(drop=True) validate_df = validate_df.reset_index(drop=True) ``` After some research, ImageDataGenerator seemed like the most optimal solution to create this image classification model as it can efficiently load images in batches. The output shows the number of images in each data set. 
``` train_datagen = ImageDataGenerator(rotation_range=15, rescale=1./255, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1) train_generator = train_datagen.flow_from_dataframe(train_df, x_col='filename',y_col='category', target_size=size, class_mode='categorical', batch_size=batch_size) validation_datagen = ImageDataGenerator(rescale=1./255) validation_generator = validation_datagen.flow_from_dataframe( validate_df, x_col='filename', y_col='category', target_size=size, class_mode='categorical', batch_size=batch_size) test_datagen = ImageDataGenerator(rotation_range=15, rescale=1./255, shear_range=0.1, zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1, height_shift_range=0.1) test_generator = test_datagen.flow_from_dataframe(df_test,x_col='filename',y_col='category', target_size=size, class_mode='categorical', batch_size=batch_size) ``` Now, we will fit the model on the training data and validate it on the validation data. Various epochs and batch sizes were tried and due to the time it took to run the model, 3 and 10 were chosen respectively. ``` hist = model.fit( train_generator, epochs=3, validation_data=validation_generator, batch_size=batch_size, verbose=1 ) ``` As the plot illustrates, the model does not have a high accuracy. A lot of layers were added to the model including batch normalization and dropout layers to make the code run more efficiently. ``` fig, axes = plt.subplots(1,2,figsize=(12, 4)) for ax, measure in zip(axes, ['loss', 'accuracy']): ax.plot(hist.history[measure], label=measure) ax.plot(hist.history['val_' + measure], label='val_' + measure) ax.legend() plt.show() model.save("model1.h5") ``` The test accuracy is about 63%. ``` test_loss, test_acc = model.evaluate(test_generator) test_acc ``` The following code manipulates the test data frame to be used for predictions. 
``` test_df = pd.DataFrame({ 'filename': testfiles }) nb_samples = test_df.shape[0] ``` The following code makes predictions from the model. ``` model.predict(test_generator, steps=np.ceil(nb_samples/batch_size)) ``` Checking the prediction of one image by feeding an image of a beetle. The classification was a beetle. Yay! ``` results={ 0:'beetles', 1:'cockroach', 2:'dragonflies' } from PIL import Image import numpy as np im=Image.open("insects/test/beetles/5556745.jpg") im=im.resize(size) im=np.expand_dims(im,axis=0) im=np.array(im) im=im/255 pred=model.predict_classes([im])[0] print(pred,results[pred]) ``` We will describe how well the model performed using shapley additive explanations. ``` import shap ``` I tried the following code and several other versions of what is below to convert the training and test sets into numpy arrays to be used in the gradient explainer and for the shapley values. After analyzing the output of the train and test generator (code above), I realized that the data was in batches and I would need to unbatch the data for this to work and plot the images correctly. So, I scratched this idea and skipped to what is shown below. ``` # xtrain = [] # ytrain = [] # xtrain=np.concatenate([train_generator.next()[0] for i in range(train_generator.__len__())]) # ytrain=np.concatenate([train_generator.next()[1] for i in range(train_generator.__len__())]) # xtest = [] # ytest = [] # xtest=np.concatenate([test_generator.next()[0] for i in range(test_generator.__len__())]) # ytest=np.concatenate([test_generator.next()[1] for i in range(test_generator.__len__())]) # ytest = np.where(ytest == 0, "beetles", np.where(ytest == 1, "cockroach", "dragonflies")) # explainer = shap.GradientExplainer(model, xtrain) # shap_vals = explainer.shap_values(xtest[:3]) ``` The following code produces the shapley values on the test data using a gradient from the model and background (training data). 
``` explainer = shap.GradientExplainer(model, train_generator[0][0]) shap_vals, index = explainer.shap_values(test_generator[0][0], ranked_outputs = 3) ``` Now, we will create a numpy array to label the images. ``` names = ['beetles', 'cockroach', 'dragonflies'] index_names = np.vectorize(lambda x: names[x])(index) ``` Finally, we will plot the images and see what parts of the images are most important in creating the image classification. ``` # shap.image_plot(shap_vals, test_generator[0][0], labels = index_names, show = False) # plt.savefig('shap.jpg') from IPython import display display.Image("./shap.jpg") ```
github_jupyter
# 更多字符串和特殊方法 - 前面我们已经学了类,在Python中还有一些特殊的方法起着非常重要的作用,这里会介绍一些特殊的方法和运算符的重载,以及使用特殊方法设计类 ## str 类 - 一个str对象是不可变的,也就是说,一旦创建了这个字符串,那么它的内容在认为不改变的情况下是不会变的 - s1 = str() - s2 = str('welcome to Python') ## 创建两个对象,分别观察两者id - id为Python内存地址 ``` a = id(100) b = id(12) a is b ``` ## 处理字符串的函数 - len - max - min - 字符串一切是按照ASCII码值进行比较 ## 下角标运算符 [] - 一个字符串是一个字符序列,可以通过索引进行访问 - 观察字符串是否是一个可迭代序列 \__iter__ ## 切片 [start: end] - start 默认值为0 - end 默认值为-1 ## 链接运算符 + 和复制运算符 * - \+ 链接多个字符串,同时''.join()也是 - \* 复制多个字符串 ## in 和 not in 运算符 - in :判断某个字符是否在字符串内 - not in :判断某个字符是否不在字符串内 - 返回的是布尔值 ``` a3 = 'yg is Superman)' 'yg' in a3 def ``` ## 比较字符串 - ==, !=, >=, <=, >, < - 依照ASCII码值进行比较 ## 测试字符串 ![](../Python/Photo/99.png) - 注意: > - isalnum() 中是不能包含空格,否则会返回False ``` a = '111' count1 = 0 count2 = 0 count3 = 0 for i in a: if i.islower() is True: count1 +=1 if i.isupper() is True: count2 +=1 if i.isdigit() is True: count3 +=1 else: if count1 == 0: print('密码必须含有小写字母') if count2 == 0: print('密码必须含有大写') if count3 == 0: print('密码必须含有数字') if count1 !=0 and count2 !=0 and count3 !=0: print('密码设置成功') print(count1,count2,count3) n = 'a b' n.isalpha() ``` ## 搜索子串 ![](../Python/Photo/100.png) ## 转换字符串 ![](../Python/Photo/101.png) ## 删除字符串 ![](../Python/Photo/146.png) ``` a = ' dsdss' a.lstrip() a = ' skjk kjk lk l;k; dkjsk ' a.replace(' ','') ``` ## 格式化字符串 ![](../Python/Photo/103.png) ## EP: - 1 ![](../Python/Photo/104.png) - 2 随机参数100个数字,将www.baidu.com/?page=进行拼接 ## Python高级使用方法 -- 字符串 - 我们经常使用的方法实际上就是调用Python的运算重载 ![](../Python/Photo/105.png) # Homework - 1 ![](../Python/Photo/106.png) ``` ssn = input('输入安全号码:') yi = ssn[0:3] er = ssn[4:6] san = ssn[7:11] if ssn.__len__() = 11: if yi.isdigit() and er.isdigit() and san.isdigit() is True: if ssn[3] == '-' and ssn[6] == '-' : print('Valid SSN') else: print('Invalid SSN') else: print('Invalid SSN') else: print('Invalid SSN') ``` - 2 ![](../Python/Photo/107.png) ``` a = 'dsds' b = 'sd' a.find(b) ``` - 3 ![](../Python/Photo/108.png) ``` pwd = input('输入密码:') count1 = 0 
count3 = 0 if pwd.__len__() >= 8: for i in pwd: if i.islower() or i.isupper() is True: count1 +=1 if i.isdigit() is True: count3 +=1 else: if count1 !=0 and count3 >=2: print('Valid password') else: print('invalid password') else: print('invalid password') ``` - 4 ![](../Python/Photo/109.png) ``` def countLetters(s): counts = 0 for i in s: if i.islower() or i.isupper() is True: counts += 1 print('字母个数为:'+ str(counts)) countLetters('123123a') ``` - 5 ![](../Python/Photo/110.png) ``` def getNumber(uppercaseLetter): number = uppercaseLetter ``` - 6 ![](../Python/Photo/111.png) ``` def reverse(s): fanxiang = '' c = s.__len__()-1 for i in range(s.__len__()): fanxiang = s[c] c -=1 print(fanxiang,end='') reverse('asdf') ``` - 7 ![](../Python/Photo/112.png) - 8 ![](../Python/Photo/113.png) ``` num = str(input('输入:')) jiaoyan = 10 - (int(num[0]) + 3 * int(num[1]) + int(num[2]) + 3 * int(num[3]) + int(num[4]) + 3 * int(num[5]) + int(num[6]) + 3 * int(num[7]) + int(num[8]) + 3 * int(num[9]) + int(num[10]) + 3 * int(num[11])) % 10 print(num + str(jiaoyan)) ``` - 9 ![](../Python/Photo/114.png)
github_jupyter
``` _= """ ref https://www.reddit.com/r/algotrading/comments/e44pdd/list_of_stock_tickers_from_yahoo/ https://old.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nasdaq&render=download AMEX https://old.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=amex&render=download NYSE https://old.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nyse&render=download """ # look for high vol high vol. # assume option would be too. # compute historical implied volatility? # suggest trading ideas. # depending on: vol and mean ret are relatively high or low points and their dp and ddp. ### Can you actually apply any of this to gain an edge when trading? + Now I don't really buy technical analysis, but the immediate indicator that comes to mind is "relative strength index", obviously when you mention TA, we are implying trying to forcast a price trend, using historical data and pattern which has been proven to not be successful, albeit many successful traders swear by TAs. Thus here we will demonstrate how well/badly our 2 mean reverting time series can forecasting future price changes compared to buy-and-hold. + Perhaps if you split the 2D distribution plot of rolling ret mean and hist volatility into 4 quandrants (show below) you can opt to deploy different option strategies accordingly. ``` lets contrain ourselves to the below per BAT's talk linked below! 
Short Strangle, Iron Condors, Credit Spread, Diagnal Spread, Ratio Spread, Broken Wing Butterfly ``` + Tony Battisa, Tastytrade, How to Use Options Strategies & Key Mechanics https://www.youtube.com/watch?v=T6uA_XHunRc # # ^ # high vol | high vol # low ret | high ret # -----------|----------> # low vol | low vol # low ret | high ret # # # high vol, low ret -> short put (or credit spread) # high vol, high ret -> short call (or credit spread) # high vol, mid ret -> short strangle (or iron condor) # mid vol, low ret -> Ratio Spread (sell 2 otm puts, buy 1 atm put) # mid vol, high ret -> Ratio Spread (sell 2 otm call, buy 1 atm call) # low vol, low ret -> Broken Wing Butter Fly # low vol, high ret -> Broken Wing Butter Fly # low vol, mid ret -> Diagnal to bet on vol increase. # # product idea. deploy below as a website, earn ad revenue. # since both signals are likely mean reverting # and assuming realized volatility tracks implied volatilityvol_change # # by sectioning the 2 changes to zones, we can accordingly decide what strategy to deploy # if vol increase, price increase - diagonal - short front month call, long back month call # if vol increase, price no-change - diagonal - short call strangle, long back month? # if vol increase, price decrease - diagonal - short front month put, long back month put # if vol decrease, price increase - short put # if vol decrease, price no-change - iron condor # if vol decrease, price decrease - short call # https://www.youtube.com/watch?v=T6uA_XHunRc, ratios spreads or broken wing butter fly # if vol no-change, price increase - short put ratio spread # if vol no-change, price no-change - iron condor ratio spreads? # if vol no-change, price decrease - short call ratio spread # to simplify backtesting. we will just see if we can predict the trend # a win for each trade gets a +1 a loss for each trade gets a -1 # for the same period, for buy and hold, +1 means price ret in that period is > 0. np.random.rand(10,10) ```
github_jupyter
``` import numpy as np import pandas as pd import random def bootstrapdf(df): df = df.sample(frac=1, replace=True) return df def check_for_leaf(df,counter, min_samples, max_depth): unique_classes = np.unique(df) if len(unique_classes) == 1 or len(df)<=min_samples or counter==max_depth: labelcol = df uniq_cls, cnt = np.unique(labelcol, return_counts=True) classification = unique_classes[cnt.argmax()] return classification else: return False def gini_imp_test(df, col_index): df.reset_index(inplace = True, drop = True) classes = df.iloc[:,-1] feature = df.iloc[:,col_index] if len(feature.unique()) == 2: gini_imp = 0 for i in np.unique(feature): idx = np.where(feature == i) label = classes.loc[idx].values a, b = np.unique(label, return_counts = True) list1 = [(i/sum(b))**2 for i in b] prob = 1 - sum(list1) wt = len(idx[0]) / df.shape[0] gini_imp += wt * prob return gini_imp, i else: label = np.sort(feature.unique())[1:-1] best_gini_imp = float('inf') split_val = 0 for i in label: idx1 = np.where(feature > i) idx2 = np.where(feature <= i) if len(idx1[0]) > 2 and len(idx2[0]) > 2: b1, b1cnt = np.unique(classes.loc[idx1].values, return_counts = True) b2, b2cnt = np.unique(classes.loc[idx2].values, return_counts = True) list1 = [(i/sum(b1cnt))**2 for i in b1cnt] list2 = [(i/sum(b2cnt))**2 for i in b2cnt] prob1 = 1 - sum(list1) prob2 = 1 - sum(list2) gini = ((sum(b1cnt)/df.shape[0])*prob1) + ((sum(b2cnt)/df.shape[0])*prob2) if gini < best_gini_imp: best_gini_imp = gini split_val = i else: continue return best_gini_imp, split_val def best_node(df, col_list): best_gini_imp = float('inf') value = 0 col = 0 for i in col_list: gini, val = gini_imp_test(df, i) if gini < best_gini_imp: best_gini_imp = gini value = val col = i return col, value def split_df(df, col_index, split_val): feature = df.iloc[:,col_index] if feature.dtypes == object: temp1 = df[df.iloc[:,col_index] == split_val] temp2 = df[df.iloc[:,col_index] != split_val] return temp1, temp2 elif feature.dtypes != 
object: temp1 = df[df.iloc[:,col_index] <= split_val] temp2 = df[df.iloc[:,col_index] >= split_val] temp1.reset_index(inplace = True, drop = True) temp2.reset_index(inplace = True, drop = True) return temp1, temp2 def check_purity(data): label_column = data[:, -1] unique_classes = np.unique(label_column) if len(unique_classes) == 1: return True else: return False def classify_data(data): label_column = data[:,-1] unique_classes, counts_unique_classes = np.unique(label_column, return_counts=True) index = counts_unique_classes.argmax() classification = unique_classes[index] return classification def metrics(ts_lb,answer): TN = 0 TP = 0 FN = 0 FP = 0 for i,j in zip(ts_lb,answer): if j==1 and i==1: TP += 1 elif(j==1 and i==0): FN += 1 elif(j==0 and i==1): FP += 1 elif(j==0 and i==0): TN += 1 Accuracy = (TP + TN)/(TP + FP + TN + FN) Precision = TP/(TP + FP) Recall = TP/(TP + FN) f1_score = (2*Precision*Recall)/(Precision + Recall) return Accuracy, Precision, Recall, f1_score def decision_tree(df, columns, num_features, counter = 0, min_samples = 10, max_depth = 5): if (check_purity(df.values)) or (counter == max_depth) or (len(df) < min_samples): classification = classify_data(df.values) return classification else: counter += 1 col_list = random.sample(columns, num_features) column, value = best_node(df, col_list) if df.iloc[:,column].dtype == object: columns.remove(column) branch1, branch2 = split_df(df, column, value) if len(branch1) == 0 or len(branch2) == 0: classification = classify_data(df.values) return classification query = "{} <= {}".format(column, value) branch = {query: []} left_branch = decision_tree(branch1, columns, num_features, counter) right_branch = decision_tree(branch2, columns, num_features, counter) if left_branch == right_branch: branch = left_branch else: branch[query].append(left_branch) branch[query].append(right_branch) return branch def random_forest(df, num_trees, num_features): trees = [] for i in range(num_trees): df = bootstrapdf(df) 
columns = list(df.iloc[:,:-1].columns) tree = decision_tree(df, columns, num_features) trees.append(tree) return trees def predict(model, test_data): classes = [] for tree in model: cls = [] for i in range(len(test_data)): t = tree col,_,val = list(t.keys())[0].split() col = int(col) try: val = float(val) except: val = str(val) key = list(t.keys())[0] key_val = t[key] while True: if test_data.iloc[i,col] <= val: t = t[key][0] if type(t) != dict: cls.append(t) break else: col,_,val = list(t.keys())[0].split() col = int(col) try: val = float(val) except: val = str(val) key = list(t.keys())[0] key_val = t[key] else: t = t[key][1] if type(t) != dict: cls.append(t) break else: col,_,val = list(t.keys())[0].split() col = int(col) try: val = float(val) except: val = str(val) key = list(t.keys())[0] key_val = t[key] cls = [int(i) for i in cls] classes.append(cls) classes = np.array(classes) final_class = [] for i in range(len(test_data)): unique_classes, counts_unique_classes = np.unique(classes[:,i], return_counts=True) index = counts_unique_classes.argmax() classification = unique_classes[index] final_class.append(classification) final_class test_data["Class"] = final_class return test_data def k_fold(df): num_trees = int(input("Enter number of trees: ")) num_features = int(input("Enter number of features for each split: ")) k = int(input("Enter k value: ")) metrics_list = [] for i in range(k): splitdfs = np.array_split(df, k) test = splitdfs[i] del(splitdfs[i]) train = pd.concat(splitdfs) test.reset_index(inplace = True, drop = True) train.reset_index(inplace = True, drop = True) actual = test.iloc[:,-1] test = test.iloc[:,:-1] model = random_forest(train, num_trees, num_features) results = predict(model, test) Accuracy, Precision, Recall, f1_score = metrics(actual, results["Class"]) metrics_list.append([Accuracy, Precision, Recall, f1_score]) metrics_list = np.array(metrics_list) metrics_list = np.mean(metrics_list, axis = 0) print("Accuracy: ",metrics_list[0]) 
print("Precision: ",metrics_list[1]) print("Recall: ",metrics_list[2]) print("f1_score: ",metrics_list[3]) return metrics_list df1 = pd.read_csv("project3_dataset1.txt", sep = '\t', header=None) k_fold(df1) df2 = pd.read_csv("project3_dataset2.txt", sep = '\t', header=None) k_fold(df2) ```
github_jupyter
_Lambda School Data Science — Tree Ensembles_

# Decision Trees — with ipywidgets!

### Notebook requirements
- [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html): works in Jupyter but [doesn't work on Google Colab](https://github.com/googlecolab/colabtools/issues/60#issuecomment-462529981)
- [mlxtend.plotting.plot_decision_regions](http://rasbt.github.io/mlxtend/user_guide/plotting/plot_decision_regions/): `pip install mlxtend`

## Regressing a wave

```
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

# Example from http://scikit-learn.org/stable/auto_examples/tree/plot_tree_regression.html
def make_data():
    # Noisy sine wave: 80 sorted points on [0, 5); every 5th target gets
    # uniform noise spanning (-1, 1] added on top of sin(x).
    import numpy as np
    rng = np.random.RandomState(1)
    X = np.sort(5 * rng.rand(80, 1), axis=0)
    y = np.sin(X).ravel()
    y[::5] += 2 * (0.5 - rng.rand(16))
    return X, y

X, y = make_data()
# Hold out 25% of the points for the test score.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
plt.scatter(X_train, y_train)
plt.scatter(X_test, y_test);

from sklearn.tree import DecisionTreeRegressor

def regress_wave(max_depth):
    # Fit a regression tree of the chosen depth, report train/test R^2,
    # and overlay the tree's step-function prediction on the scatter plot.
    tree = DecisionTreeRegressor(max_depth=max_depth)
    tree.fit(X_train, y_train)
    print('Train R^2 score:', tree.score(X_train, y_train))
    print('Test R^2 score:', tree.score(X_test, y_test))
    plt.scatter(X_train, y_train)
    plt.scatter(X_test, y_test)
    plt.step(X, tree.predict(X))
    plt.show()

from ipywidgets import interact
# Slider over max_depth in 1..8 re-fits and re-plots interactively.
interact(regress_wave, max_depth=(1,8,1));
```

## Classifying a curve

```
import numpy as np

# 1000 random points in the unit square, labeled 1 inside the quarter disk
# x^2 + y^2 < 1 and 0 outside — a curved, non-linear decision boundary.
curve_X = np.random.rand(1000, 2)
curve_y = np.square(curve_X[:,0]) + np.square(curve_X[:,1]) < 1.0
curve_y = curve_y.astype(int)

from sklearn.linear_model import LogisticRegression
from mlxtend.plotting import plot_decision_regions

# Linear baseline: logistic regression can only draw a straight boundary.
lr = LogisticRegression(solver='lbfgs')
lr.fit(curve_X, curve_y)
plot_decision_regions(curve_X, curve_y, lr, legend=False)
plt.axis((0,1,0,1));

from sklearn.tree import DecisionTreeClassifier

def classify_curve(max_depth):
    # Axis-aligned tree splits approximate the curved boundary as depth grows.
    tree = DecisionTreeClassifier(max_depth=max_depth)
    tree.fit(curve_X, curve_y)
    plot_decision_regions(curve_X, curve_y, tree, legend=False)
    plt.axis((0,1,0,1))
    plt.show()

interact(classify_curve, max_depth=(1,8,1));
```

## Titanic survival, by age & fare

```
import seaborn as sns
from sklearn.impute import SimpleImputer

titanic = sns.load_dataset('titanic')
# Impute missing age/fare values (SimpleImputer default strategy) before fitting.
imputer = SimpleImputer()
titanic_X = imputer.fit_transform(titanic[['age', 'fare']])
titanic_y = titanic['survived'].values

from sklearn.linear_model import LogisticRegression
from mlxtend.plotting import plot_decision_regions

lr = LogisticRegression(solver='lbfgs')
lr.fit(titanic_X, titanic_y)
plot_decision_regions(titanic_X, titanic_y, lr, legend=False);
plt.axis((0,75,0,175));

def classify_titanic(max_depth):
    # Same interactive depth exploration, now on real (noisier) data.
    tree = DecisionTreeClassifier(max_depth=max_depth)
    tree.fit(titanic_X, titanic_y)
    plot_decision_regions(titanic_X, titanic_y, tree, legend=False)
    plt.axis((0,75,0,175))
    plt.show()

interact(classify_titanic, max_depth=(1,8,1));
```
github_jupyter
### Video Games Dataset: EDA #### 1. Describe Dataset - **Who:** The data was acquired from Kaggle and supplied by the user Gregory Smith (https://www.kaggle.com/gregorut/videogamesales). The data was scraped from www.vgchartz.com. - **What:** The dataset contains a list of video games with sales greater than 100,000 from 1980 to 2020. It contains information such as the platform the game was made available, year of release, genre, publisher, sales in NA, sales in JP, sales in EU, sales in the rest of the world and global sales (total). The data set also includes the rank of games in terms of overall sales. **NOTE: Sales are in millions** - **When:** The data set was last updated 4 years ago but contains games released from 1980 to seemingly 2020. - **Why:** The video game industry is a very competitive yet profitable industry. While big companies with large amounts of resources have an edge over smaller companies, we have recently seen many small companies finding huge success. Not only in game creation but in the case of streamers for example, playing a game before it becomes mainstream might give you an edge against bigger name streamers. With this data set, we are able to gain insight into general idea such as performance of companies, most popular titles and genres. We are also able to dive deeper and look at changing genre popularities over time, regional preference in game genres/platforms, upcoming developer etc. - **How:** The data set was scraped from the www.vgzchartz.com website using BeautifulSoup. The scraping script can be found here (https://github.com/GregorUT/vgchartzScrape) #### 2. Load Dataset ``` import pandas as pd import numpy as np import altair as alt from altair_saver import save alt.renderers.enable('mimetype') alt.data_transformers.enable('data_server') game = pd.read_csv("vgsales.csv") ``` #### 3. 
Explore Dataset ``` game.info() print("\nPlatform:\n",game.Platform.unique(),"\nCount: ",game.Platform.nunique()) print("\nYear\n",game.Year.unique(),"\nCount: ",game.Year.nunique()) print("\nGenre\n",game.Genre.unique(),"\nCount: ",game.Genre.nunique()) print("\nPublishers\n",game.Publisher.unique()[0:15],"\nCount: ",game.Publisher.nunique()) print(game.sort_values("NA_Sales",ascending=False).head(5).iloc[:,0:6]) print(game.sort_values("EU_Sales",ascending=False).head(5).iloc[:,0:6]) print(game.sort_values("JP_Sales",ascending=False).head(5).iloc[:,0:6]) print(game.sort_values("Global_Sales",ascending=False).head(5).iloc[:,0:6]) print(game.groupby("Year").size()) print(game.groupby("Genre").size()) print(game.groupby("Platform").size()) nulls = game[game.isna().any(axis=1)] #List of games with nulls in any field game.sort_values("Name").loc[game.Name.isin(game.Name[game.Name.duplicated()]),["Name","Platform"]].head(15) #Game titles that show up on multiple platforms ``` #### 4. Initial thoughts? - We have null values in Year and Publisher (Total of 307 rows with NaN values) - Year is a float, we could probably turn it into an int to make it prettier - We have 31 unique Platforms - We have 39 unique years (one being NaN) - We have 12 unique genres - We have 578 unique publishers - Looking at the top games in each region EU and NA are more similar in taste than JP -JP loves their Pokemon -Nintendo is the dominating Publisher in all regions - Years with the most releases are 2006 to 2011 with a peak of 1431 releases in 2009. #### 5. Wrangling The main concern would be the NaN values. I feel the data is still valuable, we can remove/deal with those values on a need basis. 
``` game_melt game.Year = game.Year.astype("Int64") print(nulls.groupby("Year").size()) print(nulls.groupby("Genre").size()) print(nulls.groupby("Platform").size()) #For the analysis of sales - melting the NA,EU,JP,Other and Total columns game_melt = game.melt(id_vars=["Rank", "Name","Platform","Year","Genre","Publisher"], var_name="Region", value_name="Sales").reset_index(drop=True) ``` #### 6. Research Questions/Visualization+Analysis ``` #1) Basic Exploratory visualisations of things we noted in the Initial Thoughts #Counts of number of games in each genre, platform and number of games released in each year #Genre and Platform counts are coloured by number of counts and sorted from largest to smallest #Year counts are coloured by year and sorted from largest to smallest sorted_genre_count = list(game.groupby("Genre").size().sort_values(ascending=False).index) sorted_year_count = list(game.groupby("Year").size().sort_values(ascending=False).index) sorted_platform_count = list(game.groupby("Platform").size().sort_values(ascending=False).index) genre_count = alt.Chart(game).mark_bar().encode( alt.X("Genre",type="nominal",sort=sorted_genre_count), alt.Y("count()",title="Number of games",type="quantitative"), alt.Color("count()",scale=alt.Scale(scheme='category20b'),legend=None), alt.Tooltip("count()")) year_count = alt.Chart(game).mark_bar().encode( alt.X("Year",type="ordinal",sort=sorted_year_count), alt.Y("count()",title="Number of games",type="quantitative"), alt.Color("Year",scale=alt.Scale(scheme='category20c')), alt.Tooltip("count()")) platform_count = alt.Chart(game).mark_bar().encode( alt.X("Platform",type="nominal",sort=sorted_platform_count), alt.Y("count()",title="Number of games",type="quantitative"), alt.Color("count()",scale=alt.Scale(scheme='category20b'),legend=None), alt.Tooltip("count()")) count_plots = ((genre_count.properties(width=333)|platform_count.properties(width=666)).resolve_scale(color='independent') 
&year_count.properties(width=1000)).resolve_scale(color='independent').configure_axis( labelFontSize=12, titleFontSize=13) count_plots = count_plots.properties(title={"text": "Number of games released by Genre, Platform and Year", "subtitle": ["Counts only include games that have sold more than 100,000 copies. Bars for the Genre and Platform plot are coloured by number of games.",""]}).configure_title(fontSize = 25,subtitleFontSize=15) #2) Lets look at sales across Genres for each Region #Genres are sorted by decreasing Global Sales (Action is most sales vs Strategy is least) #Notice we see Shooters - while having fewer games released, still sold a lot of copies meaning their titles seemed to do well and the same (to a larger extent) can be said about Platformers. #Looking at the means of each genre, we can see exacly as we noticed above with the mean number of sales in the Shooter/Platform genre now ahead of the rest. #It is also interesting to see the trend across genres. We see NA, EU and Other sale patters tend to be more similar while JP sale patterns are distinct from the other regions, with a large emphasis on RPG, Platformers. 
sales_data = game_melt.loc[game_melt.Region != "Global_Sales",:] sorted_genre_totalsales = list(game.groupby("Genre").sum().sort_values("Global_Sales",ascending=False).index) genre_sales = alt.Chart(sales_data).mark_bar(opacity=0.5).encode( alt.X("Genre",type="nominal",sort=sorted_genre_totalsales), alt.Y("sum(Sales)",title="Total Number of Sales (in millions)",type="quantitative",stack=None), alt.Color("Region",scale=alt.Scale(scheme='set1'),type="nominal"), alt.Tooltip("Region")) genre_sales = genre_sales+genre_sales.mark_circle() genre_mean_sales = alt.Chart(sales_data).mark_bar(opacity=0.5).encode( alt.X("Genre",type="nominal",sort=sorted_genre_totalsales), alt.Y("mean(Sales)",title="Average Number of Sales (in millions)",type="quantitative",stack=None), alt.Color("Region",type="nominal"), alt.Tooltip("Region")) genre_mean_sales = genre_mean_sales + genre_mean_sales.mark_circle() genre_plots = (genre_sales | genre_mean_sales).properties(title={"text":"Comparing sales in each region by genre.","subtitle":["Bars and points represent number of sales in millions and coloured by Region.",""]}).configure_axis( labelFontSize=12, titleFontSize=13).configure_title(fontSize = 25,subtitleFontSize=15) #3) Lets investigate what's going on in each region. We can do so by looking at individual title by region and color the points by genre. Scatter plot: Top 3 games in each genre for each region #This might help us explain and point out individual game titles that are dominating their genre. 
sorted_genre_NA = list(sales_data[sales_data.Region=="NA_Sales"].groupby("Genre").sum().sort_values("Sales",ascending=False).index) sorted_genre_EU = list(sales_data[sales_data.Region=="EU_Sales"].groupby("Genre").sum().sort_values("Sales",ascending=False).index) sorted_genre_JP = list(sales_data[sales_data.Region=="JP_Sales"].groupby("Genre").sum().sort_values("Sales",ascending=False).index) sorted_genre_OT = list(sales_data[sales_data.Region=="Other_Sales"].groupby("Genre").sum().sort_values("Sales",ascending=False).index) genre_region = alt.Chart(sales_data).mark_bar(opacity=0.5).encode( alt.Y("Region",type="nominal",sort=sorted_genre_totalsales), alt.X("sum(Sales)",title="Total Number of Sales (in millions)",type="quantitative",stack=None), alt.Color("Region",scale=alt.Scale(scheme='set1'),type="nominal",legend=None)) title_NA =alt.Chart(sales_data[sales_data.Region=="NA_Sales"]).mark_circle(size=50).encode( alt.X("Genre",sort=sorted_genre_NA,title=None), alt.Y("Sales:Q",stack=None, title="Sales (in millions)"), alt.Color("Genre",scale=alt.Scale(scheme='category20')), alt.Tooltip("Name")) title_NA = title_NA + alt.Chart(sales_data[sales_data.Region=="NA_Sales"].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( alt.X("Genre",sort=sorted_genre_NA), alt.Y("Sales:Q"), text="Name").properties(title="NA Sales") title_JP =alt.Chart(sales_data[sales_data.Region=="JP_Sales"]).mark_circle(size=50).encode( alt.X("Genre",sort=sorted_genre_JP,title=None), alt.Y("Sales:Q",stack=None,title=None), alt.Color("Genre",scale=alt.Scale(scheme='category20')), alt.Tooltip("Name")) title_JP = title_JP + alt.Chart(sales_data[sales_data.Region=="JP_Sales"].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( alt.X("Genre",sort=sorted_genre_JP), alt.Y("Sales:Q"), text="Name").properties(title="JP Sales") title_EU =alt.Chart(sales_data[sales_data.Region=="EU_Sales"]).mark_circle(size=50).encode( 
alt.X("Genre",sort=sorted_genre_EU,title=None), alt.Y("Sales:Q",stack=None,title=None), alt.Color("Genre",scale=alt.Scale(scheme='category20')), alt.Tooltip("Name")) title_EU = title_EU + alt.Chart(sales_data[sales_data.Region=="EU_Sales"].sort_values("Sales",ascending=False).iloc[:10,]).mark_text(align = "left", dx=10).encode( alt.X("Genre",sort=sorted_genre_EU), alt.Y("Sales:Q"), text="Name").properties(title="EU Sales") title_OT =alt.Chart(sales_data[sales_data.Region=="Other_Sales"]).mark_circle(size=50).encode( alt.X("Genre",sort=sorted_genre_OT,title=None), alt.Y("Sales:Q",stack=None,title=None), alt.Color("Genre",scale=alt.Scale(scheme='category20')), alt.Tooltip("Name")) title_OT = title_OT + alt.Chart(sales_data[sales_data.Region=="Other_Sales"].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( alt.X("Genre",sort=sorted_genre_OT), alt.Y("Sales:Q"), text="Name").properties(title="Other Sales") name_sales = (title_NA.properties(width=250) | title_EU.properties(width=250) | title_JP.properties(width=250) | title_OT.properties(width=250)) name_sales = name_sales & genre_region.properties(width=1210).resolve_scale(color='independent') name_sales = name_sales.properties(title={"text":"Distribution of games titles across each Region by Genre.","subtitle":["Top 5 games in each region are labelled. 
Overall Regional perfomance shown below.",""]}).configure_axis( labelFontSize=12, titleFontSize=13).configure_title(fontSize = 20,subtitleFontSize=15) #4) Growth of Game Genres, Publishers and Platforms over the Years genre_data = pd.Series(game.groupby(['Year','Genre']).size().groupby('Year').size(), name='Genre') pub_data = pd.Series(game.groupby(['Year','Publisher']).size().groupby('Year').size(), name='Publisher') plat_data = pd.Series(game.groupby(['Year','Platform']).size().groupby('Year').size(), name='Platform') rel_data = pd.concat([genre_data,pub_data,plat_data], axis=1).reset_index() plot4=alt.Chart(rel_data).transform_fold(['Genre','Publisher','Platform'] ).mark_bar(point=True).encode( x='Year:O', y=alt.Y('value:Q', axis=alt.Axis(title='Number of Genre/Publishers/Platforms')), color=alt.Color('key:N', legend=alt.Legend(title='Feature')) ).properties(width=1000) #5) Maximum Annual Global Sales Numbers plot5=alt.Chart(game).mark_line(point=True).encode( x='Year:N', y=alt.Y('max(Global_Sales):Q', axis=alt.Axis(title='Maximum Annual Global Sales')) ).properties(width=1000) ``` 1) **Basic Exploratory visualizations of things we noted in the 'Initial Thoughts'** - Counts of number of games in each genre, platform and number of games released in each year - Genre and Platform counts are colored by number of counts and sorted from largest to smallest - Year counts are colored by year and sorted from largest to smallest - We see Action is quite ahead of other genres in terms of numbers of games released, followed by Sports - We see DS and PS2 have the highest number of games released in terms of Platform. - The years 2006-2011 have the highest number of game released. There are a ton of different ways to analyze this data set, lets just look at Genres for now. 
``` count_plots ``` 2) **Sales across Genres for each Region** - Genres are sorted by decreasing Global Sales (Action is most sales vs Strategy is least) - Notice we see Shooters - while having fewer games released, still sold a lot of copies meaning their titles seemed to do well and the same (to a larger extent) can be said about Platformers. - Looking at the means of each genre, we can see exactly as we noticed above with the mean number of sales in the Shooter/Platform genre now ahead of the rest. - It is also interesting to see the trend across genres. We see NA, EU and Other sale patters tend to be more similar while JP sale patterns are distinct from the other regions, with a large emphasis on RPG, Platformers. Lets investigate what's going on in each region. We can do so by looking at individual titles by region. This might help us explain and point out individual game titles that are dominating their genre. ``` genre_plots ``` 3) **Distribution of game titles by Genre across each Region** - The top 5 titles in each region across genres are labeled. The genres are also sorted by most sales (left) to least sales (right). - Just by looking at the scales, it seems NA sells the most copies, followed by EU, JP and Other. This is also re-enforced by the bar plot below showing the total number of copies sold (in millions) by region. - Looking at genres across regions, we see NA, EU and Other are similar (as concluded from the plot before). While Action and Sports are still in the Top 3 genres for JP, we see RPG being first and Shooters being last. - We see NA, EU and Other having similar Top 5 games, all having Wii Sports and Mario Kart. - The JP region seems to love their RPG games which makes sense as they even have a subcategory named after them (JRPGs - Japanese RPGs) - We see the bulk of games in most regions (except Other) lie around or below the 5 million copies sold mark and relatively few titles make it above. 
``` name_sales ``` 4) **Growth of Game Genres, Publishers and Platforms over the Years** - From our dataset we see that the year around 2010 were of highest games sales activity. - In 1980 we had only two game companies/publishers make five genre of games. - Lots of different publishers entered the gaming market with over 180 gaming companies around 2010 offering different gaming products. - Its interesting to see that the number of gaming consoles/platforms (like Xbox, PS5) have not increased. These consoles need bigger investment and only big gaming industry players held that market segment. ``` plot4 ``` 5) **Maximum Annual Global Sales Numbers** ``` plot5 ``` #### 7. Future Studies 1) **Revenue and marketing model of games** While this dataset can give us a general idea of popularity of games across years, genres, publishers and platforms, it does not necessarily tell us which genre is most profitable in terms of revenue. Currently, one of the largest gaming market is `Mobile Gaming` which would not have a metric such as `copies sold` but rather `downloads`. We also know some of the biggest games such as League of Legends, and DOTA 2 run off a "free to play" model for PC so they would also not have a "copies sold" metric. On top of that, a game that is free, take League of Legends for example, will make money through micro transactions (for example they made $1.75 billion dollars in 2020). As a more accurate analysis of how platforms, titles, developers and genres are performing in terms of revenue, popularity and activity, we might want to expand and improve the quality and type of data we are looking at. 
Certain things that we could use as better metrics: - Generic things such as: Game title, year of release, company, genre, platform - We could look at copies sold AND downloads - We could look at concurrent players (as to get a sense of the size of the active player base) *This would be very useful in comparing multi player games and even peak hours of gaming - The payment model of the game (single time purchase, subscription based, free-to-play) - Reported revenue (could be trickier to find) - We can also include things like development cost (could be trickier to find) - Reviews/General consensus This could be very helpful from a companies perspective to narrow down on a target audience and possible find niche areas of the gaming community that show promise. 2) **Viewership and Entertainment** We could also expand our data set to analyze things in the streaming industry, where there is also big money to be had. Now a days, it seems like many people dream of being a streamer. Just to sit at home and play games all day while raking in large amounts of money, but it would be interesting to see certain stats such as, how much money do you need to break even (average living cost vs income from streaming broken down into views, subs, donations etc.) and to see how many people on platforms such as YouTube gaming, Twitch, Facebook gaming meet this threshold of 'success'. Things you would need would be: - List of streamers, platform they stream on, hours streamed, revenue You could also look to see if certain genre of games or certain game titles themselves lead to better chances of success. For this you would need to further breakdown streamer information by game played, time streamed, views, donations and subscriptions received during that stream. 
Obviously, I feel a lot of this data would be difficult to obtain and that the success of a streamer is heavily influenced by personality, but it would be interesting to see if there is anything that can be identified to give upcoming streamers a slight advantage against big names. ``` sales_data_platform sales_data[(sales_data.Region=="EU_Sales") & (sales_data.Genre=="Action")].sort_values("Sales",ascending=False) max_year = 2020 sales_data = game_melt.loc[game_melt.Region != "Global_Sales",:] sales_data_platform = sales_data.groupby(["Platform","Year","Genre","Region"]).sum().reset_index()[["Platform","Year","Genre","Region","Sales"]] sales_data_platform sales_data_platform[sales_data_platform.Year <= max_year].groupby(["Platform","Genre","Region"]).sum().reset_index()[["Platform","Genre","Region","Sales"]] list(sales_data[(sales_data.Region==region_filter) & (sales_data.Year <= max_year)].groupby("Genre").sum().sort_values("Sales",ascending=False).index) sales_data_platform = sales_data.groupby(["Platform","Year","Genre","Region"]).sum().reset_index()[["Platform","Year","Genre","Region","Sales"]] max_year = 1999 region_filter = "NA_Sales" def platform_plot(region_filter,max_year=2020): sorted_genre = list(sales_data[(sales_data.Region==region_filter) & (sales_data.Year <= max_year)].groupby("Genre").sum().sort_values("Sales",ascending=False).index) filtered_set = sales_data_platform[sales_data_platform.Year <= max_year].groupby(["Platform","Genre","Region"]).sum().reset_index()[["Platform","Genre","Region","Sales"]] chart=alt.Chart(filtered_set[filtered_set.Region==region_filter]).mark_circle(size=50).encode( alt.X("Genre",sort=sorted_genre,title=None), alt.Y("Sales:Q",stack=None, title="Sales (in millions)"), alt.Color("Genre",scale=alt.Scale(scheme='category20')), alt.Tooltip("Platform")) chart = chart + alt.Chart(filtered_set[filtered_set.Region==region_filter].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( 
alt.X("Genre",sort=sorted_genre), alt.Y("Sales:Q"), text="Platform") return chart x = platform_plot(region_filter,max_year) x # chart = chart + alt.Chart(sales_data_platform[sales_data_platform.Region==region_filter].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( # alt.X("Genre",sort=sorted_genre), # alt.Y("Sales:Q"), # text="Name") sales_data_publisher = sales_data.groupby(["Publisher","Year","Genre","Region"]).sum().reset_index()[["Publisher","Year","Genre","Region","Sales"]] max_year = 2020 region_filter = "NA_Sales" def test(region_filter,max_year=2020): sorted_genre = list(sales_data[(sales_data.Region==region_filter) & (sales_data.Year <= max_year)].groupby("Genre").sum().sort_values("Sales",ascending=False).index) filtered_set = sales_data_publisher[sales_data_publisher.Year <= max_year].groupby(["Publisher","Genre","Region"]).sum().reset_index()[["Publisher","Genre","Region","Sales"]] chart=alt.Chart(filtered_set[filtered_set.Region==region_filter]).mark_circle(size=50).encode( alt.X("Genre",sort=sorted_genre,title=None), alt.Y("Sales:Q",stack=None, title="Sales (in millions)"), alt.Color("Genre",scale=alt.Scale(scheme='category20'),legend=None), alt.Tooltip("Publisher")) chart = chart + alt.Chart(filtered_set[filtered_set.Region==region_filter].sort_values("Sales",ascending=False).iloc[:5,]).mark_text(align = "left", dx=10).encode( alt.X("Genre",sort=sorted_genre), alt.Y("Sales:Q"), text="Publisher") return chart x = test(region_filter,max_year) x sales_data_publisher = sales_data.groupby(["Publisher","Year","Genre","Region"]).sum().reset_index()[["Publisher","Year","Genre","Region","Sales"]] sales_data_publisher filtered_set = sales_data[sales_data.Year <= max_year].groupby(["Name","Genre","Region"]).sum().reset_index()[["Name","Genre","Region","Sales"]] filtered_set ```
github_jupyter
# Nature of signals

In the context of this class, a signal is the data acquired by the measurement system. It contains much information that we need to be able to identify to extract knowledge about the system being tested and how to optimize the measurements. A signal also carries messages and information. We will use the content of this module for the other modules seen in the rest of the class.

## Signal classification

A signal can be characterized by its amplitude and frequency. __Amplitude__ is related to the strength of the signal and __frequency__ to the extent or duration of the signal. The time series of a signal is called a __waveform__. Multiple collections of the waveform are called an __ensemble__. Signals can be either __deterministic__ or __random__. Deterministic signals can be either __static__ (do not change in time) or __dynamic__. Dynamic signals can be decomposed into __periodic__ or __aperiodic__. A periodic signal repeats itself at regular intervals. The smallest value over which it repeats itself is the __fundamental period__, with an associated __fundamental frequency__. A __simple__ periodic signal has one period; it is a sine wave. A __complex__ one has multiple periods and can be thought of as the sum of several sinusoids (more on this in the next section). Aperiodic signals are typically __transient__ (such as step, ramp, or pulse responses). Nondeterministic signals are an important class of signals that are often encountered in nature (think of turbulence, the stock market, etc.). They must be analyzed with statistical tools. They are classified as __nonstationary__ and __stationary__. This classification enables one to select the proper statistical theory to analyze them. The properties of nondeterministic signals are computed with ensemble statistics of instantaneous properties. In particular, one computes the ensemble average, $\mu(t_1)$, and the ensemble autocorrelation function (more on the physical meaning of this function later), $R(t_1,t_1+\tau)$.
\begin{align*} \mu(t_1) & = \frac{1}{N} \sum_{i=0}^{N-1} x_i(t_1) \\ R(t_1,t_1+\tau) & = \frac{1}{N} \sum_{i=0}^{N-1} x_i(t_1)x_i(t_1+\tau) \end{align*} The term ensemble means that we take N time series and perform statistics with the ensemble of the values at recorded time $t_1$. If $\mu(t_1) = \mu$ and $R(t_1,t_1+\tau) = R(\tau)$, then the signal is considered (weakly) __stationary__ and nonstationary, otherwise. Stationarity introdcues a lot of simplification in the statistical analysis of the data (by using a lot of tools developed for time series analysis) and one should always start by checking for signal stationarity. Stationarity implies that signal ensemble-averaged statistical properties are independent of $t_1$. For most stationary signals, the temporal and ensemble statistical properties are identical. The signal is then __ergodic__. Thus, from a _single_ time history of length $T_r$ one can calculate $\mu$ and $R(\tau)$ (which saves time in the acquisition and analysis): \begin{align*} \mu & = \frac{1}{T_r} \int_{0}^{T_r} x(t) dt \\ R(\tau) & = \frac{1}{T_r} \int_{0}^{T_r} x(t)x(t+\tau) dt \end{align*} Thanks to statistical tools for ergodic processes, from a finite recording length of the signal, one can estimate population mean with confidence level. ## Signal variables Most signals can be decomposed as a sum of sines and cosines (more on this in the next module). Let's start with a simple periodic signal: \begin{align*} y(t) = C \sin (n \omega t + \phi) = C \sin (n 2\pi f t + \phi) \end{align*} When several sine and cosine waves are added, complext waveforms result. 
For example for second order dynamic system, the system response could take the form: \begin{align*} y(t) = A \cos (\omega t) + B \sin (\omega t) \end{align*} This sum of a cosine and sine of same frequency can be rearranged as: \begin{align*} y(t) = C \cos (\omega t - \phi) = C \cos (\omega t - \phi + \pi/2) = C \sin (\omega t + \phi') \end{align*} with: \begin{align*} C & = \sqrt{A^2 + B^2}\\ \phi & = \tan^{-1} (B/A)\\ \phi' & = \pi/2 - \phi = \tan^{-1} (A/B) \end{align*} Let's look at some examples of simple and complex periodic signals. First a simple function: \begin{align*} y (t) = 2 \sin (2\pi t) \end{align*} ``` import numpy from matplotlib import pyplot %matplotlib inline t=numpy.linspace(0.0,5.0,num=1000) # (s) y = 2 * numpy.sin(2*numpy.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); ``` Now a complex function made of two frequencies (harmonics): \begin{align*} y (t) = 2 \sin (2\pi t) + 1.2 \sin (6 \pi t) \end{align*} The signal has two frequencies: 1 and 3 Hz. 1 Hz is the lowest frequency and is the fundamental frequency with period 1 s. So the signal will repeat itself every second. ``` y = 2 * numpy.sin(2*numpy.pi*t) + 1.2 * numpy.sin(6*numpy.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); ``` Let's now look at two sinusoidal with very close frequencies $\Delta f$. \begin{align*} y (t) = 2 \sin (2\pi t) + 1.2 \sin ((2+0.2) \pi t) \end{align*} ``` t=numpy.linspace(0.0,20.0,num=1000) # (s) y = 2 * numpy.sin(2*numpy.pi*t) + 1.2 * numpy.sin((2+0.2)*numpy.pi*t) pyplot.plot(t, y, color='b', linestyle='-'); ``` Here the frequency difference is $\Delta f = 0.2/2 = 0.1 Hz$. The resulting signal has a slow beat with __beat__ frequency $\Delta f)$ or beat period $1/\Delta f = 10$ s, i.e. the signal repepats itself every 10 s. Analytically (using trigonometric relations), one can show that the sum of two sine waves with close frequencies results in a signal modulated by $\cos(\Delta f/2)$. 
## Detection schemes The mixing of two signals to produce a signal (wave) with a new frequency is called heterodyning and is commonly used in instrumentation to obtain very accurate measurements. __Heterodyne detection__ shifts the frequency content of a signal into a new range where it is easier to detected; in communucation it is called _frequency conversion_. Heterodyning is used in laser Doppler velocimetry, tuning of musical instruments, radio receivers, etc. In contrast, __homodyne detection__ uses a single (homo) frequency and compares the signal with a standard oscillation that would be identical to the signal if it carried null information. and measures the amplitude and phase of a signal to gain information. It enables to extract information encoded as modulation of the phase and/or frequency of the signal. In optics, this results in interferometry. It is also the fundation behind lock-in amplifier to extract information for very weak or noisy signals. Finally in __magnitude detection__ one only records the amplitude of signals. This is the most common detection scheme used. ## Statistical description of signals
github_jupyter
# Navigation --- You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started! ### 1. Start the Environment Run the next code cell to install a few packages. This line will take a few minutes to run! ``` !pip -q install ./python ``` The environment is already saved in the Workspace and can be accessed at the file path provided below. Please run the next code cell without making any changes. ``` from unityagents import UnityEnvironment import numpy as np # please do not modify the line below env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64") import gym !pip3 install box2d import random import torch import numpy as np from collections import deque import matplotlib.pyplot as plt %matplotlib inline !python -m pip install pyvirtualdisplay from pyvirtualdisplay import Display display = Display(visible=0, size=(1400, 900)) display.start() is_ipython = 'inline' in plt.get_backend() if is_ipython: from IPython import display plt.ion() ``` Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. ``` # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] ``` ### 2. Examine the State and Action Spaces Run the code cell below to print some information about the environment. ``` # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size) ``` ### 3. 
Take Random Actions in the Environment In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. Note that **in this coding environment, you will not be able to watch the agent while it is training**, and you should set `train_mode=True` to restart the environment. ``` env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] # get the current state score = 0 # initialize the score while True: action = np.random.randint(action_size) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Score: {}".format(score)) ``` When finished, you can close the environment. ### 4. It's Your Turn! Now it's your turn to train your own agent to solve the environment! A few **important notes**: - When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following: ```python env_info = env.reset(train_mode=True)[brain_name] ``` - To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook. - In this coding environment, you will not be able to watch the agent while it is training. However, **_after training the agent_**, you can download the saved model weights to watch the agent on your own machine! 
# Training the network
github_jupyter
``` !pip install mysql-connector-python import mysql.connector as connection try: mydb = connection.connect(host="localhost",user="root", passwd="mysql",use_pure=True) # check if the connection is established query = "SHOW DATABASES" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) #print(cursor.fetchall()) except Exception as e: mydb.close() print(str(e)) import mysql.connector as connection conn = connection.connect(host="localhost",user="root", passwd="mysql",use_pure=True) cur = conn.cursor() cur.execute("create database sudhanshu12345") res = cur.fetchall() res for i in res : print(i[0]) import mysql.connector as connection try: mydb = connection.connect(host="localhost", user="root", passwd="mysql",use_pure=True) # check if the connection is established print(mydb.is_connected()) query = "Create database Student;" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) print("Database Created!!") mydb.close() except Exception as e: mydb.close() print(str(e)) import mysql.connector as connection try: mydb = connection.connect(host="localhost", database = 'sudhanshu12345',user="root", passwd="mysql",use_pure=True) # check if the connection is established print(mydb.is_connected()) query = "CREATE TABLE StudentDetails (Studentid INT(10) AUTO_INCREMENT PRIMARY KEY,FirstName VARCHAR(60)," \ "LastName VARCHAR(60), RegistrationDate DATE,Class Varchar(20), Section Varchar(10))" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) print("Table Created!!") mydb.close() except Exception as e: mydb.close() print(str(e)) mydb = connection.connect(host="localhost", database = 'sudhanshu12345',user="root", passwd="mysql",use_pure=True) mydb.is_connected() cur = mydb.cursor() cur.execute("create table test(x1 INT(5) , x2 VARCHAR(20) ,x3 DATE)") mydb.close() import mysql.connector as connection try: mydb = connection.connect(host="localhost", database = 'Student',user="root", 
passwd="mysql",use_pure=True) # check if the connection is established print(mydb.is_connected()) query = "INSERT INTO StudentDetails VALUES ('1132','Sachin','Kumar','1997-11-11','Eleventh','A')" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) print("Values inserted into the table!!") mydb.commit() mydb.close() except Exception as e: mydb.close() print(str(e)) mydb = connection.connect(host="localhost", database = 'sudhanshu12345',user="root", passwd="mysql",use_pure=True) cur = mydb.cursor() cur.execute("insert into test values(4564,'sudh' , '2021-05-15')") #yyyy-mm-dd mydb.commit() cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") cur.execute("insert into test values(4545,'sudhdfgd' , '2021-05-15')") import mysql.connector as connection try: mydb = connection.connect(host="localhost", database = 'GlassData',user="root", passwd="mysql",use_pure=True) #check if the connection is established print(mydb.is_connected()) query = "Select * 
from GlassData;" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) for result in cursor.fetchall(): print(result) mydb.close() #close the connection except Exception as e: #mydb.close() print(str(e)) cur1 = mydb.cursor() cur1.execute("select * from test") for i in cur1.fetchall(): print(i) cur = mydb.cursor() cur.execute("select x1, x2 from test") for i in cur.fetchall(): print(i) import mysql.connector as connection import pandas as pandas try: mydb = connection.connect(host="localhost", database='GlassData', user="root", passwd="mysql", use_pure=True) # check if the connection is established print(mydb.is_connected()) query = "Select * from GlassData;" result_dataFrame = pandas.read_sql(query,mydb) print(result_dataFrame) mydb.close() # close the connection except Exception as e: #mydb.close() print(str(e)) import pandas as pd mydb a = pd.read_sql("select x3,x1 from test",mydb) a.to_csv("mydata.csv") mydb = connection.connect(host="localhost", database='sudhanshu12345', user="root", passwd="mysql", use_pure=True) mydb cur = mydb.cursor() cur.execute("create table mydata(n1 int(20) , n2 varchar(20) , n3 date)") data = pd.read_csv('mydata.csv') data data1 = pd.read_sql('select * from test' , mydb) data1.to_sql('mydata1',mydb) from sqlalchemy import create_engine >>> engine = create_engine('sqlite://', echo=False) Create a table from scratch with 3 rows. >>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']}) >>> df name 0 User 1 1 User 2 2 User 3 >>> df.to_sql('users', con=engine) >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3')] An `sqlalchemy.engine.Connection` can also be passed to to `con`: >>> with engine.begin() as connection: ... df1 = pd.DataFrame({'name' : ['User 4', 'User 5']}) ... df1.to_sql('users', con=connection, if_exists='append') This is allowed to support operations that require that the same DBAPI connection is used for the entire operation. 
>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']}) >>> df2.to_sql('users', con=engine, if_exists='append') >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 1'), (1, 'User 2'), (2, 'User 3'), (0, 'User 4'), (1, 'User 5'), (0, 'User 6'), (1, 'User 7')] Overwrite the table with just ``df2``. >>> df2.to_sql('users', con=engine, if_exists='replace', ... index_label='id') >>> engine.execute("SELECT * FROM users").fetchall() [(0, 'User 6'), (1, 'User 7')] Specify the dtype (especially useful for integers with missing values). Notice that while pandas is forced to store the data as floating point, the database supports nullable integers. When fetching the data with Python, we get back integer scalars. >>> df = pd.DataFrame({"A": [1, None, 2]}) >>> df A 0 1.0 1 NaN 2 2.0 >>> from sqlalchemy.types import Integer >>> df.to_sql('integers', con=engine, index=False, ... dtype={"A": Integer()}) import mysql.connector as connection import pandas as pandas import csv try: mydb = connection.connect(host="localhost", user="root", passwd="mysql",use_pure=True) #check if the connection is established print(mydb.is_connected()) #create a new database query = "Create database GlassData;" cursor = mydb.cursor() #create a cursor to execute queries cursor.execute(query) print("Database Created!!") mydb.close() #close the connection #Establish a new connection to the database created above mydb = connection.connect(host="localhost", database = 'GlassData',user="root", passwd="mysql", use_pure=True) #create a new table to store glass data query = "CREATE TABLE IF NOT EXISTS GlassData (Index_Number INT(10),RI float(10,5), Na float(10,5), Mg float(10,5),Al float(10,5)," \ " Si float(10,5), K float(10,5), Ca float(10,5), Ba float(10,5), Fe float(10,5), Class INT(5))" cursor = mydb.cursor() # create a cursor to execute queries cursor.execute(query) print("Table Created!!") #read from the file with open('glass.data', "r") as f: next(f) glass_data = csv.reader(f, delimiter="\n") 
for line in enumerate(glass_data): for list_ in (line[1]): cursor.execute('INSERT INTO GlassData values ({values})'.format(values=(list_))) print("Values inserted!!") mydb.commit() cursor.close() mydb.close() except Exception as e: #mydb.close() print(str(e)) import mysql.connector as connection import pandas as pandas import csv mydb = connection.connect(host="localhost", user="root", passwd="mysql",use_pure=True) cur = mydb.cursor() cur.execute("CREATE TABLE sudhanshu12345.GlassData1 (Index_Number INT(10),RI float(10,5), Na float(10,5), Mg float(10,5),Al float(10,5)," \ " Si float(10,5), K float(10,5), Ca float(10,5), Ba float(10,5), Fe float(10,5), Class INT(5))") with open('glass.data',"r") as data : next(data) data_csv = csv.reader(data, delimiter= "\n") print(data_csv) for i in enumerate(data_csv): print(i) for j in i[1] : cur.execute('insert into sudhanshu12345.GlassData1 values ({data})'.format(data=(j))) print("all the data inserted ") mydb.commit() !ls with open('glass.data',"r") as data : next(data) data_csv = csv.reader(data, delimiter= "\n") print(data_csv) for i in enumerate(data_csv): print(i) for j in i[1] : print(type(j)) cur.execute('insert into sudhanshu12345.GlassData1 values ({data})'.format(data=(j))) print("all the data inserted ") mydb.commit() with open('glass.data',"r") as data : next(data) data_csv = csv.reader(data, delimiter= "\n") print(data_csv) for j in data_csv : cur.execute(f'insert into sudhanshu12345.GlassData values (str(j[0]))') print("all the data inserted ") mydb.commit() https://archive.ics.uci.edu/ml/machine-learning-databases/00448/ ```
github_jupyter
# Sentiment Analysis ## Using XGBoost in SageMaker _Deep Learning Nanodegree Program | Deployment_ --- As our first example of using Amazon's SageMaker service we will construct a random tree model to predict the sentiment of a movie review. You may have seen a version of this example in a pervious lesson although it would have been done using the sklearn package. Instead, we will be using the XGBoost package as it is provided to us by Amazon. ## Instructions Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully! In addition to implementing code, there may be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell. > **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by typically clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted. ## Step 1: Downloading the data The dataset we are going to use is very popular among researchers in Natural Language Processing, usually referred to as the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/). It consists of movie reviews from the website [imdb.com](http://www.imdb.com/), each labeled as either '**pos**itive', if the reviewer enjoyed the film, or '**neg**ative' otherwise. 
> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011. We begin by using some Jupyter Notebook magic to download and extract the dataset. ``` %mkdir ../data !wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz !tar -zxf ../data/aclImdb_v1.tar.gz -C ../data ``` ## Step 2: Preparing the data The data we have downloaded is split into various files, each of which contains a single review. It will be much easier going forward if we combine these individual files into two large files, one for training and one for testing. ``` import os import glob def read_imdb_data(data_dir='../data/aclImdb'): data = {} labels = {} for data_type in ['train', 'test']: data[data_type] = {} labels[data_type] = {} for sentiment in ['pos', 'neg']: data[data_type][sentiment] = [] labels[data_type][sentiment] = [] path = os.path.join(data_dir, data_type, sentiment, '*.txt') files = glob.glob(path) for f in files: with open(f) as review: data[data_type][sentiment].append(review.read()) # Here we represent a positive review by '1' and a negative review by '0' labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0) assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \ "{}/{} data size does not match labels size".format(data_type, sentiment) return data, labels data, labels = read_imdb_data() print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format( len(data['train']['pos']), len(data['train']['neg']), len(data['test']['pos']), len(data['test']['neg']))) from sklearn.utils import shuffle def prepare_imdb_data(data, labels): """Prepare training and test sets from IMDb movie reviews.""" #Combine positive and negative reviews and labels data_train = 
data['train']['pos'] + data['train']['neg'] data_test = data['test']['pos'] + data['test']['neg'] labels_train = labels['train']['pos'] + labels['train']['neg'] labels_test = labels['test']['pos'] + labels['test']['neg'] #Shuffle reviews and corresponding labels within training and test sets data_train, labels_train = shuffle(data_train, labels_train) data_test, labels_test = shuffle(data_test, labels_test) # Return a unified training data, test data, training labels, test labets return data_train, data_test, labels_train, labels_test train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels) print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X))) train_X[100] ``` ## Step 3: Processing the data Now that we have our training and testing datasets merged and ready to use, we need to start processing the raw data into something that will be useable by our machine learning algorithm. To begin with, we remove any html formatting that may appear in the reviews and perform some standard natural language processing in order to homogenize the data. 
``` import nltk nltk.download("stopwords") from nltk.corpus import stopwords from nltk.stem.porter import * stemmer = PorterStemmer() import re from bs4 import BeautifulSoup def review_to_words(review): text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case words = text.split() # Split string into words words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords words = [PorterStemmer().stem(w) for w in words] # stem return words import pickle cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists def preprocess_data(data_train, data_test, labels_train, labels_test, cache_dir=cache_dir, cache_file="preprocessed_data.pkl"): """Convert each review to words; read from cache if available.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = pickle.load(f) print("Read preprocessed data from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Preprocess training and test data to obtain words for each review #words_train = list(map(review_to_words, data_train)) #words_test = list(map(review_to_words, data_test)) words_train = [review_to_words(review) for review in data_train] words_test = [review_to_words(review) for review in data_test] # Write to cache file for future runs if cache_file is not None: cache_data = dict(words_train=words_train, words_test=words_test, labels_train=labels_train, labels_test=labels_test) with open(os.path.join(cache_dir, cache_file), "wb") as f: pickle.dump(cache_data, f) print("Wrote preprocessed data to cache file:", cache_file) else: # Unpack data loaded from cache file words_train, words_test, 
labels_train, labels_test = (cache_data['words_train'], cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test']) return words_train, words_test, labels_train, labels_test # Preprocess data train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y) ``` ### Extract Bag-of-Words features For the model we will be implementing, rather than using the reviews directly, we are going to transform each review into a Bag-of-Words feature representation. Keep in mind that 'in the wild' we will only have access to the training set so our transformer can only use the training set to construct a representation. ``` import numpy as np from sklearn.feature_extraction.text import CountVectorizer from sklearn.externals import joblib # joblib is an enhanced version of pickle that is more efficient for storing NumPy arrays def extract_BoW_features(words_train, words_test, vocabulary_size=5000, cache_dir=cache_dir, cache_file="bow_features.pkl"): """Extract Bag-of-Words for a given set of documents, already preprocessed into words.""" # If cache_file is not None, try to read from it first cache_data = None if cache_file is not None: try: with open(os.path.join(cache_dir, cache_file), "rb") as f: cache_data = joblib.load(f) print("Read features from cache file:", cache_file) except: pass # unable to read from cache, but that's okay # If cache is missing, then do the heavy lifting if cache_data is None: # Fit a vectorizer to training documents and use it to transform them # NOTE: Training documents have already been preprocessed and tokenized into words; # pass in dummy functions to skip those steps, e.g. 
preprocessor=lambda x: x vectorizer = CountVectorizer(max_features=vocabulary_size, preprocessor=lambda x: x, tokenizer=lambda x: x) # already preprocessed features_train = vectorizer.fit_transform(words_train).toarray() # Apply the same vectorizer to transform the test documents (ignore unknown words) features_test = vectorizer.transform(words_test).toarray() # NOTE: Remember to convert the features using .toarray() for a compact representation # Write to cache file for future runs (store vocabulary as well) if cache_file is not None: vocabulary = vectorizer.vocabulary_ cache_data = dict(features_train=features_train, features_test=features_test, vocabulary=vocabulary) with open(os.path.join(cache_dir, cache_file), "wb") as f: joblib.dump(cache_data, f) print("Wrote features to cache file:", cache_file) else: # Unpack data loaded from cache file features_train, features_test, vocabulary = (cache_data['features_train'], cache_data['features_test'], cache_data['vocabulary']) # Return both the extracted features as well as the vocabulary return features_train, features_test, vocabulary # Extract Bag of Words features for both training and test datasets train_X, test_X, vocabulary = extract_BoW_features(train_X, test_X) ``` ## Step 4: Classification using XGBoost Now that we have created the feature representation of our training (and testing) data, it is time to start setting up and using the XGBoost classifier provided by SageMaker. ### (TODO) Writing the dataset The XGBoost classifier that we will be using requires the dataset to be written to a file and stored using Amazon S3. To do this, we will start by splitting the training dataset into two parts, the data we will train the model with and a validation set. Then, we will write those datasets to a file and upload the files to S3. In addition, we will write the test set input to a file and upload the file to S3. 
This is so that we can use SageMakers Batch Transform functionality to test our model once we've fit it. ``` import pandas as pd from sklearn.model_selection import train_test_split # TODO: Split the train_X and train_y arrays into the DataFrames val_X, train_X and val_y, train_y. Make sure that # val_X and val_y contain 10 000 entires while train_X and train_y contain the remaining 15 000 entries. X = train_X y = train_y train_X, val_X, train_y, val_y = train_test_split(X, y, test_size=10000, random_state=42) ``` The documentation for the XGBoost algorithm in SageMaker requires that the saved datasets should contain no headers or index and that for the training and validation data, the label should occur first for each sample. For more information about this and other algorithms, the SageMaker developer documentation can be found on __[Amazon's website.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ ``` # First we make sure that the local directory in which we'd like to store the training and validation csv files exists. data_dir = '../data/xgboost' if not os.path.exists(data_dir): os.makedirs(data_dir) # First, save the test data to test.csv in the data_dir directory. Note that we do not save the associated ground truth # labels, instead we will use them later to compare with our model output. pd.DataFrame(test_X).to_csv(os.path.join(data_dir, 'test.csv'), header=False, index=False) # TODO: Save the training and validation data to train.csv and validation.csv in the data_dir directory. # Make sure that the files you create are in the correct format. pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X)], axis=1).to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False) pd.concat([pd.DataFrame(val_y), pd.DataFrame(val_X)], axis=1).to_csv(os.path.join(data_dir, 'validation.csv'), header=False, index=False) # To save a bit of memory we can set text_X, train_X, val_X, train_y and val_y to None. 
test_X = train_X = val_X = train_y = val_y = None ``` ### (TODO) Uploading Training / Validation files to S3 Amazon's S3 service allows us to store files that can be access by both the built-in training models such as the XGBoost model we will be using as well as custom models such as the one we will see a little later. For this, and most other tasks we will be doing using SageMaker, there are two methods we could use. The first is to use the low level functionality of SageMaker which requires knowing each of the objects involved in the SageMaker environment. The second is to use the high level functionality in which certain choices have been made on the user's behalf. The low level approach benefits from allowing the user a great deal of flexibility while the high level approach makes development much quicker. For our purposes we will opt to use the high level approach although using the low-level approach is certainly an option. Recall the method `upload_data()` which is a member of object representing our current SageMaker session. What this method does is upload the data to the default bucket (which is created if it does not exist) into the path described by the key_prefix variable. To see this for yourself, once you have uploaded the data files, go to the S3 console and look to see where the files have been uploaded. For additional resources, see the __[SageMaker API documentation](http://sagemaker.readthedocs.io/en/latest/)__ and in addition the __[SageMaker Developer Guide.](https://docs.aws.amazon.com/sagemaker/latest/dg/)__ ``` import sagemaker session = sagemaker.Session() # Store the current SageMaker session # S3 prefix (which folder will we use) prefix = 'sentiment-xgboost' # TODO: Upload the test.csv, train.csv and validation.csv files which are contained in data_dir to S3 using sess.upload_data(). 
test_location = session.upload_data(os.path.join(data_dir, 'test.csv'),key_prefix=prefix) val_location = session.upload_data(os.path.join(data_dir, 'validation.csv'),key_prefix=prefix) train_location = session.upload_data(os.path.join(data_dir, 'train.csv'),key_prefix=prefix) ``` ### (TODO) Creating the XGBoost model Now that the data has been uploaded it is time to create the XGBoost model. To begin with, we need to do some setup. At this point it is worth discussing what a model is in SageMaker. It is easiest to think of a model of comprising three different objects in the SageMaker ecosystem, which interact with one another. - Model Artifacts - Training Code (Container) - Inference Code (Container) The Model Artifacts are what you might think of as the actual model itself. For example, if you were building a neural network, the model artifacts would be the weights of the various layers. In our case, for an XGBoost model, the artifacts are the actual trees that are created during training. The other two objects, the training code and the inference code are then used the manipulate the training artifacts. More precisely, the training code uses the training data that is provided and creates the model artifacts, while the inference code uses the model artifacts to make predictions on new data. The way that SageMaker runs the training and inference code is by making use of Docker containers. For now, think of a container as being a way of packaging code up so that dependencies aren't an issue. ``` from sagemaker import get_execution_role # Our current execution role is require when creating the model as the training # and inference code will need to access the model artifacts. role = get_execution_role() # We need to retrieve the location of the container which is provided by Amazon for using XGBoost. # As a matter of convenience, the training and inference code both use the same container. 
from sagemaker.amazon.amazon_estimator import get_image_uri container = get_image_uri(session.boto_region_name, 'xgboost') # TODO: Create a SageMaker estimator using the container location determined in the previous cell. # It is recommended that you use a single training instance of type ml.m4.xlarge. It is also # recommended that you use 's3://{}/{}/output'.format(session.default_bucket(), prefix) as the # output path. s3_output_location = 's3://{}/{}/output'.format(session.default_bucket(),prefix) xgb = sagemaker.estimator.Estimator(container, role, train_instance_count=1, train_instance_type='ml.m5.large', train_volume_size = 1, output_path=s3_output_location, sagemaker_session=sagemaker.Session()) # TODO: Set the XGBoost hyperparameters in the xgb object. Don't forget that in this case we have a binary # label so we should be using the 'binary:logistic' objective. xgb.set_hyperparameters(max_depth=5, eta=0.2, gamma=4, min_child_weight=6, subsample=0.8, silent=0, objective='binary:logistic', early_stopping_rounds=10, num_round=500) ``` ### Fit the XGBoost model Now that our model has been set up we simply need to attach the training and validation datasets and then ask SageMaker to set up the computation. ``` s3_input_train = sagemaker.s3_input(s3_data=train_location, content_type='csv') s3_input_validation = sagemaker.s3_input(s3_data=val_location, content_type='csv') xgb.fit({'train': s3_input_train, 'validation': s3_input_validation}) ``` ### (TODO) Testing the model Now that we've fit our XGBoost model, it's time to see how well it performs. To do this we will use SageMakers Batch Transform functionality. Batch Transform is a convenient way to perform inference on a large dataset in a way that is not realtime. That is, we don't necessarily need to use our model's results immediately and instead we can peform inference on a large number of samples. An example of this in industry might be peforming an end of month report. 
This method of inference can also be useful to us as it means we can perform inference on our entire test set.
``` predictions = pd.read_csv(os.path.join(data_dir, 'test.csv.out'), header=None) predictions = [round(num) for num in predictions.squeeze().values] from sklearn.metrics import accuracy_score accuracy_score(test_y, predictions) ``` ## Optional: Clean up The default notebook instance on SageMaker doesn't have a lot of excess disk space available. As you continue to complete and execute notebooks you will eventually fill up this disk space, leading to errors which can be difficult to diagnose. Once you are completely finished using a notebook it is a good idea to remove the files that you created along the way. Of course, you can do this from the terminal or from the notebook hub if you would like. The cell below contains some commands to clean up the created files from within the notebook. ``` # First we will remove all of the files contained in the data_dir directory !rm $data_dir/* # And then we delete the directory itself !rmdir $data_dir # Similarly we will remove the files in the cache_dir directory and the directory itself !rm $cache_dir/* !rmdir $cache_dir ```
github_jupyter
# Objects *Python* is an object oriented language. As such it allows the definition of classes. For instance lists are also classes, that's why there are methods associated with them (i.e. `append()`). Here we will see how to create classes and assign them attributes and methods. ## Definition and initialization A class gathers functions (called methods) and variables (called attributes). The main of goal of having this kind of structure is that the methods can share a common set of inputs to operate and get the desired outcome by the programmer. In *Python* classes are defined with the word `class` and are always initialized with the method ``__init__``, which is a function that *always* must have as input argument the word `self`. The arguments that come after `self` are used to initialize the class attributes. In the following example we create a class called ``Circle``. ``` class Circle: def __init__(self, radius): self.radius = radius #all attributes must be preceded by "self." ``` To create an instance of this class we do it as follows ``` A = Circle(5.0) ``` We can check that the initialization worked out fine by printing its attributes ``` print(A.radius) ``` We now redefine the class to add new method called `area` that computes the area of the circle ``` class Circle: def __init__(self, radius): self.radius = radius #all attributes must be preceded by "self." def area(self): import math return math.pi * self.radius * self.radius A = Circle(1.0) print(A.radius) print(A.area()) ``` ### Exercise 3.1 Redefine the class `Circle` to include a new method called `perimeter` that returns the value of the circle's perimeter. We now want to define a method that returns a new Circle with twice the radius of the input Circle. ``` class Circle: def __init__(self, radius): self.radius = radius #all attributes must be preceded by "self." 
def area(self): import math return math.pi * self.radius * self.radius def enlarge(self): return Circle(2.0*self.radius) A = Circle(5.0) # Create a first circle B = A.enlarge() # Use the method to create a new Circle print(B.radius) # Check that the radius is twice as the original one. ``` We now add a new method that takes as an input another element of the class `Circle` and returns the total area of the two circles ``` class Circle: def __init__(self, radius): self.radius = radius #all attributes must be preceded by "self." def area(self): import math return math.pi * self.radius * self.radius def enlarge(self): return Circle(2.0*self.radius) def add_area(self, c): return self.area() + c.area() A = Circle(1.0) B = Circle(2.0) print(A.add_area(B)) print(B.add_area(A)) ``` ### Exercise 3.2 Define the class `Vector3D` to represent vectors in 3D. The class must have * Three attributes: `x`, `y`, and `z`, to store the coordinates. * A method called `dot` that computes the dot product $$\vec{v} \cdot \vec{w} = v_{x}w_{x} + v_{y}w_{y} + v_{z}w_{z}$$ The method could then be used as follows ```python v = Vector3D(2, 0, 1) w = Vector3D(1, -1, 3) ``` ```python v.dot(w) 5 ```
github_jupyter
# IntegratedML applied to biomedical data, using PyODBC This notebook demonstrates the following: - Connecting to InterSystems IRIS via PyODBC connection - Creating, Training and Executing (PREDICT() function) an IntegratedML machine learning model, applied to breast cancer tumor diagnoses - INSERTING machine learning predictions into a new SQL table - Executing a relatively complex SQL query containing IntegratedML PREDICT() and PROBABILITY() functions, and flexibly using the results to filter and sort the output ### ODBC and pyODBC Resources Often, connecting to a database is more than half the battle when developing SQL-heavy applications, especially if you are not familiar with the tools, or more importantly the particular database system. If this is the case, and you are just getting started using PyODBC and InterSystems IRIS, this notebook and these resources below may help you get up to speed! https://gettingstarted.intersystems.com/development-setup/odbc-connections/ https://irisdocs.intersystems.com/irislatest/csp/docbook/DocBook.UI.Page.cls?KEY=BNETODBC_support#BNETODBC_support_pyodbc https://stackoverflow.com/questions/46405777/connect-docker-python-to-sql-server-with-pyodbc https://stackoverflow.com/questions/44527452/cant-open-lib-odbc-driver-13-for-sql-server-sym-linking-issue ``` # make the notebook full screen from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) ``` ### 1. 
Install system packages for ODBC ``` !apt-get update !apt-get install gcc !apt-get install -y tdsodbc unixodbc-dev !apt install unixodbc-bin -y !apt-get clean ``` #### Use this command to troubleshoot a failed pyodbc installation: !pip install --upgrade --global-option=build_ext --global-option="-I/usr/local/include" --global-option="-L/usr/local/lib" pyodbc ``` !pip install pyodbc !rm /etc/odbcinst.ini !rm /etc/odbc.ini !ln -s /tf/odbcinst.ini /etc/odbcinst.ini !ln -s /tf/odbc.ini /etc/odbc.ini !cat /tf/odbcinst.ini !cat /tf/odbc.ini !odbcinst -j ``` ### 2. Verify you see "InterSystems ODBC35" in the drivers list ``` import pyodbc print(pyodbc.drivers()) ``` ### 3. Get an ODBC connection ``` import pyodbc import time #input("Hit any key to start") dsn = 'IRIS QuickML demo via PyODBC' server = 'irisimlsvr' #'192.168.99.101' port = '51773' #'9091' database = 'USER' username = 'SUPERUSER' password = 'SYS' cnxn = pyodbc.connect('DRIVER={InterSystems ODBC35};SERVER='+server+';PORT='+port+';DATABASE='+database+';UID='+username+';PWD='+ password) ### Ensure it read strings correctly. cnxn.setdecoding(pyodbc.SQL_CHAR, encoding='utf8') cnxn.setdecoding(pyodbc.SQL_WCHAR, encoding='utf8') cnxn.setencoding(encoding='utf8') ``` ### 4. Get a cursor; start the timer ``` cursor = cnxn.cursor() start= time.clock() ``` ### 5. specify the training data, and give a model name ``` dataTable = 'SQLUser.BreastCancer' dataTablePredict = 'Result02' dataColumn = 'Diagnosis' dataColumnPredict = "PredictedDiagnosis" modelName = "bc" #chose a name - must be unique in server end ``` ### 6. 
Train and predict ``` cursor.execute("CREATE MODEL %s PREDICTING (%s) FROM %s" % (modelName, dataColumn, dataTable)) cursor.execute("TRAIN MODEL %s FROM %s" % (modelName, dataTable)) cursor.execute("Create Table %s (%s VARCHAR(100), %s VARCHAR(100))" % (dataTablePredict, dataColumnPredict, dataColumn)) cursor.execute("INSERT INTO %s SELECT TOP 20 PREDICT(%s) AS %s, %s FROM %s" % (dataTablePredict, modelName, dataColumnPredict, dataColumn, dataTable)) cnxn.commit() ``` ### 7. Show the predict result ``` import pandas as pd from IPython.display import display df1 = pd.read_sql("SELECT * from %s ORDER BY ID" % dataTablePredict, cnxn) display(df1) ``` ### 8. Show a complicated query IntegratedML function PREDICT() and PROBABILITY() can appear virtually anywhere in a SQL query, for maximal flexibility! Below we are SELECTing columns as well as the result of the PROBABILITY function, and then filtering on the result of the PREDICT function. To top it off, ORDER BY is using the output of PROBSBILITY for sorting. ``` df2 = pd.read_sql("SELECT ID, PROBABILITY(bc FOR 'M') AS Probability, Diagnosis FROM %s \ WHERE MeanArea BETWEEN 300 AND 600 AND MeanRadius > 5 AND PREDICT(%s) = 'M' \ ORDER BY Probability" % (dataTable, modelName),cnxn) display(df2) ``` ### 9. Close and clean ``` cnxn.close() end= time.clock() print ("Total elapsed time: ") print (end-start) #input("Hit any key to end") ```
github_jupyter
# Advanced Data Wrangling with Pandas ``` import pandas as pd import numpy as np ``` ## Formas não usuais de se ler um dataset Você não precisa que o arquivo com os seus dados esteja no seu disco local, o pandas está preparado para adquirir arquivos via http, s3, gs... ``` diamonds = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/diamonds.csv") diamonds.head() ``` Você também pode crawlear uma tabela de uma página da internet de forma simples ``` clarity = pd.read_html("https://www.brilliantearth.com/diamond-clarity/") clarity clarity = clarity[0] clarity clarity.columns = ['clarity', 'clarity_description'] clarity ``` ## Como explodir a coluna de um dataframe ``` clarity['clarity'] = clarity['clarity'].str.split() clarity type(clarity.loc[0, 'clarity']) clarity = clarity.explode("clarity") clarity ``` ## Como validar o merge Esse parametro serve para validar a relação entre as duas tabelas que você está juntando. Por exemplo, se a relação é 1 para 1, 1 para muitos, muitos para 1 ou muitos para muitos. ``` diamonds.merge(clarity, on='clarity', validate="m:1") clarity_with_problem = clarity.append(pd.Series({"clarity": "SI2", "clarity_description": "slightly included"}), ignore_index=True) clarity_with_problem diamonds.merge(clarity_with_problem, on='clarity', validate="m:1") diamonds.merge(clarity_with_problem, on='clarity') ``` ### Por que isso é importante? O que aconteceria seu tivesse keys duplicadas no meu depara. Ele duplicou as minhas linhas que tinham a key duplicada, o dataset foi de 53,940 linhas para 63,134 linhas ## Como usar o método `.assign` Para adicionar ou modificar colunas do dataframe. Você pode passar como argumento uma constante para a coluna ou um função que tenha como input um `pd.DataFrame` e output uma `pd.Series`. 
``` diamonds.assign(foo="bar", bar="foo") diamonds.assign(volume=lambda df: df['x'] * df['y'] * df['z']) def calculate_volume(df): return df['x'] * df['y'] * df['z'] diamonds.assign(volume=calculate_volume) diamonds['volume'] = diamonds['x'] * diamonds['y'] * diamonds['z'] diamonds ``` ## Como usar o método `.query` Para filtrar. Tende a ser util quando você quer filtrar o dataframe baseado em algum estado intermediário ``` diamonds = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/diamonds.csv") diamonds.head() diamonds.describe() diamonds[(diamonds['x'] == 0) | (diamonds['y'] == 0) | (diamonds['z'] == 0)] diamonds.query("x == 0 | y == 0 | z == 0") x = diamonds \ .assign(volume=lambda df: df['x'] * df['y'] * df['z']) x = x[x['volume'] > 0] diamonds = diamonds \ .assign(volume=lambda df: df['x'] * df['y'] * df['z']) \ .query("volume > 0") diamonds ``` Você também pode usar variáveis externas ao dataframe dentro da sua query, basta usar @ como marcador. ``` selected_cut = "Premium" diamonds.query("cut == @selected_cut") ``` Quase qualquer string que seria um código python válido, vai ser uma query valida ``` diamonds.query("clarity.str.startswith('SI')") ``` Porém o parser do pandas tem algumas particularidades, como o `==` que também pode ser um `isin` ``` diamonds.query("color == ['E', 'J']") diamonds = diamonds.query("x != 0 & y != 0 & z != 0") ``` Exemplo de que precisamos do estado intermediário para fazer um filtro. Você cria uma nova coluna e quer filtrar baseado nela sem precisar salvar esse resultado em uma variável intermerdiária ## Como usar o método `.loc` e `.iloc` Uma das desvantagens do `.query` é que fica mais difícil fazer análise estática do código, os editores geralmente não suportam syntax highlighting. Um jeito de solucionar esse problemas é usando o `.loc` ou `.iloc`, que além de aceitarem mascaras, eles aceitam funções também. 
``` diamonds.loc[[0, 1, 2], ['clarity', 'depth']] diamonds.iloc[[0, 1, 2], [3, 4]] diamonds.sort_values("depth") diamonds.sort_values("depth").loc[[0, 1, 2]] diamonds.sort_values("depth").iloc[[0, 1, 2]] diamonds.loc[diamonds["price"] > 6000] diamonds["price"] > 6000 diamonds.loc[lambda x: x['price'] > 6000] diamonds[diamonds['price'] > 10000]['price'] = 10000 diamonds.query("price > 10000") diamonds.loc[diamonds['price'] > 10000, 'price'] = 10000 diamonds.query("price > 10000") ``` ## O que o `.groupby(...) retorna` ``` diamonds = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/diamonds.csv") \ .assign(volume=lambda x: x['x'] * x['y'] * x['z']) \ .query("volume > 0") diamonds.head() grouped_diamonds = diamonds.groupby("cut") grouped_diamonds list(grouped_diamonds) ``` ## Os N formatos de agregação do pandas A função `.agg` é um *alias* da função `.aggregate`, então elas tem o mesmo resultado. O Pandas tem algumas funções padrão que permitem que você passe só o nome delas, ao invés do *callable*: * "all" * "any" * "count" * "first" * "idxmax" * "idxmin" * "last" * "mad" * "max" * "mean" * "median" * "min" * "nunique" * "prod" * "sem" * "size" * "skew" * "std" * "sum" * "var" Você pode passar uma lista de callable e o pandas vai aplicar todas as funções para todas as colunas. Faz sentido se são muitas funções e poucas colunas. Um problema é que ele vai nomear as novas colunas com base na coluna anterior e na função, quando você usa uma lambda isso causa um problema. ``` diamonds.groupby('clarity').agg(['mean', 'sum', np.max, lambda x: x.min()]) ``` Você também pode passar um dicionário de listas, assim você pode escolher qual função será aplicada em cada coluna, você ainda tem o problema de nome das novas colunas ao usar uma função anônima. ``` diamonds.groupby('clarity').agg({"x": 'mean', 'price': [np.max, 'max', max, lambda x: x.max()]}) ``` A terceira opção é o NamedAgg foi lançada recentemente. 
Ela resolve o problema de nomes de colunas. Você passa como parâmetro uma tupla para cada agregação que você quer. O primeiro elemento é o nome da coluna e o segundo é a função. \* *O Dask ainda não aceita esse tipo de agregação* ``` diamonds.groupby('clarity').agg(max_price=('price', 'max'), total_cost=('price', lambda x: x.sum())) ``` ## `.groupby(...).apply(...)` Um problema comum a todas essas abordagens é que você não consegue fazer uma agregação que depende de duas colunas. Para a maior parte dos casos existe uma forma razoável de resolver esse problema criando uma nova coluna e aplicando a agregação nela. Porém, se isso não foi possível, dá para usar o `.groupby(...).apply()`. ``` # Nesse caso ao invés da função de agregação receber a pd.Series relativa ao grupo, # ela vai receber o subset do grupo. Aqui vamos printar cada grupo do df de forma # separada diamonds.groupby('cut').apply(lambda x: print(x.head().to_string() + "\n")) ``` Esse formato de agregação introduz algumas complexidades, porque sua função pode retornar tanto um pd.DataFrame, pd.Series ou um escalar. O pandas vai tentar fazer um broadcasting do que você retorna para algo que ele acha que faz sentido. 
Exemplos: Se você retornar um escalar, o apply vai retornar uma `pd.Series` em que cada elemento corresponde a um grupo do .groupby ``` # Retornando um escalar def returning_scalar(df: pd.DataFrame) -> float: return (df["x"] * df["y"] * df['z']).mean() diamonds.groupby("cut").apply(returning_scalar) ``` Se você retornar uma `pd.Series` nomeada, o apply vai retornar um `pd.DataFrame` em que cada linha corresponde a um grupo do `.groupby` e cada coluna corresponde a uma key do pd.Series que você retorna na sua função de agregação ``` def returning_named_series(df: pd.DataFrame) -> pd.Series: volume = (df["x"] * df["y"] * df['z']) price_to_volume = df['price'] / volume return pd.Series({"mean_volume": volume.mean(), "mean_price_to_volume": price_to_volume.mean()}) diamonds.groupby("cut").apply(returning_named_series) ``` Se você retornar um `pd.DataFrame`, o apply vai retornar uma concatenação dos desses `pd.DataFrame` ``` def returning_dataframe(df: pd.DataFrame) -> pd.DataFrame: return df[df['volume'] >= df['volume'].median()] diamonds.groupby("cut").apply(returning_dataframe) ``` Se você retornar uma `pd.Series` não nomeada, o apply vai retornar uma `pd.Series` que é uma concatenação das `pd.Series` que você retorna da sua função ``` def returning_unnamed_series(df: pd.DataFrame) -> pd.Series: return df.loc[df['volume'] >= df['volume'].median(), 'volume'] diamonds.groupby("cut").apply(returning_unnamed_series) ``` De forma resumida, o `.groupby(...).apply(...)` é extremamente flexível, ele consegue filtrar, agregar e tranformar. Mas é mais complicado de usar e é bem lento se comparado aos outros métodos de agregação. Só use se necessário. 
| Saída da Função | Saída do apply | |-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Escalar | Uma pd.Series em que cada elemento corresponde a um grupo do .groupby | | pd.Series nomeada | Um pd.DataFrame em que cada linha corresponde a um grupo do .groupby e cada coluna corresponde a uma key do pd.Series que você retorna na sua função de agregação | | pd.Series não nomeada | Uma `pd.Series` que é uma concatenação das `pd.Series` que você retorna da sua função | | pd.DataFrame | Uma concatenação dos desses `pd.DataFrame` | ## Como usar o método `.pipe` O `.pipe` aplica uma função ao dataframe ``` def change_basis(df: pd.DataFrame, factor=10): df[['x', 'y', 'z']] = df[['x', 'y', 'z']] * factor return df diamonds.pipe(change_basis) ``` Nós não atribuimos o resultado da nossa operação a nenhuma variável, então teoricamente se rodarmos de novo, o resultado vai ser o mesmo. ``` diamonds.pipe(change_basis) ``` Isso acontece porque a sua função está alterando o `pd.DataFrame` original ao invés de criar uma cópia, isso é um pouco contra intuitivo porque o Pandas por padrão faz as suas operações em copias da tabela. 
Para evitar isso podemos fazer uma cópia do dataframe manualmente ``` diamonds = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/diamonds.csv") def change_basis(df: pd.DataFrame, factor=10): df = df.copy() df[['x', 'y', 'z']] = df[['x', 'y', 'z']] * factor return df diamonds.pipe(change_basis, factor=10) diamonds ``` ## Como combinar o `.assign`, `.pipe`, `.query` e `.loc` para um Pandas mais idiomático Os métodos mais importantes para *Method Chaining* são * `.assign` * `.query` * `.loc` * `.pipe` ``` diamonds = pd.read_csv("https://raw.githubusercontent.com/mwaskom/seaborn-data/master/diamonds.csv") diamonds.head() diamonds_cp = diamonds.copy() diamonds_cp[['x', 'y', 'z']] = diamonds_cp[['x', 'y', 'z']] * 10 diamonds_cp['volume'] = diamonds_cp['x'] * diamonds_cp['y'] * diamonds_cp['z'] diamonds_cp = diamonds_cp[diamonds_cp['volume'] > 0] diamonds_cp = pd.merge(diamonds_cp, clarity, on='clarity', how='left') diamonds_cp def change_basis(df: pd.DataFrame, factor=10): df = df.copy() df[['x', 'y', 'z']] = df[['x', 'y', 'z']] * factor return df diamonds \ .copy() \ .pipe(change_basis, factor=10) \ .assign(volume=lambda df: df['x'] * df['y'] * df['z']) \ .query("volume > 0") \ .merge(clarity, on='clarity', how='left') ``` Um problema que pode acontecer quando você usa o method chaining é você acabar com um bloco gigantesco que é impossível de debugar, uma boa prática é quebrar seus blocos por objetivos ## Como mandar um dataframe para a sua clipboard Geralmente isso não é uma boa pratica, mas as vezes é útil para enviar uma parte do dado por mensagem ou para colar em alguma planilha. ``` df = pd.DataFrame({'a':list('abc'), 'b':np.random.randn(3)}) df df.to_clipboard() df.to_csv("df.csv") ``` Você também pode ler da sua *clipboard* com `pd.read_clipboard(...)`. O que é uma prática pior ainda, mas em alguns casos pode ser útil. ## Recursos https://pandas.pydata.org/docs/user_guide/cookbook.html https://tomaugspurger.github.io/modern-1-intro.html
github_jupyter
01: Building a pandas Cheat Sheet, Part 1 Use the csv I've attached to answer the following questions Import pandas with the right name ``` # !workon dataanalysis import pandas as pd ``` Having matplotlib play nice with virtual environments The matplotlib library has some issues when you’re using a Python 3 virtual environment. The error looks like this: RuntimeError: Python is not installed as a framework. The Mac OS X backend will not be able to function correctly if Python is not installed as a framework. See the Python documentation for more information on installing Python as a framework on Mac OS X. Please either reinstall Python as a framework, or try one of the other backends. If you are Working with Matplotlib in a virtual enviroment see ‘Working with Matplotlib in Virtual environments’ in the Matplotlib FAQ Luckily it’s an easy fix. mkdir -p ~/.matplotlib && echo 'backend: TkAgg' >> ~/.matplotlib/matplotlibrc (ADD THIS LINE TO TERMINAL) This adds a line to the matplotlib startup script to set the backend to TkAgg, whatever that means. Set all graphics from matplotlib to display inline ``` import matplotlib.pyplot as plt #DISPLAY MOTPLOTLIB INLINE WITH THE NOTEBOOK AS OPPOSED TO POP UP WINDOW %matplotlib inline ``` Read the csv in (it should be UTF-8 already so you don't have to worry about encoding), save it with the proper boring name ``` df = pd.read_csv('07-hw-animals.csv') df # Display the names of the columns in the csv df.columns ``` Display the first 3 animals. ``` df.head(3) # Sort the animals to see the 3 longest animals. df.sort_values('length', ascending = False).head(3) # What are the counts of the different values of the "animal" column? a.k.a. how many cats and how many dogs. # Only select the dogs. (df['animal'] == 'dog').value_counts() # Display all of the animals that are greater than 40 cm. df[df['length'] > 40] ``` 'length' is the animal's length in cm. Create a new column called inches that is the length in inches. 
``` length_in = df['length']* 0.3937 df['length (in.)'] = length_in ``` Save the cats to a separate variable called "cats." Save the dogs to a separate variable called "dogs." ``` dogs = df[df['animal'] == 'dog'] cats = df[df['animal'] == 'cat'] ``` Display all of the animals that are cats and above 12 inches long. First do it using the "cats" variable, then do it using your normal dataframe. ``` cats['length'] > 12 df[(df['length'] > 12) & (df['animal'] == 'cat')] ``` What's the mean length of a cat? ``` # cats.describe() displays all stats for length cats['length'].mean() #only shows mean length cats.mean() ``` What's the mean length of a dog? ``` dogs['length'].mean() dogs['length'].describe() dogs.mean() ``` Use groupby to accomplish both of the above tasks at once. ``` df.groupby('animal')['length (in.)'].mean() ``` Make a histogram of the length of dogs. I apologize that it is so boring. ``` dogs.plot(kind='hist', y = 'length (in.)') # all the same length "/ ``` Change your graphing style to be something else (anything else!) ``` df.plot(kind="bar", x="name", y="length", color = "red", legend =False) df.plot(kind="barh", x="name", y="length", color = "red", legend =False) dogs dogs.plot(kind='bar') # dogs.plot(kind='scatter', x='name', y='length (in.)') ``` Make a horizontal bar graph of the length of the animals, with their name as the label ``` df.columns dogs['name'] dogs.plot(kind='bar', x='name', y = 'length', legend=False) ``` Make a sorted horizontal bar graph of the cats, with the larger cats on top. ``` cats.sort_values('length').plot(kind='barh', x='name', y = 'length', legend = False) ```
github_jupyter
<img src="images/kiksmeisedwengougent.png" alt="Banner" width="1100"/> <div style='color: #690027;' markdown="1"> <h1>FUNCTIES EN STRUCTUREN</h1> </div> <div class="alert alert-block alert-success"> Python kent heel wat ingebouwde functies, zoals <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">print()</span>, <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">input()</span>, <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">type()</span>, <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">int()</span> en <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">len()</span>. <br><br>Je kan ook al zelf functies definiëren. Je kent ook al herhalingsstructuren en keuzestructuren. In deze notebook oefen je dat verder in. </div> <div class="alert alert-block alert-info"> Als je bepaalde code meerdere keren wilt gebruiken, loont het de moeite om zelf een functie te definiëren.<br> Functies worden gedeclareerd met het sleutelwoord <b>def</b>. Via het sleutelwoord <b>return</b> geven ze een resultaat terug.<br> Een <b>docstring</b> verduidelijkt het doel van de functie. Een docstring staat tussen driedubbele aanhalingstekens, begint met een hoofletter en eindigt met een punt. </div> <div style='color: #690027;' markdown="1"> <h2>1. Functie met keuzestructuur, invoer, uitvoer, docstring</h2> </div> ### Voorbeeld Bij uitvoer van het volgende script wordt de gebruiker naar een willekeurig geheel getal gevraagd. <br> Erna wordt 7 gedeeld door dat getal via de functie `zevendelen()`. Het quotiënt wordt getoond. Als de gebruiker het getal 0 invoert, is er echter geen quotiënt.<br> Voer het script enkele keren uit. Probeer verschillende getallen, vergeet 0 niet. 
``` def zevendelen(getal): """Quotiënt van 7 met een getal.""" if getal != 0: resultaat = 7 / getal # als noemer niet 0 is, bereken quotiënt else: resultaat = "Er is geen quotiënt want je kan niet delen door nul." # als noemer 0 is, dan is er geen quotiënt return resultaat # invoer # 7 is teller, in te voeren getal is noemer print("Deel 7 door een geheel getal naar keuze.") noemer = int(input("Geef een geheel getal naar keuze waardoor 7 moet gedeeld worden: ")) # typecasting: string omzetten naar int # invoer verwerken quot = zevendelen(noemer) # quotiënt # uitvoer print(quot) ``` <div class="alert alert-block alert-info"> Begrijp je wat er gebeurt?<br> Er wordt gevraagd naar een getal. Jij geeft dat getal in. Deze invoer wordt geïnterpreteerd als string, maar met typecasting omgezet naar een object dat type int heeft. De variabele <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">noemer</span> refereert aan dat object met type int. <br> De invoer wordt vervolgens verwerkt. De functie wordt opgeroepen: 7 wordt gedeeld door de waarde van <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">noemer</span>, tenminste als die waarde niet nul is. Het quotiënt is een object dat type float heeft. De variabele <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">resultaat</span> verwijst naar dat float-object. In het geval dat de invoer 0 is, verwijst de variabele <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">resultaat</span> naar een object dat type <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">string</span> heeft. <br> De variabele <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">quot</span> refereert aan hetzelfde object als de variabele <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">resultaat</span>, die de functie teruggeeft. 
<br> De waarde van dat object <span style="background-color:whitesmoke; font-family:consolas; font-size:1em;">quot</span> wordt getoond. <br> <br> De volgende zin vertelt wat het doel van de functie is: Quotiënt van 7 met een getal. Het is een <b>docstring</b>. </div> <div class="alert alert-block alert-warning"> Meer uitleg over <em>typecasting</em> vind je in de notebook 'Datastructuur'. </div> ### Oefening 1.1 - Welk type heeft `noemer`? - Welk type heeft het object waarnaar `quot` verwijst? Antwoord: ### Oefening 1.2 Schrijf een script dat de vierkantswortel van een getal teruggeeft, indien dat mogelijk is. Anders verschijnt een boodschap met de reden waarom er geen is. <div style='color: #690027;' markdown="1"> <h2>2. Module Random - herhalingsstructuur</h2> </div> *In de module NumPy* is er een *module Random* waarmee men toevalsgetallen kan genereren. <br>In het volgende script worden twee functies uit de module Random gebruikt: de functie `random()` om decimale getallen te genereren en de functie `randint()`, met twee parameters, om gehele getallen te genereren. Het is dus nodig om eerst de module NumPy te importeren.<br> De getallen gegenereerd door `random()` liggen altijd in [0,1[ en de getallen gegenereerd door `randint()` liggen in het halfopen interval bepaald door de twee argumenten die je aan de functie meegeeft, de bovengrens niet inbegrepen. <br> Om deze functies op te roepen, laat je ze voorgaan door `np.random.`. ### Voorbeeld 2.1 Test het volgende script eens uit: ``` import numpy as np # for-lus wordt 10 keer doorlopen for teller in range(10): print("teller = ", teller) print(np.random.random()) # functie random() genereert decimaal getal in [0,1[ print(np.random.randint(0, 4)) # functie randint(0,4) kiest willekeurig geheel getal uit [0,4[ print() ``` ### Oefening 2.1: dobbelsteen Jan en Piet willen samen een gezelschapsspel spelen. Diegene die met twee dobbelstenen het hoogste aantal ogen gooit, mag starten. 
Ze kunnen de dobbelstenen echter niet vinden. <br>Korneel schrijft een script dat het twee keer gooien met twee dobbelstenen nabootst en als boodschap meegeeft wie er mag starten.<br> Er wordt opnieuw geworpen, zolang Jan en Piet hetzelfde aantal ogen gooien. Vul het script aan en voer het uit. ``` def worp(): """Aantal ogen na worp met twee dobbelstenen.""" dob1 = np.random.randint(..., ...) dob2 = np.random.randint(..., ...) aantal_ogen = dob1 + dob2 return aantal_ogen # Jan werpt worp_jan = worp() # Piet werpt ... # bij ex aequo opnieuw gooien while worp_jan == ....: worp_jan = worp() worp_piet = worp() print("Jan werpt een" + str(worp_jan) + ".") print("Piet werpt een" + str(worp_piet) + ".") # bepalen wat hoogste worp is en wie mag starten grootste = max(worp_jan, worp_piet) if grootste == worp_jan: beginner = "Jan" else: ... # output print(..., "mag starten.") ``` ### Voorbeeld 2.2 Met de Random-functie `rand()` kan je ook een NumPy-lijst met een bepaalde lengte genereren. *De elementen behoren tot het halfopen interval [0,1[*.<br> Met de Random-functie `randint()` kan je er een genereren met gehele getallen.<br> Test dit uit via het volgende script.<br> Let goed op de parameters van deze functies en merk op dat er gewerkt wordt met een halfopen interval. ``` willekeurige_lijst = np.random.rand(8) willekeurige_lijst_gehele_getallen = np.random.randint(10, 29, 4) print(willekeurige_lijst) print(willekeurige_lijst_gehele_getallen) ``` ### Oefening 2.2 - Genereer een NumPy-lijst met 15 elementen en waarvan de elementen gehele getallen zijn, gelegen in het interval [4,9]. <br>Laat de NumPy-lijst zien. - Genereer een NumPy-lijst met 15 elementen en waarvan de elementen kommagetallen zijn, gelegen in het interval [0, 9[. <br>Laat de NumPy-lijst zien. <div style='color: #690027;' markdown="1"> <h2>3. Repetitieve taken</h2> </div> ### Voorbeeld: DNA Het erfelijke materiaal van een levend organisme is opgeslagen in het DNA. 
Een DNA-molecuul bestaat uit twee lange strengen van nucleotiden, die in de vorm van een dubbele helix met elkaar vervlochten zijn. Nucleotiden zijn een specifieke groep organische verbindingen. Een DNA-streng bevat vier verschillende nucleotiden met een van de nucleobasen adenine, thymine, guanine en cytosine als component. Deze nucleobasen worden afgekort tot respectievelijk de letters A, T, G en C. De DNA-sequentie is de volgorde van deze nucleotiden op een streng DNA. Er zijn zeer veel sequenties mogelijk. Met de functies `choice()` uit de module Random van de module NumPy en `join()` kun je de letters kiezen en samenbrengen. Met de volgende functie kan je een willekeurige DNA-sequentie met een gewenste lengte genereren: ``` def dna_sequentie(n): """DNA-sequentie genereren met gewenste lengte.""" letters = ["A", "C", "G", "T"] keuze = [np.random.choice(letters) for i in range(n)] # kies n letters uit lijst letters en stop die in andere lijst # print(keuze) string = "".join(keuze) # elementen van keuze samenbrengen in een (samenhangende) string zonder keuze aan te passen return string ``` Genereer een DNA-sequentie van lengte 40. Alternatief: je kan een module Random gebruiken (een andere, niet die van NumPy). Je kunt dan letters kiezen uit een string i.p.v. een lijst. Met de volgende functie kan je dan een willekeurige DNA-sequentie met een gewenste lengte genereren: ``` import random def dna_sequentie_2(n): """DNA-sequentie genereren met gewenste lengte.""" letters = "ACGT" keuze = [random.choice(letters) for i in range(n)] # kies n letters uit string letters en stop die in lijst # print(keuze) string = "".join(keuze) # elementen van keuze samenbrengen in een (samenhangende) string zonder keuze aan te passen return string ``` Genereer een DNA-sequentie van lengte 30 met deze tweede functie. ### Oefening 3.1 Stel een functie op om te tellen hoeveel keer een bepaalde nucleobase in een gegeven DNA-sequentie voorkomt. 
Test de functie uit: laat tellen hoeveel keer de nucleobase "T" voorkomt in de DNA-sequentie "ATGCGGACCTAT".

### Oefening 3.2

Gebruik beide functies (een uit het voorbeeld en de functie die je zelf maakte) samen in een script om te tellen hoeveel keer elke nucleobase in een willekeurig gegenereerde DNA-sequentie voorkomt.

<div class="alert alert-block alert-info">De computer is razendsnel om repetitieve taken te doen, zoals een letter opsporen in een lange string. </div>

<div class="alert alert-block alert-warning">Het neurale netwerk van het project 'KIKS' zal geen letter opsporen, maar wel een huidmondje. Het neurale netwerk doorloopt de volledige foto door er een vierkant venster over te laten glijden en na te gaan of er binnen het vierkantje een huidmondje te zien is. </div>

<img src="images/cclic.png" alt="Banner" align="left" style="width:100px;"/><br><br>
Notebook KIKS, zie <a href="http://www.aiopschool.be">AI Op School</a>, van F. wyffels & N. Gesquière is in licentie gegeven volgens een <a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>.

<div>
<h2>Met steun van</h2>
</div>

<img src="images/kikssteun.png" alt="Banner" width="800"/>
github_jupyter
```
# Code to load the data etc.
import pandas as pd

# Read the data
credit_card_file_path = 'data/AER_credit_card_data.csv' # Set file path of the data.
data = pd.read_csv(credit_card_file_path, true_values = ['yes'], false_values = ['no']) # Read the data and store in a data frame.

# Select target
y = data.card

# Select predictors
X = data.drop(['card'], axis=1)

print("Number of rows in the dataset:", X.shape[0])
X.head()

# Use cross-validation to ensure accurate measures of model quality (Small dataset)
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Since there is no preprocessing, a pipeline is unnecessary, but it is good practice all the same!
my_pipeline = make_pipeline(RandomForestClassifier(n_estimators=100))
cv_scores = cross_val_score(my_pipeline, X, y, cv=5, scoring='accuracy')

print("Cross-validation accuracy: %f" % cv_scores.mean())
# Example output: "Cross-validation accuracy: 0.980292"

# Basic data comparisons, in order to try and detect data leakage
expenditures_cardholders = X.expenditure[y]
expenditures_noncardholders = X.expenditure[~y]

print('Fraction of those who did not receive a card and had no expenditures: %.2f' \
%((expenditures_noncardholders == 0).mean()))
print('Fraction of those who received a card and had no expenditures: %.2f' \
%(( expenditures_cardholders == 0).mean()))

# As shown above, everyone who did not receive a card had no expenditures, while only 2% of those who received a card had no expenditures. It's not surprising that our model appeared to have a high accuracy. But this also seems to be a case of target leakage, where expenditures probably means expenditures on the card they applied for.
# Since share is partially determined by expenditure, it should be excluded too. The variables active and majorcards are a little less clear, but from the description, they sound concerning. In most situations, it's better to be safe than sorry if you can't track down the people who created the data to find out more.
# Above text is taken directly from the "Data Leakage" tutorial.

# Model without target leakage:
# Drop leaky predictors from dataset (Established by analysing the data)
potential_leaks = ['expenditure', 'share', 'active', 'majorcards']
X2 = X.drop(potential_leaks, axis=1)

# Evaluate the model with leaky predictors removed
cv_scores = cross_val_score(my_pipeline, X2, y, cv=5, scoring='accuracy')
print("Cross-val accuracy: %f" % cv_scores.mean())
# Example output: Cross-val accuracy: 0.833201
```
github_jupyter
# Modelo para la Ciudad de Medellín ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt ##Sección de código para ejecutar el ejercicio en COLAB sin realizar ningún cambio adicional. #from google.colab import drive #drive.mount('/content/drive') #baseUrl = '/content/drive/Shared drives/Analitica Predictiva/covid-19-flr-analitica-predictiva' #os.chdir(baseUrl) ``` Librerias requeridas para realizar los modelos. ``` from sklearn.preprocessing import PolynomialFeatures, StandardScaler from sklearn.linear_model import Lasso from sklearn.pipeline import make_pipeline from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error, mean_absolute_error ``` ## Modelo Corto Plazo ``` ciudades = ['Bogotá D.C.','Medellín','Cali','Barranquilla', 'Cartagena de Indias'] names = ['Bogota','Medellin','Cali','Barranquilla','Cartagena'] id_city = 1 name = names[id_city] urlDataSet = 'Datos/data_{}.pickle'.format(name) ``` Se carga el dataset generado en la sección de preprocesamiento, se muestran los últimos 20 registros, para validar visualmente los ultimos días de información. ``` df_city = pd.read_pickle(urlDataSet) df = df_city.copy() df.tail(20) ``` ### Activos Para la predicción de casos activos se usó una Regressión Lasso con caracteristicas Polinomiales de grado 5; previamente se estandarizaron los datos, y se entrenaron con un conjunto del 70% de los datos; estos datos se toman aleatoriamente, pero al graficarlos se orden en secuencia. ``` totalDays = len(df['dias'].values) X = df['dias'].values[0:totalDays-5].reshape(-1,1) #Variable independiente. y = df['activos'].values[0:totalDays-5].reshape(-1,1) #Variable dependiente. #Datos de validación - nunca entran al modelo. X_v = df['dias'].values[totalDays-5:].reshape(-1,1) y_v = df['activos'].values[totalDays-5:].reshape(-1,1) #Se obtiene el 70% para entrenamiento y 30% para pruebas. 
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) #Se crea un pipeline con el escalado, las caracteristicas polinomiales y la regresión lasso pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=5),Lasso(random_state=123)) #Se realiza el entrenamiento. pipe.fit(X_train,y_train) ``` Se realizan las predicciones sobre el dataset de pruebas, adicionalmente se establece el pico en el día 175, por lo cual para fines de prueba se realizan predicciones desde dicho día, hasta el día 200 de la pandemia, para probar la predicción futura de los modelos a corto plazo. ``` #Obtener predicciones para los datos de prueba. y_pred = pipe.predict(X_test) #Predicciones futuras. x_pred_future = np.arange(totalDays-10,totalDays,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) def GetMax(*args): arr = np.array([[0]]) for arg in args: s = [x for x in arg.shape] l = np.prod(s) a = np.array(arg.reshape(1,l)) arr = np.concatenate((a,arr),axis=1) return arr.max() def GetPeak(*args): x_arr = np.array([[0]]) y_arr = np.array([[0]]) for x,y in args: s = [x for x in x.shape] l = np.prod(s) xr = np.array(x.reshape(1,l)) yr = np.array(y.reshape(1,l)) x_arr = np.concatenate((x_arr,xr),axis=1) y_arr = np.concatenate((y_arr,yr),axis=1) print(x_arr) print(y_arr) print(y_arr.max()) return x_arr[0][y_arr.argmax()] Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) Xpeak = GetPeak((X_train,y_train)) ``` Se grafican los datos. 
``` f = plt.figure(figsize=(18,8)) plt.title('Predicción de casos activos') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(X_test[X_test.flatten().argsort(),0],y_pred[X_test.flatten().argsort()],'--g',label='Predict') plt.plot(x_pred_future,y_pred_future,'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-5000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. plt.ylim(-5000,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pcp_{}_activos'.format(name)) s = [y_pred_future.shape] l = np.prod(s) ``` Se calculan las métricas de la regresión, en este caso se calculan las metricas completas sobre el conjunto de prueba, adicionalmente se computan las metricas solo desde el pico en adelante como indicar de las predicciones futuras. ``` y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (5 days) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (5 days) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ``` ### Recuperados Para la predicción de casos recuperados se usó una Regressión Lasso con caracteristicas Polinomiales de grado 4; previamente se estandarizaron los datos, y se entrenaron con un conjunto del 70% de los datos; estos datos se toman aleatoriamente, pero al graficarlos se orden en secuencia. ``` totalDays = len(df['dias'].values) X = df['dias'].values[0:totalDays-5].reshape(-1,1) #Variable independiente. y = df['acumulado_recuperados'].values[0:totalDays-5].reshape(-1,1) #Variable dependiente. 
#Datos de validación - nunca entran al modelo. X_v = df['dias'].values[totalDays-5:].reshape(-1,1) y_v = df['acumulado_recuperados'].values[totalDays-5:].reshape(-1,1) #Se obtiene el 70% para entrenamiento y 30% para pruebas. X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) #Se crea un pipeline con el escalado, las caracteristicas polinomiales y la regresión lasso pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=4),Lasso(random_state=123)) #Se realiza el entrenamiento. pipe.fit(X_train,y_train) ``` Se realizan las predicciones sobre el dataset de pruebas, adicionalmente se establece el pico en el día 175, por lo cual para fines de prueba se realizan predicciones desde dicho día, hasta el día 200 de la pandemia, para probar la predicción futura de los modelos a corto plazo. ``` #Obtener predicciones para los datos de prueba. y_pred = pipe.predict(X_test) #Predicciones futuras. x_pred_future = np.arange(totalDays-10,totalDays,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) ``` Se grafican los datos. ``` f = plt.figure(figsize=(18,8)) plt.title('Predicción de recuperados') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(X_test[X_test.flatten().argsort(),0],y_pred[X_test.flatten().argsort()],'--g',label='Predict') plt.plot(x_pred_future,y_pred_future,'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-5000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. 
plt.ylim(-5000,Ymax)
plt.legend(loc='upper left');

plt.savefig('docs/images/pcp_{}_recuperados'.format(name))
```

Se calculan las métricas de la regresión, en este caso se calculan las métricas completas sobre el conjunto de prueba, adicionalmente se computan las métricas solo desde el pico en adelante como indicador de las predicciones futuras.

```
y_pred_val = pipe.predict(X_v)

print(' ---- Métricas ----')
print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))
print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred)))
print('Future (5 days) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val))))
print('Future (5 days) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val)))
```

### Muertes

Para la predicción de las muertes se usó una Regresión Lasso con características Polinomiales de grado 6; previamente se estandarizaron los datos, y se entrenaron con un conjunto del 70% de los datos; estos datos se toman aleatoriamente, pero al graficarlos se ordenan en secuencia.

```
totalDays = len(df['dias'].values)

X = df['dias'].values[0:totalDays-5].reshape(-1,1) #Variable independiente.
y = df['acumulado_muertos'].values[0:totalDays-5].reshape(-1,1) #Variable dependiente.

#Datos de validación - nunca entran al modelo.
X_v = df['dias'].values[totalDays-5:].reshape(-1,1)
y_v = df['acumulado_muertos'].values[totalDays-5:].reshape(-1,1)

X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True)

pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=6),Lasso(random_state=123,max_iter=10000))

pipe.fit(X_train,y_train)

y_pred = pipe.predict(X_test)

x_pred_future = np.arange(totalDays-10,totalDays,step=1)
y_pred_future = pipe.predict(x_pred_future.reshape(-1,1))

Ymax = GetMax(y_train,y_test,y_pred,y_pred_future)

#Graph ordered data.
f = plt.figure(figsize=(18,8)) plt.title('Predicción de muertes') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(X_test[X_test.flatten().argsort(),0],y_pred[X_test.flatten().argsort()],'--g',label='Predict') plt.plot(x_pred_future,y_pred_future,'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-10,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. plt.ylim(-10,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pcp_{}_muertes'.format(name)) y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (5 days) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (5 days) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ``` ### Infectados Para la predicción de los infectados se usó una Regressión Lasso con caracteristicas Polinomiales de grado 4; previamente se estandarizaron los datos, y se entrenaron con un conjunto del 70% de los datos; estos datos se toman aleatoriamente, pero al graficarlos se orden en secuencia. ``` totalDays = len(df['dias'].values) X = df['dias'].values[0:totalDays-5].reshape(-1,1) #Variable independiente. y = df['acumulado_infectados'].values[0:totalDays-5].reshape(-1,1) #Variable dependiente. #Datos de validación - nunca entran al modelo. 
X_v = df['dias'].values[totalDays-5:].reshape(-1,1) y_v = df['acumulado_infectados'].values[totalDays-5:].reshape(-1,1) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=5),Lasso(random_state=123)) pipe.fit(X_train,y_train) y_pred = pipe.predict(X_test) x_pred_future = np.arange(totalDays-10,totalDays,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) #Graph ordered data. f = plt.figure(figsize=(18,8)) plt.title('Predicción de infectados') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(X_test[X_test.flatten().argsort(),0],y_pred[X_test.flatten().argsort()],'--g',label='Predict') plt.plot(x_pred_future,y_pred_future,'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-10000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. 
plt.ylim(-10000,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pcp_{}_infectados'.format(name)) y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (5 days) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (5 days) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ``` ## Modelo Mediano Plazo Para los modelos de mediano plazo, se sigue la misma dinámica que en corto plazo, pero se trabaja con un conjunto de datos que se preprocesó para que tenga los datos acumulados semanalmente, esto permite obtener las predicciones de Infectados, recuperados y muertos por semana. ``` urlDataset_w = 'Datos/data_weekly_{}.pickle'.format(name) df_w = pd.read_pickle(urlDataset_w) df_w.tail(20) ``` ### Activos ``` totalW = len(df_w.index.values) X = df_w.index.values[0:totalW-3].reshape(-1,1) #Variable independiente. y = df_w['activos'].values[0:totalW-3].reshape(-1,1) #Variable dependiente. #Datos de validación - nunca entran al modelo. X_v = df_w.index.values[totalW-3:].reshape(-1,1) y_v = df_w['activos'].values[totalW-3:].reshape(-1,1) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=5),Lasso(max_iter=10000,random_state=123)) pipe.fit(X_train,y_train) ``` El pico de la pandemia se grafica en la semana 22. 
``` y_pred = pipe.predict(X_test) x_pred_future = np.arange(0,totalW,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) Xpeak = GetPeak((X_train,y_train)) Xpeak f = plt.figure(figsize=(18,8)) plt.title('Predicción Semanal de casos activos') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(x_pred_future[0:totalW-2],y_pred_future[0:totalW-2],'--g',label='Predict') plt.plot(x_pred_future[totalW-3:],y_pred_future[totalW-3:],'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-5000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. plt.ylim(-5000,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pmp_{}_activos'.format(name)) y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (3 weeks) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (3 weeks) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ``` ### Recuperados ``` totalW = len(df_w.index.values) X = df_w.index.values[0:totalW-3].reshape(-1,1) #Variable independiente. y = df_w['acumulado_recuperados'].values[0:totalW-3].reshape(-1,1) #Variable dependiente. #Datos de validación - nunca entran al modelo. 
X_v = df_w.index.values[totalW-3:].reshape(-1,1) y_v = df_w['acumulado_recuperados'].values[totalW-3:].reshape(-1,1) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=5),Lasso(random_state=123,max_iter=10000)) pipe.fit(X_train,y_train) y_pred = pipe.predict(X_test) x_pred_future = np.arange(0,totalW,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) #Graph ordered data. f = plt.figure(figsize=(18,8)) plt.title('Predicción Semanal de recuperados') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(x_pred_future[0:totalW-2],y_pred_future[0:totalW-2],'--g',label='Predict') plt.plot(x_pred_future[totalW-3:],y_pred_future[totalW-3:],'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-5000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. plt.ylim(-5000,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pmp_{}_recuperados'.format(name)) y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (3 weeks) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (3 weeks) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ``` ### Muertos ``` totalW = len(df_w.index.values) X = df_w.index.values[0:totalW-3].reshape(-1,1) #Variable independiente. y = df_w['acumulado_muertos'].values[0:totalW-3].reshape(-1,1) #Variable dependiente. #Datos de validación - nunca entran al modelo. 
X_v = df_w.index.values[totalW-3:].reshape(-1,1) y_v = df_w['acumulado_muertos'].values[totalW-3:].reshape(-1,1) X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=123,shuffle=True) pipe = make_pipeline(StandardScaler(),PolynomialFeatures(degree=7),Lasso(random_state=123,max_iter=10000)) pipe.fit(X_train,y_train) y_pred = pipe.predict(X_test) x_pred_future = np.arange(0,totalW,step=1) y_pred_future = pipe.predict(x_pred_future.reshape(-1,1)) Ymax = GetMax(y_train,y_test,y_pred,y_pred_future) #Graph ordered data. f = plt.figure(figsize=(18,8)) plt.title('Predicción Semanal de Muertes') plt.grid() plt.plot(X_train[X_train.flatten().argsort(),0],y_train[X_train.flatten().argsort(),0],'ob',markersize=3,label='Train') plt.plot(X_test[X_test.flatten().argsort(),0],y_test[X_test.flatten().argsort(),0],'or',markersize=2,label='Test') plt.plot(X_v,y_v,'o',color='gray',markersize=2,label='Validation') plt.plot(x_pred_future[0:totalW-2],y_pred_future[0:totalW-2],'--g',label='Predict') plt.plot(x_pred_future[totalW-3:],y_pred_future[totalW-3:],'--',color='orange',label='Future Predict') plt.plot([Xpeak,Xpeak],[-5000,Ymax],'--',color='lightseagreen', label='Peak') #Peak of cases. plt.ylim(-2000,Ymax) plt.legend(loc='upper left'); plt.savefig('docs/images/pmp_{}_muertes'.format(name)) y_pred_val = pipe.predict(X_v) print(' ---- Métricas ----') print('Total RMSE:\t\t{:.0f}'.format(np.sqrt(mean_squared_error(y_test, y_pred)))) print('Total MAE:\t\t{:.0f}'.format(mean_absolute_error(y_test,y_pred))) print('Future (3 weeks) RMSE:\t{:.0f}'.format(np.sqrt(mean_squared_error(y_v, y_pred_val)))) print('Future (3 weeks) MAE:\t{:.0f}'.format(mean_absolute_error(y_v,y_pred_val))) ```
github_jupyter
<a href="https://colab.research.google.com/github/edgarbc/my_autosleep_analysis/blob/main/my_autosleep_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import pandas as pd import matplotlib.pyplot as plt # get data from my google drive from google.colab import drive drive.mount('/content/drive') !pwd !ls datadir = '/content/drive/My Drive/Colab Notebooks/autosleep_data/' fname = 'AutoSleep-20201118-to-20201215.csv' print([datadir + fname]) df_data = pd.read_csv('/content/drive/My Drive/Colab Notebooks/autosleep_data/AutoSleep-20201118-to-20201215.csv') df_data.head() print(df_data['fromDate']) fig, ax = plt.subplots() plt.plot_date(df_data['fromDate'],df_data['efficiency']) ax.xaxis.set_tick_params(rotation=30, labelsize=10) plt.show() # plot the amount of deep sleep across days num_days = df_data['deep'].size df_deep = pd.DataFrame() for d in range(num_days): df_deep = df_deep.append({'mins':get_mins(df_data['deep'][d])}, ignore_index='True') # plot the deep sleep fig, ax = plt.subplots() plt.plot_date(df_data['fromDate'],df_deep['mins']) ax.xaxis.set_tick_params(rotation=30, labelsize=10) plt.show() def get_mins(time_str): h, m, s = time_str.split(':') return (int(h) * 3600 + int(m) * 60 + int(s))/60.0 import matplotlib.pyplot as plt from matplotlib.dates import (YEARLY, DateFormatter, rrulewrapper, RRuleLocator, drange) import numpy as np import datetime # Fixing random state for reproducibility np.random.seed(19680801) # tick every 5th easter rule = rrulewrapper(YEARLY, byeaster=1, interval=5) loc = RRuleLocator(rule) formatter = DateFormatter('%m/%d/%y') date1 = datetime.date(1952, 1, 1) date2 = datetime.date(2004, 4, 12) delta = datetime.timedelta(days=100) dates = drange(date1, date2, delta) s = np.random.rand(len(dates)) # make up some random y values fig, ax = plt.subplots() plt.plot_date(dates, s) ax.xaxis.set_major_locator(loc) ax.xaxis.set_major_formatter(formatter) 
ax.xaxis.set_tick_params(rotation=30, labelsize=10) plt.show() # Display data by week # take data starting by sunday # # Function to get data arranged by day def get_weeks_of_mont(start_day, month, year): # # example import calendar import numpy as np calendar.setfirstweekday(6) def get_week_of_month(year, month, day): x = np.array(calendar.monthcalendar(year, month)) week_of_month = np.where(x==day)[0][0] + 1 return(week_of_month) get_week_of_month(2015,9,14) ```
github_jupyter
# Saving and Loading Tutorial ## Preparing a virtual environment First, you need to have `Python3` and `openmpi` installed and running on your machine. In a new directory, here are the steps I took to create a virtual environment for this Jupyter notebook: echo "" echo "Preparing a virtual environment for NetPyNE" echo "=============================================================================" echo "Using Python version:" python3 --version echo "Using Python from:" which python3 echo "" echo "Creating a virtual environment: python3 -m venv env" echo "-----------------------------------------------------------------------------" python3 -m venv env echo "" echo "Activating virtual environment: source env/bin/activate" echo "-----------------------------------------------------------------------------" source env/bin/activate echo "" echo "Updating pip: python3 -m pip install --upgrade pip" echo "-----------------------------------------------------------------------------" python3 -m pip install --upgrade pip echo "" echo "Installing wheel: python3 -m pip install --upgrade wheel" echo "-----------------------------------------------------------------------------" python3 -m pip install --upgrade wheel echo "" echo "Installing ipython: python3 -m pip install --upgrade ipython" echo "-----------------------------------------------------------------------------" python3 -m pip install ipython echo "" echo "Installing NEURON: python3 -m pip install --upgrade neuron" echo "-----------------------------------------------------------------------------" python3 -m pip install --upgrade neuron echo "" echo "Cloning NetPyNE: git clone https://github.com/Neurosim-lab/netpyne.git" echo "-----------------------------------------------------------------------------" git clone https://github.com/Neurosim-lab/netpyne.git echo "" echo "Installing NetPyNE: python3 -m pip install -e netpyne" echo "-----------------------------------------------------------------------------" 
python3 -m pip install -e netpyne echo "" echo "Installing ipykernel for Jupyter: python3 -m pip install --upgrade ipykernel" echo "-----------------------------------------------------------------------------" python3 -m pip install --upgrade ipykernel echo "" echo "Installing Jupyter: python3 -m pip install --upgrade jupyter" echo "-----------------------------------------------------------------------------" python3 -m pip install --upgrade jupyter echo "" echo "Creating a kernel for Jupyter: ipython kernel install --user --name=env" echo "-----------------------------------------------------------------------------" ipython kernel install --user --name=env echo "" echo "=============================================================================" echo "Your virtual environment is ready for use." echo "" echo "To deactivate, execute: deactivate" echo "To reactivate, execute: source env/bin/activate" echo "=============================================================================" ## Copying this tutorial For convenience, let's copy this tutorial's directory up to the directory we're working in and then change into that directory. pwd cp -r netpyne/netpyne/tutorials/saving_loading_tut . cd saving_loading_tut pwd ## Normal saving Then we'll run a simulation with normal saving, using `saving_netParams.py` (which is used by all simulations in this tutorial), `saving_normal_cfg.py`, and `saving_normal_init.py`. 
Let's take a look at `saving_normal_init.py`, to see the standard way to run and save a simulation: from netpyne import sim cfg, netParams = sim.readCmdLineArgs( simConfigDefault='saving_normal_cfg.py', netParamsDefault='saving_netParams.py') sim.initialize(simConfig=cfg, netParams=netParams) sim.net.createPops() sim.net.createCells() sim.net.connectCells() sim.net.addStims() sim.setupRecording() sim.runSim() sim.gatherData() sim.saveData() sim.analysis.plotData() We could run this on a single core using `python3 saving_normal_init.py` (if we just want the output) or `ipython -i saving_normal_init.py` (if we want to interact with the simulation afterwards). But we will run this on multiple cores using the following command: ``` !mpiexec -n 4 nrniv -python -mpi saving_normal_init.py ``` This command does not currently exit to the system prompt, so you will have to restart your kernel. In the menu bar above, click on `Kernel`, then `Restart`, then `Restart`. The `whos` in the next cell should return `Interactive namespace is empty.` after the Kernel has been cleared. ``` whos ``` The simulation should have produced a directory called `saving_normal_data` with three analysis plots and a data file named `saving_normal_data.pkl`. We are now going to load the simulation from this file and produce the same plots. ``` from netpyne import sim sim.loadAll('saving_normal_data/saving_normal_data.pkl') sim.analysis.plotConn(saveFig='saving_normal_data/saving_normal_plot_conn_pop_strength_matrix_FROMFILE.png'); sim.analysis.plotRaster(saveFig='saving_normal_data/saving_normal_raster_gid_FROMFILE.png'); sim.analysis.plotTraces(saveFig='saving_normal_data/saving_normal_traces_FROMFILE.png'); ``` Compare the plots, they should be identical. Congratulations! You have run a simulation, saved the data, then loaded it later to perform more analysis. Now restart your kernel and check the `whos`.
``` whos ``` ## Distributed Saving If you're running large sims, you may want to save the data from each node in a separate file, i.e. distributed saving. We'll run a simulation using distributed saving and loading using `saving_netParams.py` (which is used by all simulations in this tutorial), `saving_dist_cfg.py`, and `saving_dist_init.py`. The only changes to the cfg file are renaming the simulation: cfg.simLabel = 'saving_dist' Our init file for distributed saving looks like this: from netpyne import sim cfg, netParams = sim.readCmdLineArgs( simConfigDefault='saving_dist_cfg.py', netParamsDefault='saving_netParams.py') sim.initialize(simConfig=cfg, netParams=netParams) sim.net.createPops() sim.net.createCells() sim.net.connectCells() sim.net.addStims() sim.setupRecording() sim.runSim() #sim.gatherData() #sim.saveData() ##### new ##### sim.saveDataInNodes() sim.gatherDataFromFiles() ##### end new ##### sim.analysis.plotData() We turned off `gatherData` and `saveData` and replaced those with `saveDataInNodes` and `gatherDataFromFiles`. Let's run the simulation now. ``` !mpiexec -n 4 nrniv -python -mpi saving_dist_init.py ``` That should have produced a directory `saving_dist_data` containing the same three analysis plots and a `node_data` directory containing a data file from each of the four nodes we used. Now restart your kernel so we can load the data from file analyze it again. The `whos` in the next cell should return `Interactive namespace is empty.` ``` whos from netpyne import sim sim.gatherDataFromFiles(simLabel='saving_dist') sim.analysis.plotConn(saveFig='saving_dist_data/saving_dist_plot_conn_pop_strength_matrix_FROMFILE.png'); sim.analysis.plotRaster(saveFig='saving_dist_data/saving_dist_raster_gid_FROMFILE.png'); sim.analysis.plotTraces(saveFig='saving_dist_data/saving_dist_traces_FROMFILE.png'); ``` Compare the plots, they should be identical except for the connectivity plot, which didn't retain the connectivity for the background inputs. 
Now restart your kernel and check the `whos`. ``` whos ``` ## Interval Saving Perhaps you want to save data at intervals in case you have large, long simulations you're worried won't complete. We'll run a simulation using interval saving and loading using `saving_netParams.py` (which is used by all simulations in this tutorial), `saving_int_cfg.py`, and `saving_int_init.py`. The only changes to the cfg file are renaming the simulation: cfg.simLabel = 'saving_int' and turning back on the saving of the data into one file: cfg.savePickle = True Our init file for interval saving looks like this: from netpyne import sim cfg, netParams = sim.readCmdLineArgs( simConfigDefault='saving_int_cfg.py', netParamsDefault='saving_netParams.py') sim.initialize(simConfig=cfg, netParams=netParams) sim.net.createPops() sim.net.createCells() sim.net.connectCells() sim.net.addStims() sim.setupRecording() #sim.runSim() ##### new ##### sim.runSimIntervalSaving(1000) ##### end new ##### sim.gatherData() sim.saveData() sim.analysis.plotData() We turned off `runSim` and replaced it with `runSimIntervalSaving(1000)`, which will save the simulation every 1000 ms. Let's run the simulation now. Remember you can run this without MPI using the command `python3 saving_int_init.py`. ``` !mpiexec -n 4 nrniv -python -mpi saving_int_init.py ``` That should have produced a directory `saving_int_data` containing the data file and the same three analysis plots (from the completed simulation) and an `interval_data` directory containing a data file for each 1000 ms of our 10,000 ms simulation. Now restart your kernel so we can load interval data from file. The `whos` in the next cell should return `Interactive namespace is empty.` ``` whos ``` Now, let's assume our simulation timed out, and the last interval save we got was at 10000 ms. We can still analyze that partial data.
``` from netpyne import sim sim.loadAll('saving_int_data/interval_data/interval_10000.pkl', createNEURONObj=False) sim.analysis.plotConn(saveFig='saving_int_data/saving_int_plot_conn_pop_strength_matrix_INTERVAL.png'); sim.analysis.plotRaster(saveFig='saving_int_data/saving_int_raster_gid_INTERVAL.png'); sim.analysis.plotTraces(saveFig='saving_int_data/saving_int_traces_INTERVAL.png'); ``` The connectivity, traces and raster plots should be identical. You can see that we recovered partial data. Congratulations! You have successfully saved, loaded, and analyzed simulation data in a variety of ways.
github_jupyter
``` import numpy import random import math import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt %matplotlib inline address = "" data = pd.read_csv(address); #To read csv file df = pd.DataFrame(data) df #z-score normalization df['signal_strength']=((df['signal_strength']-df['signal_strength'].min())/(df['signal_strength'].max()-df['signal_strength'].min()))*(100) df['speed']=((df['speed']-df['speed'].min())/(df['speed'].max()-df['speed'].min()))*100 df.rename(columns={"download/upload": "d_u"}, inplace = True) df # Preparing data to be tested df1 = df[(df['state'] == "Delhi") & (df['service_provider'] == "VODAFONE") & (df['Month'] == "July") & (df['Year'] == 2018) & (df['technology'] == "4G")] df1 = df1[['speed','signal_strength']] df1 = df1.values df1 #1). Constructing the weight matrix res = [ [ 0 for i in range(2) ] for j in range(len(df1)) ] for i in range(len(df1)): x = random.random() res[i][0] = x res[i][1] = 1-x print(res) #2). Calculating the respective centroids. #Centroid of cluster-1 old_c1 = [0,0] old_c2 = [0,0] c1 = [0,0] c2 = [0,0] m = 2 flag = 1 val = 0 while flag == 1: val += 1 print("Presently, in iteration ",val) flag = 0 c1 = [0,0] c2 = [0,0] weighted_sum_1 = 0 weighted_sum_2 = 0 #computing the centroids of each cluster for i in range(len(df1)): weighted_sum_1 += pow(res[i][0],m) # sum of powers of the membership function weighted_sum_2 += pow(res[i][1],m) # sum of powers of the membership function c1[0] += pow(res[i][0],m) * df1[i][0] c1[1] += pow(res[i][0],m) * df1[i][1] c2[0] += pow(res[i][1],m) * df1[i][0] c2[1] += pow(res[i][1],m) * df1[i][1] c1 = [x / weighted_sum_1 for x in c1] c2 = [x / weighted_sum_2 for x in c2] print(c1) print(c2) #4). 
Updating membership matrix for i in range(len(df1)): dist1 = math.sqrt((df1[i][0]-c1[0])**2 + (df1[i][1]-c1[1])**2) #distance from cluster-1 dist2 = math.sqrt((df1[i][0]-c2[0])**2 + (df1[i][1]-c2[1])**2) #distance from cluster-2 denominator = pow((1/dist1),(1/m-1)) + pow((1/dist2),(1/m-1)) res[i][0] = pow((1/dist1),(1/m-1)) / denominator res[i][1] = pow((1/dist2),(1/m-1)) / denominator #checking if centroids changed or not if (c1 == old_c1) and (c2 == old_c2): continue else: flag = 1 old_c1 = c1 old_c2 = c2 print("Completed finding centroid") print(res) ```
github_jupyter
``` # Libraries needed for NLP import nltk nltk.download('punkt') from nltk.stem.lancaster import LancasterStemmer stemmer = LancasterStemmer() # Libraries needed for Tensorflow processing import tensorflow as tf import numpy as np import tflearn import random import json from google.colab import files files.upload() # import our chat-bot intents file with open('intents.json') as json_data: intents = json.load(json_data) intents words = [] classes = [] documents = [] ignore = ['?'] # loop through each sentence in the intent's patterns for intent in intents['intents']: for pattern in intent['patterns']: # tokenize each and every word in the sentence w = nltk.word_tokenize(pattern) # add word to the words list words.extend(w) # add word(s) to documents documents.append((w, intent['tag'])) # add tags to our classes list if intent['tag'] not in classes: classes.append(intent['tag']) # Perform stemming and lower each word as well as remove duplicates words = [stemmer.stem(w.lower()) for w in words if w not in ignore] words = sorted(list(set(words))) # remove duplicate classes classes = sorted(list(set(classes))) print (len(documents), "documents") print (len(classes), "classes", classes) print (len(words), "unique stemmed words", words) # create training data training = [] output = [] # create an empty array for output output_empty = [0] * len(classes) # create training set, bag of words for each sentence for doc in documents: # initialize bag of words bag = [] # list of tokenized words for the pattern pattern_words = doc[0] # stemming each word pattern_words = [stemmer.stem(word.lower()) for word in pattern_words] # create bag of words array for w in words: bag.append(1) if w in pattern_words else bag.append(0) # output is '1' for current tag and '0' for rest of other tags output_row = list(output_empty) output_row[classes.index(doc[1])] = 1 training.append([bag, output_row]) # shuffling features and turning it into np.array random.shuffle(training) training = 
np.array(training) # creating training lists train_x = list(training[:,0]) train_y = list(training[:,1]) # resetting underlying graph data tf.reset_default_graph() # Building neural network net = tflearn.input_data(shape=[None, len(train_x[0])]) net = tflearn.fully_connected(net, 10) net = tflearn.fully_connected(net, 10) net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax') net = tflearn.regression(net) # Defining model and setting up tensorboard model = tflearn.DNN(net, tensorboard_dir='tflearn_logs') # Start training model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True) model.save('model.tflearn') import pickle pickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( "training_data", "wb" ) ) # restoring all the data structures data = pickle.load( open( "training_data", "rb" ) ) words = data['words'] classes = data['classes'] train_x = data['train_x'] train_y = data['train_y'] with open('intents.json') as json_data: intents = json.load(json_data) # load the saved model model.load('./model.tflearn') def clean_up_sentence(sentence): # tokenizing the pattern sentence_words = nltk.word_tokenize(sentence) # stemming each word sentence_words = [stemmer.stem(word.lower()) for word in sentence_words] return sentence_words # returning bag of words array: 0 or 1 for each word in the bag that exists in the sentence def bow(sentence, words, show_details=False): # tokenizing the pattern sentence_words = clean_up_sentence(sentence) # generating bag of words bag = [0]*len(words) for s in sentence_words: for i,w in enumerate(words): if w == s: bag[i] = 1 if show_details: print ("found in bag: %s" % w) return(np.array(bag)) ERROR_THRESHOLD = 0.30 def classify(sentence): # generate probabilities from the model results = model.predict([bow(sentence, words)])[0] # filter out predictions below a threshold results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD] # sort by strength of probability 
results.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in results: return_list.append((classes[r[0]], r[1])) # return tuple of intent and probability return return_list def response(sentence, userID='123', show_details=False): results = classify(sentence) # if we have a classification then find the matching intent tag if results: # loop as long as there are matches to process while results: for i in intents['intents']: # find a tag matching the first result if i['tag'] == results[0][0]: # a random response from the intent return print(random.choice(i['responses'])) results.pop(0) classify('What are you hours of operation?') response('What are you hours of operation?') response('What is menu for today?') #Some of other context free responses. response('Do you accept Credit Card?') response('Where can we locate you?') response('That is helpful') response('Bye') #Adding some context to the conversation i.e. Contexualization for altering question and intents etc. # create a data structure to hold user context context = {} ERROR_THRESHOLD = 0.25 def classify(sentence): # generate probabilities from the model results = model.predict([bow(sentence, words)])[0] # filter out predictions below a threshold results = [[i,r] for i,r in enumerate(results) if r>ERROR_THRESHOLD] # sort by strength of probability results.sort(key=lambda x: x[1], reverse=True) return_list = [] for r in results: return_list.append((classes[r[0]], r[1])) # return tuple of intent and probability return return_list def response(sentence, userID='123', show_details=False): results = classify(sentence) # if we have a classification then find the matching intent tag if results: # loop as long as there are matches to process while results: for i in intents['intents']: # find a tag matching the first result if i['tag'] == results[0][0]: # set context for this intent if necessary if 'context_set' in i: if show_details: print ('context:', i['context_set']) context[userID] = i['context_set'] # 
check if this intent is contextual and applies to this user's conversation if not 'context_filter' in i or \ (userID in context and 'context_filter' in i and i['context_filter'] == context[userID]): if show_details: print ('tag:', i['tag']) # a random response from the intent return print(random.choice(i['responses'])) results.pop(0) response('Can you please let me know the delivery options?') response('What is menu for today?') context response("Hi there!", show_details=True) response('What is menu for today?') ```
github_jupyter
## Predicting Missing links in a citation network ``` # global imports import random import numpy as np import pandas as pd import jgraph ## this was previously known as igraph import csv import matplotlib.pyplot as plt # machine learning imports from sklearn import svm from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel from sklearn import preprocessing import spacy ``` ### Import datasets ``` # function to read data from txt files nodes_info_df = pd.read_csv('./data/node_information.csv') random_preds_df = pd.read_csv('./data/random_predictions.csv') test_set = pd.read_csv('./data/testing_set.txt', sep = ' ', header = None) train_set = pd.read_csv('./data/training_set.txt', sep = ' ', header = None) test_set.columns = ['source_id', 'target_id'] train_set.columns = ['source_id', 'target_id', 'label'] nodes_info_df.columns = ['paper_id', 'publication_year', 'title', 'author', 'journal_name', 'abstract'] ``` ## Exploratory Analysis ``` print('Unique papers: ', len(set(nodes_info_df['paper_id']))) sym_diff = set(test_set['source_id'].append(test_set['target_id'])).symmetric_difference(set(nodes_info_df['paper_id'])) print('Unknown papers in test set (with nodes_info):', len(sym_diff)) # # get distribution of journal names # nodes_info_df['journal_name'] = nodes_info_df['journal_name'].fillna('unknown') # nodes_info_df.journal_name.value_counts()[:15] # nodes_info_df.author ``` ## Feature generation ``` #Load Spacy import en_core_web_sm spacy_nlp = en_core_web_sm.load(disable=["tagger", "parser","ner","entity_linker","textcat","entity_ruler","sentencizer","merge_noun_chunks","merge_entities","merge_subtokens"]) ``` ### Text features generation ``` import re import math from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import cosine_similarity from sklearn.decomposition import PCA def isNaN(string): return string != string def filter_bad(alphabet): bad = [',', None] 
if(alphabet in bad): return False else: return True ## possible formats of authors: # several authors: separation via ',' # sometimes mentions the university eg '(montpellier)' # sometimes mentions the first name # sometimes format is: firstname letter. lastname def author_normalisation(authors): if isNaN(authors) == False: #print(authors) authors = authors.lower() final_authors = list() # remove universities and last space if '(' in authors: authors = re.sub(r'\(+.*\)', '', authors).strip() # remove extra spaces authors = authors.split() authors = ' '.join(filter(filter_bad, authors)) # get all authors of one paper for author in authors.split(', '): author.strip() # get the names of an author names = author.split(' ') author_names = list() if len(names) == 2: # check if first element is 'letter.' format: if re.match('\w\.', names[0]): author_names.append(names[0]) else: author_names.append(names[0][0] + '.') if len(names) == 3: if re.match('\w\.', names[0]): author_names.append(names[0]) else: author_names.append(names[0][0] + '.') # skip the second middle name if re.match('\w\.', names[1]): pass #author_names.append(names[1]) #else: # author_names.append(names[1][0] + '.') author_names.append(names[-1]) if len(author_names) > 1: author_names = ' '.join(author_names) else: author_names = author_names[0] # append last name final_authors.append(author_names) number_of_authors = len(final_authors) if number_of_authors == 0: return np.NaN return final_authors return np.NaN def common_authors(string1, string2): if isNaN(string1): return False if isNaN(string2): return False #a_set = set(string1.split(',')) #b_set = set(string2.split(',')) a_set = set(string1) b_set = set(string2) if (a_set & b_set): return True else: return False def number_common_authors(string1, string2): pass def remove_special_characters(string): string = re.sub("([^\w]|[\d_])+", " ", string) return string def tokenize(string): # Code to tokenize spacy_tokens = spacy_nlp(string) # Code to remove 
punctuation tokens and create string tokens string_tokens = [token.lemma_ for token in spacy_tokens if not token.is_punct if not token.is_stop] return string_tokens def recombining_tokens_into_a_string(list_of_tokens): return " ".join(list_of_tokens) def create_tf_idf(column,tf_idf): #if tf_idf doesn't exist if tf_idf==None: #create a TfidfVectorizer object tf_idf = TfidfVectorizer() #Vectorize the sample text X_tfidf_sample = tf_idf.fit_transform(column) #if tf_idf already exist use the same for the test else: X_tfidf_sample = tf_idf.transform(column) return X_tfidf_sample,tf_idf def tf_idf_feature(column,dataset,tf_idf,author_or_not): #Remove special characters from the text dataset[column]=dataset[column].apply(lambda x: remove_special_characters(x)) #if we deal with the column author if author_or_not==1: # Remove strings of size less than two column_cleaned= dataset[column].str.findall('\w{2,}').str.join(' ') else: #Tokenize, extract lemmas and remove stop words tokenized=dataset[column].apply(lambda x: tokenize(x)) #Recombine tokens into a string column_cleaned=tokenized.apply(recombining_tokens_into_a_string) # Create the tf_idf matrix tf_idf_matrix,tf_idf=create_tf_idf(column_cleaned,tf_idf) return tf_idf_matrix,tf_idf # Compute the similarity between a column target and source def compute_similarity(column,df_source,df_target,author_or_not): #Fill the Na's df_source[column].fillna("unknown", inplace=True) df_target[column].fillna("unknown", inplace=True) tf_idf=None #Create the tf_idf features tf_idf_title_source,tf_idf=tf_idf_feature(column,df_source,tf_idf,author_or_not) tf_idf_title_target,tf_idf=tf_idf_feature(column,df_target,tf_idf,author_or_not) #Calculate the similarities similarity=[] for i in range(tf_idf_title_source.shape[0]): cos_sim=cosine_similarity(tf_idf_title_source[i], tf_idf_title_target[i]) similarity.append(cos_sim) #Convert the list as a DataFrame similarity_df=pd.DataFrame(np.vstack(similarity)) return similarity_df def 
reduce_matrix_width(source_df,target_df,n_components): # Apply a PCA to reduce the matrix width , we chose 15 pca_train = PCA(n_components=n_components) #PCA on source feature pca_train.fit(source_df) matrix_source_reduced = pca_train.transform(source_df) print(sum(pca_train.explained_variance_ratio_)) # Percentage of initial matrix explained by reduced matrix #PCA on target feature pca_train.fit(target_df) matrix_target_reduced = pca_train.transform(target_df) print(sum(pca_train.explained_variance_ratio_)) # Percentage of initial matrix explained by reduced matrix return matrix_source_reduced,matrix_target_reduced def journal_name_feature(): #We first merge train and test to avoid a different number of features when one-hot-encoding #To keep trace of the train and test dataset train_source_info['train_test']=1 train_target_info['train_test']=1 test_source_info['train_test']=0 test_target_info['train_test']=0 # merging the two datasets together combined_source=pd.concat([train_source_info,test_source_info],ignore_index=True) combined_target=pd.concat([train_target_info,test_target_info],ignore_index=True) # One hot encoding journal_name_encoded_source=pd.get_dummies(combined_source['journal_name']) journal_name_encoded_target=pd.get_dummies(combined_target['journal_name']) #Apply PCA to reduce matrix with 15 components journal_name_encoded_source_reduced,journal_name_encoded_target_reduced =reduce_matrix_width(journal_name_encoded_source,journal_name_encoded_target,15) # Merge encoded dataset with the combine dataset combined_source=pd.concat([combined_source,pd.DataFrame(journal_name_encoded_source_reduced)],axis=1) combined_target=pd.concat([combined_target,pd.DataFrame(journal_name_encoded_target_reduced)],axis=1) #Separate train and test and keep only journal_name features 
train_source_journal=combined_source[combined_source["train_test"]==1].drop(['abstract','author','journal_name','label','paper_id','publication_year','source_id','target_id','title','train_test'], axis=1) test_source_journal=combined_source[combined_source["train_test"]==0].drop(['abstract','author','journal_name','label','paper_id','publication_year','source_id','target_id','title','train_test'], axis=1) train_target_journal=combined_target[combined_target["train_test"]==1].drop(['abstract','author','journal_name','label','paper_id','publication_year','source_id','target_id','title','train_test'], axis=1) test_target_journal=combined_target[combined_target["train_test"]==0].drop(['abstract','author','journal_name','label','paper_id','publication_year','source_id','target_id','title','train_test'], axis=1) #add prefix to columns names train_source_journal.columns=[str(col) + '_source' for col in train_source_journal.columns] test_source_journal.columns=[str(col) + '_source' for col in test_source_journal.columns] train_target_journal.columns=[str(col) + '_target' for col in train_target_journal.columns] test_target_journal.columns=[str(col) + '_target' for col in test_target_journal.columns] return train_source_journal,test_source_journal,train_target_journal,test_target_journal # reaye source and target info datasets train_source_info = train_set.merge(nodes_info_df, left_on='source_id', right_on='paper_id',how="left") train_target_info = train_set.merge(nodes_info_df, left_on='target_id', right_on='paper_id',how="left") test_source_info = test_set.merge(nodes_info_df, left_on='source_id', right_on='paper_id',how="left") test_target_info = test_set.merge(nodes_info_df, left_on='target_id', right_on='paper_id',how="left") ## apply the features to training set train_set['source_authors'] = train_source_info.author.apply(lambda x: author_normalisation(x)) train_set['target_authors'] = train_target_info.author.apply(lambda x: author_normalisation(x)) 
train_set['publication_year_diff'] = train_source_info.publication_year - train_target_info.publication_year train_set['source_journal'] = train_source_info.journal_name train_set['target_journal'] = train_target_info.journal_name train_set['same_journal'] = train_set.apply(lambda x: int(x.source_journal == x.target_journal), axis=1) ## apply the features to test set test_set['source_authors'] = test_source_info.author.apply(lambda x: author_normalisation(x)) test_set['target_authors'] = test_target_info.author.apply(lambda x: author_normalisation(x)) test_set['publication_year_diff'] = test_source_info.publication_year - test_target_info.publication_year test_set['source_journal'] = test_source_info.journal_name test_set['target_journal'] = test_target_info.journal_name test_set['same_journal'] = test_set.apply(lambda x: int(x.source_journal == x.target_journal), axis=1) #other features this might take some times to run ## apply the features to training set train_set['similarity_title']=compute_similarity("title",train_source_info,train_target_info,0) train_set['similarity_abstract']=compute_similarity("abstract",train_source_info,train_target_info,0) train_set['similarity_author']=compute_similarity("author",train_source_info,train_target_info,1) ## apply features to test set test_set['similarity_title']=compute_similarity("title",test_source_info,test_target_info,0) test_set['similarity_abstract']=compute_similarity("abstract",test_source_info,test_target_info,0) test_set['similarity_author']=compute_similarity("author",test_source_info,test_target_info,1) #journal_name feature train_source_journal,test_source_journal,train_target_journal,test_target_journal =journal_name_feature() #Add journal_name to the train and test train_set=pd.concat([train_set,train_source_journal],axis=1,) train_set=pd.concat([train_set,train_target_journal],axis=1) test_set=pd.concat([test_set,test_source_journal.reset_index().drop(["index"],axis=1)],axis=1) 
test_set=pd.concat([test_set,test_target_journal.reset_index().drop(["index"],axis=1)],axis=1) ``` ### Graph features generation ``` import networkx as nx # get some elements and then assign the attributes -> this is shite so ignore it def shortest_path_info(some_graph, source, target): if source not in some_graph.nodes(): return -1 # not known if target not in some_graph.nodes(): return -1 # not known if nx.has_path(some_graph, source, target): return nx.dijkstra_path_length(some_graph, source=source, target=target) return -2 # no path def degree_centrality(some_graph): degree_dict = dict(some_graph.degree(some_graph.nodes())) return degree_dict def get_in_out_degree(some_graph): in_degree_dict = dict(some_graph.in_degree(some_graph.nodes())) out_degree_dict = dict(some_graph.out_degree(some_graph.nodes())) return in_degree_dict, out_degree_dict def common_neighs(some_graph, x, y): if x not in some_graph.nodes(): return 0,[] # not known if y not in some_graph.nodes(): return 0,[] # not known neighs = sorted(list(nx.common_neighbors(some_graph, x, y))) return len(neighs), neighs def jac_index(g, x, y): if x not in g.nodes(): return -1 # not known if y not in g.nodes(): return -1 # not known preds = nx.jaccard_coefficient(g, [(x, y)]) jacc = 0 for u, v, p in preds: jacc = p return jacc def pref_attachement(g, x, y): if x not in g.nodes(): return -1 # not known if y not in g.nodes(): return -1 # not known preds = nx.preferential_attachment(g, [(x, y)]) pref = 0 for u, v, p in preds: pref = p return pref def aa_index(g, x, y): if x not in g.nodes(): return -1 # not known if y not in g.nodes(): return -1 # not known preds = nx.adamic_adar_index(g, [(x, y)]) aa = 0 for u, v, p in preds: aa = p return aa # create the network # get network for when there is a connection in train set # edges = list(zip(train_set.loc[train_set.label == 1].source_id, train_set.loc[train_set.label == 1].target_id)) # nodes = list(set(train_set.source_id + train_set.target_id)) # train_G = 
nx.DiGraph() # train_G.add_nodes_from(nodes) # train_G.add_edges_from(edges) train_G = nx.from_pandas_edgelist(train_set, source='source_id', target='target_id', edge_attr=None, create_using=nx.DiGraph()) # make sure you also have an undirected graph train_G_ud = train_G.to_undirected() # create some dictionaries to use later on clustering_coeff_dict = nx.clustering(train_G_ud) avg_neigh_degree_dict = nx.average_neighbor_degree(train_G) out_degree_centrality = nx.out_degree_centrality(train_G) in_degree_centrality = nx.in_degree_centrality(train_G) page_rank = nx.pagerank_scipy(train_G) hub_score, authority_score = nx.hits(train_G) # function to get features for graph of a single element def get_features(directed_graph, ud_graph, source_id, target_id, label): # features for undirected graph jaccard_index = jac_index(ud_graph, source_id, target_id) preferencial_attachment = pref_attachement(ud_graph, source_id, target_id) number_common_neighbours, common_neighbours = common_neighs(ud_graph, source_id, target_id) adamic_adar_index = aa_index(ud_graph, source_id, target_id) #shortest_path = shortest_path_info(train_G, source_id, target_id) source_pr = page_rank[source_id] source_hub_score = hub_score[source_id] source_authority_score = authority_score[source_id] source_cluster_coeff = clustering_coeff_dict[source_id] source_out_centrality = out_degree_centrality[source_id] source_avg_neigh_degree = avg_neigh_degree_dict[source_id] target_pr = page_rank[target_id] target_hub_score = hub_score[target_id] target_authority_score = authority_score[target_id] target_cluster_coeff = clustering_coeff_dict[target_id] target_in_centrality = in_degree_centrality[target_id] target_avg_neigh_degree = avg_neigh_degree_dict[target_id] # no name feature but supposedly important feature_n = source_out_centrality * target_in_centrality return [source_id, target_id, label, jaccard_index, preferencial_attachment, number_common_neighbours, adamic_adar_index, source_pr, target_pr, 
source_hub_score, target_hub_score, source_authority_score, target_authority_score, source_cluster_coeff, target_cluster_coeff, source_out_centrality, target_in_centrality, source_avg_neigh_degree, target_avg_neigh_degree, feature_n] ``` ### IMPORTANT: add column names when adding new features to the dataset ``` ### add columns when you add Features column_names = ['source_id', 'target_id', 'label', 'jaccard_index', 'preferential_attachement', 'number_common_neighbours', 'adamic_adar_index', 'source_pr', 'target_pr', 'source_hub_score', 'target_hub_score', 'source_authority_score', 'target_authority_score', 'source_cluster_coeff', 'target_cluster_coeff', 'source_out_centrality', 'target_in_centrality', 'source_avg_neigh_degree', 'target_avg_neigh_degree', 'feature_n'] final_train_set = pd.DataFrame([[np.nan]*len(column_names)]* train_set.shape[0], columns=column_names) final_test_set = pd.DataFrame([[np.nan]*len(column_names)]* test_set.shape[0], columns=column_names) # create the features for the train set for idx, row in train_set.iterrows(): features = get_features(train_G, train_G_ud, row.source_id, row.target_id, row.label) #update the features final_train_set.loc[idx] = features #create the features for the test set for idx, row in test_set.iterrows(): features = get_features(train_G, train_G_ud, row.source_id, row.target_id, -1) #update the features final_test_set.loc[idx] = features # merge graph and text features together train_set = train_set.merge(final_train_set, on=['source_id', 'target_id', 'label'], how='left') test_set = test_set.merge(final_test_set, on=['source_id', 'target_id'], how='left') from networkx import betweenness_centrality from networkx import edge_betweenness_centrality from networkx import load_centrality from networkx import eigenvector_centrality def graph_features(directed_graph, dataframe_dataset): # betweenness between_centrality = betweenness_centrality(directed_graph) # shortest-path betweenness centrality for nodes # load 
centrality ld_centrality = load_centrality(directed_graph) # load centrality of a node is the fraction of all shortest paths that pass through that node #eigenvector centrality eig_centrality = eigenvector_centrality(directed_graph) # save features to training set dataframe_dataset['betweeness_centrality'] = pd.DataFrame.from_dict(dict(eig_centrality), orient='index') dataframe_dataset['load_centrality'] = pd.DataFrame.from_dict(dict(ld_centrality), orient='index') dataframe_dataset['eigen_centrality'] = pd.DataFrame.from_dict(dict(eig_centrality), orient='index') return dataframe_dataset train_set = graph_features(train_G, train_set) train_set.betweeness_centrality.fillna(-1, inplace=True) train_set.load_centrality.fillna(-1, inplace=True) train_set.eigen_centrality.fillna(-1, inplace=True) test_set = graph_features(train_G, test_set) test_set.betweeness_centrality.fillna(-1, inplace=True) test_set.load_centrality.fillna(-1, inplace=True) test_set.eigen_centrality.fillna(-1, inplace=True) # write out so that you do not have to run everything again train_set.to_csv('final_train.csv',index=False) test_set.to_csv('final_test.csv', index=False) ``` ### Can start from here as well when features were saved previously ``` test_set = pd.read_csv('final_test.csv') train_set = pd.read_csv('final_train.csv') ``` ### Final clean (i.e replacing nans etc) ``` # fill nas in some way train_set.publication_year_diff.fillna(-24, inplace=True) # 24 is for unknown (?) 
train_set.fillna('unknown', inplace=True) test_set.publication_year_diff.fillna(-24, inplace=True) # 24 is for unknown (?_) test_set.fillna('unknown', inplace=True) test_set.head() train_set.head() # check the types of each column (none should be object) train_set.dtypes %matplotlib inline ## Most interesting correlation is with label import seaborn as sns import matplotlib.pyplot as plt plt.figure(figsize=(14,12)) sns.heatmap(train_set.corr(), vmax=0.5, square=True, annot=True) ``` ## Learning Stuff ``` # separate features from labels: X = train_set.loc[:, (train_set.columns != 'label') & (train_set.columns != 'common_authors') & (train_set.columns != 'source_authors') & (train_set.columns != 'target_authors') & (train_set.columns != 'source_journal') & (train_set.columns != 'target_journal') ] y = train_set['label'] y.astype(np.int) # final feature correlation ff = X.copy() ff['label'] = y plt.figure(figsize=(14,12)) sns.heatmap(X.corr(), vmax=0.5, square=True, annot=True) ## Train different models and compare the performance from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, BaggingClassifier, GradientBoostingClassifier from sklearn.metrics import f1_score, confusion_matrix from sklearn.model_selection import cross_validate model = AdaBoostClassifier(n_estimators=75, learning_rate=1) scores = cross_validate(model, X, y, scoring='f1', cv=5) # n_jobs is the number of cpus to use -1 => all scores # describe results from scores from scipy import stats stats.describe(scores['test_score']) model = RandomForestClassifier() scores = cross_validate(model, X, y, scoring='f1', cv=5) # n_jobs is the number of cpus to use -1 => all scores # describe results from scores from scipy import stats stats.describe(scores['test_score']) ``` ### Recursive feature selection ``` # ## ONLY RUN AT THE END FOR GRAPHS.. takes a v.long time to execute (been 3hours for now.. 
only execute on a virtual # # machine with GPUs (if possible)) # from sklearn.feature_selection import RFECV # clf_rf_4 = model # rfecv = RFECV(estimator=clf_rf_4, step=1, cv=10,scoring='f1') #10-fold cross-validation # rfecv = rfecv.fit(X, y) # print('Optimal number of features :', rfecv.n_features_) # print('Best features :', X.columns[rfecv.support_]) # Plot number of features VS. cross-validation scores import matplotlib.pyplot as plt plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score of number of selected features") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() ``` ## prior to authors: DescribeResult(nobs=10, minmax=(0.7092423428264374, 0.7505859928392963), mean=0.7330286516063008, variance=0.0002449243278408503, skewness=-0.16892931758355367, kurtosis=-1.5003847605685021) after some basic graphs: DescribeResult(nobs=10, minmax=(0.9537111539570966, 0.9556853523477206), mean=0.9544708719147975, variance=4.3393884483164826e-07, skewness=0.7947367347642024, kurtosis=-0.6317507457312379) ### Comparing models ## 1. 
XG Boost 1.1 XGboost base model ``` from xgboost.sklearn import XGBClassifier # making sure the test and the train files have same sequence of columns test = test[X.columns] # defining the base model xgb_model_base = XGBClassifier(n_estimators = 100) # printing the cross validation scores for the classifier scores = cross_validate(xgb_model_base, X, y.values.ravel(), scoring='f1', cv=3,n_jobs = -1 ) # n_jobs is the number of cpus to use -1 => all scores # fitting on the training data xgb_model_base.fit(X, y.values.ravel()) # predicting the outcome from the final predictions = xgb_model_base.predict(test) # write out out_df = test_set.copy() data = {'id': list(out_df.index), 'category': predictions} final_df = pd.DataFrame(data) # 3: write file out final_df.to_csv('submission.csv',index=False, sep=',') ``` 1.2 XgBosst with random search ``` # defining the search grid random_grid = { "n_estimators" : [int(x) for x in np.linspace(50, 600, num = 20)], "learning_rate" : [0.01, 0.02, 0.05, 0.10 ] , "max_depth" : [ 6, 8, 10, 12, 15, 20], "min_child_weight" : [ 1, 3, 5, 7 ], "gamma" : [ 0.3, 0.4, 0.7, 0.9 ], "colsample_bytree" : [ 0.05, 0.1, 0.3, 0.4] } # Use the random grid to search for best hyperparameters # First create the base model to tune xgb_model = XGBClassifier() # Random search of parameters xgb_random = RandomizedSearchCV(estimator = xgb_model, param_distributions = random_grid, n_iter = 10, cv = 3, verbose=2, random_state=42 ,n_jobs = -1, scoring = 'f1_weighted') optimised_xgb_random = xgb_random.best_estimator_ # printing the cross validation scores for the classifier scores = cross_validate(optimised_xgb_random, X, y.values.ravel(), scoring='f1', cv=3,n_jobs = -1 ) # n_jobs is the number of cpus to use -1 => all scores # fitting on the training data xgb_model_base.fit(X, y.values.ravel()) # predicting the outcome from the final optimised_xgb_random.predict(test) # write out out_df = test_set.copy() data = {'id': list(out_df.index), 'category': predictions} 
final_df = pd.DataFrame(data) # 3: write file out final_df.to_csv('submission.csv',index=False, sep=',') ``` ## 2. Support Vector Machine ``` from sklearn.svm import LinearSVC # SVM has a zero tolerance towards null values, hence replacing them by 0 XVM = X.fillna(value=0) test_SVM = test.fillna(value=0) clf = LinearSVC( tol=1e-4) # printing the cross validation scores for the classifier scores = cross_validate(clf, XVM, y, scoring='f1', cv=10,n_jobs = -1 ) # n_jobs is the number of cpus to use -1 => all scores # fitting on the training data clf.fit(XVM, y) # predicting the outcome from the final prediction_clf = clf.predict(test_SVM) # write out out_df = test_set.copy() data = {'id': list(out_df.index), 'category': predictions} final_df = pd.DataFrame(data) # 3: write file out final_df.to_csv('submission.csv',index=False, sep=',') ``` ## 3. Random Forest ``` from sklearn.ensemble import RandomForestClassifier # 1: retrain the complete model -> don't forget to change this to optimal one @ end final_model = RandomForestClassifier() final_model.fit(X, y) # 2: predict on the test set final_test_set = test_set.loc[:, (test_set.columns != 'source_authors') & (test_set.columns != 'common_authors') & (test_set.columns != 'target_authors')& (test_set.columns != 'label')& (test_set.columns != 'source_journal') & (test_set.columns != 'target_journal')] predictions = final_model.predict(final_test_set) # write out out_df = test_set.copy() data = {'id': list(out_df.index), 'category': predictions} final_df = pd.DataFrame(data) # 3: write file out final_df.to_csv('submission.csv',index=False, sep=',') # plot the feature importance feat_importances = pd.Series(final_model.feature_importances_, index=X.columns) feat_importances.nlargest(10).plot(kind='barh') plt.show() ``` ## The end
github_jupyter
## Dependencies
```
import json, warnings, shutil, glob
from jigsaw_utility_scripts import *
from scripts_step_lr_schedulers import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers

# Fix all RNG seeds for reproducibility (seed_everything comes from jigsaw_utility_scripts)
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
pd.set_option('max_colwidth', 120)
pd.set_option('display.float_format', lambda x: '%.4f' % x)
```
## TPU configuration
```
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
```
# Load data
```
database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-1-clean-polish/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv",
                       usecols=['comment_text', 'toxic', 'lang'])

print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print('Validation samples: %d' % len(valid_df))
display(valid_df.head())

base_data_path = 'fold_1/'
fold_n = 1

# Unzip files
!tar -xf /kaggle/input/jigsaw-data-split-roberta-192-ratio-1-clean-polish/fold_1.tar.gz
```
# Model parameters
```
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'

config = {
  "MAX_LEN": 192,
  "BATCH_SIZE": 128,
  "EPOCHS": 3,
  "LEARNING_RATE": 1e-5,
  "ES_PATIENCE": None,
  "base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5',
  "config_path": base_path + 'xlm-roberta-large-config.json'
}

# persist the run configuration next to the model artefacts
with open('config.json', 'w') as json_file:
    json.dump(json.loads(json.dumps(config)), json_file)

config
```
## Learning rate schedule
```
lr_min = 1e-7
lr_start = 0
lr_max = config['LEARNING_RATE']
step_size = (len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) * 2) // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * step_size
hold_max_steps = 0
warmup_steps = total_steps * 0.1
decay = .9998

# preview the warmup + exponential-decay schedule before training
rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]
y = [exponential_schedule_with_warmup(tf.cast(x, tf.float32), warmup_steps=warmup_steps,
                                      hold_max_steps=hold_max_steps, lr_start=lr_start,
                                      lr_max=lr_max, lr_min=lr_min, decay=decay) for x in rng]

sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
```
# Model
```
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)

def model_fn(MAX_LEN):
    """Build the classifier: XLM-R backbone, first-token embedding, one sigmoid output."""
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')

    base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
    last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    cls_token = last_hidden_state[:, 0, :]  # embedding of the first ([CLS]-style) position

    output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)

    model = Model(inputs=[input_ids, attention_mask], outputs=output)

    return model
```
# Train
```
# Load data
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32)
x_valid = np.load(base_data_path + 'x_valid.npy')
y_valid = np.load(base_data_path + 'y_valid_int.npy').reshape(x_valid.shape[1], 1).astype(np.float32)
x_valid_ml = np.load(database_base_path + 'x_valid.npy')
y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32)

#################### ADD TAIL ####################
x_train_tail = np.load(base_data_path + 'x_train_tail.npy')
y_train_tail = np.load(base_data_path + 'y_train_int_tail.npy').reshape(x_train_tail.shape[1], 1).astype(np.float32)
x_train = np.hstack([x_train, x_train_tail])
y_train = np.vstack([y_train, y_train_tail])

# NOTE(review): shape[1] is used as the sample count throughout, i.e. the
# arrays appear to be stored (inputs, samples) -- confirm against the
# jigsaw_utility_scripts data layout.
step_size = x_train.shape[1] // config['BATCH_SIZE']
valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']
valid_2_step_size = x_valid.shape[1] // config['BATCH_SIZE']

# Build TF datasets
train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))
valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))
valid_2_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid, y_valid, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))

train_data_iter = iter(train_dist_ds)
valid_data_iter = iter(valid_dist_ds)
valid_2_data_iter = iter(valid_2_dist_ds)

# Step functions
@tf.function
def train_step(data_iter):
    """Run step_size distributed training batches, updating train_auc/train_loss."""
    def train_step_fn(x, y):
        with tf.GradientTape() as tape:
            probabilities = model(x, training=True)
            loss = loss_fn(y, probabilities)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_auc.update_state(y, probabilities)
        train_loss.update_state(loss)
    for _ in tf.range(step_size):
        strategy.experimental_run_v2(train_step_fn, next(data_iter))

@tf.function
def valid_step(data_iter):
    """Evaluate on the multilingual validation set (valid_auc/valid_loss)."""
    def valid_step_fn(x, y):
        probabilities = model(x, training=False)
        loss = loss_fn(y, probabilities)
        valid_auc.update_state(y, probabilities)
        valid_loss.update_state(loss)
    for _ in tf.range(valid_step_size):
        strategy.experimental_run_v2(valid_step_fn, next(data_iter))

@tf.function
def valid_2_step(data_iter):
    """Evaluate on the fold's own validation split (valid_2_auc/valid_2_loss)."""
    def valid_step_fn(x, y):
        probabilities = model(x, training=False)
        loss = loss_fn(y, probabilities)
        valid_2_auc.update_state(y, probabilities)
        valid_2_loss.update_state(loss)
    for _ in tf.range(valid_2_step_size):
        strategy.experimental_run_v2(valid_step_fn, next(data_iter))

# Train model
with strategy.scope():
    model = model_fn(config['MAX_LEN'])

    # lr is a zero-arg callable so Adam re-evaluates the schedule each step
    lr = lambda: exponential_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32),
                                                  warmup_steps=warmup_steps, hold_max_steps=hold_max_steps,
                                                  lr_start=lr_start, lr_max=lr_max, lr_min=lr_min,
                                                  decay=decay)
    optimizer = optimizers.Adam(learning_rate=lr)
    loss_fn = losses.binary_crossentropy
    train_auc = metrics.AUC()
    valid_auc = metrics.AUC()
    valid_2_auc = metrics.AUC()
    train_loss = metrics.Sum()
    valid_loss = metrics.Sum()
    valid_2_loss = metrics.Sum()

metrics_dict = {'loss': train_loss, 'auc': train_auc,
                'val_loss': valid_loss, 'val_auc': valid_auc,
                'val_2_loss': valid_2_loss, 'val_2_auc': valid_2_auc}

history = custom_fit_2(model, metrics_dict, train_step, valid_step, valid_2_step,
                       train_data_iter, valid_data_iter, valid_2_data_iter,
                       step_size, valid_step_size, valid_2_step_size,
                       config['BATCH_SIZE'], config['EPOCHS'], config['ES_PATIENCE'],
                       save_last=False)
# model.save_weights('model.h5')

# Make predictions
# x_train = np.load(base_data_path + 'x_train.npy')
# x_valid = np.load(base_data_path + 'x_valid.npy')
x_valid_ml_eval = np.load(database_base_path + 'x_valid.npy')

# train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))
# valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))

# k_fold.loc[k_fold[f'fold_{fold_n}'] == 'train', f'pred_{fold_n}'] = np.round(train_preds)
# k_fold.loc[k_fold[f'fold_{fold_n}'] == 'validation', f'pred_{fold_n}'] = np.round(valid_preds)
valid_df[f'pred_{fold_n}'] = valid_ml_preds

# Fine-tune on validation set
#################### ADD TAIL ####################
x_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')])
y_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml])  # same labels stacked for head and tail encodings
valid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE']

# Build TF datasets
train_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail, config['BATCH_SIZE'], AUTO, seed=SEED))
train_ml_data_iter = iter(train_ml_dist_ds)

# Step functions
@tf.function
def train_ml_step(data_iter):
    """Training step over the multilingual validation data (fine-tuning pass)."""
    def train_step_fn(x, y):
        with tf.GradientTape() as tape:
            probabilities = model(x, training=True)
            loss = loss_fn(y, probabilities)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_auc.update_state(y, probabilities)
        train_loss.update_state(loss)
    for _ in tf.range(valid_step_size_tail):
        strategy.experimental_run_v2(train_step_fn, next(data_iter))

# Fine-tune on validation set (fresh optimizer with a constant learning rate)
optimizer = optimizers.Adam(learning_rate=config['LEARNING_RATE'])
history_ml = custom_fit_2(model, metrics_dict, train_ml_step, valid_step, valid_2_step,
                          train_ml_data_iter, valid_data_iter, valid_2_data_iter,
                          valid_step_size_tail, valid_step_size, valid_2_step_size,
                          config['BATCH_SIZE'], 2, config['ES_PATIENCE'], save_last=False)

# Join history
for key in history_ml.keys():
    history[key] += history_ml[key]

model.save_weights('model.h5')

# Make predictions
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
valid_df[f'pred_ml_{fold_n}'] = valid_ml_preds

### Delete data dir
shutil.rmtree(base_data_path)
```
## Model loss graph
```
plot_metrics_2(history)
```
# Model evaluation
```
# display(evaluate_model_single_fold(k_fold, fold_n, label_col='toxic_int').style.applymap(color_map))
```
# Confusion matrix
```
# train_set = k_fold[k_fold[f'fold_{fold_n}'] == 'train']
# validation_set = k_fold[k_fold[f'fold_{fold_n}'] == 'validation']
# plot_confusion_matrix(train_set['toxic_int'], train_set[f'pred_{fold_n}'],
#                       validation_set['toxic_int'], validation_set[f'pred_{fold_n}'])
```
# Model evaluation by language
```
display(evaluate_model_single_fold_lang(valid_df, fold_n).style.applymap(color_map))

# ML fine-tunned preds
display(evaluate_model_single_fold_lang(valid_df, fold_n, pred_col='pred_ml').style.applymap(color_map))
```
# Visualize predictions
```
print('English validation set')
display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))
print('Multilingual validation set')
display(valid_df[['comment_text', 'toxic'] + [c for c in valid_df.columns if c.startswith('pred')]].head(10))
```
# Test set predictions
```
x_test = np.load(database_base_path + 'x_test.npy')
test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO))

submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
submission['toxic'] = test_preds
submission.to_csv('submission.csv', index=False)

display(submission.describe())
display(submission.head(10))
```
github_jupyter
```
import pandas as pd
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas.util.testing as tm
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
import seaborn as sns
import sys
sys.path.append('./../../')

# OPTIONAL: Load the "autoreload" extension so that code can change
%load_ext autoreload

#Own Library modules
import src.pca

# OPTIONAL: always reload modules so that as you change code in src, it gets loaded
%autoreload 2
from src.pca import PCA_from_sklearn
```
#### 1. Árbol de decisión para clasificación
##### 1.1. Usando los 7 componentes principales que se obtuvieron con el criterio de eigenvalue
```
# Load the initial data to obtain the target variable, FoodGroup
df = pd.read_csv('../../data/nndb_flat.csv', encoding = "L1")
df_1 = pd.DataFrame(df.FoodGroup)
df_1.FoodGroup.value_counts(dropna=False)

## Encode the FoodGroup variable using sklearn's LabelEncoder
labelencoder = LabelEncoder()
df_1['FoodGroup'] = labelencoder.fit_transform(df_1.FoodGroup)
df_1.FoodGroup.value_counts(dropna=False)

## Load the 7 principal components used in this analysis
componentesprincipales_analisis_post = pd.read_csv('../../results/data_results/componentesprincipales_analisis_post.csv')

classifier = DecisionTreeClassifier(random_state = 0)
classifier

### Split the sample into train and test, 60% and 40% respectively
X_train, X_test, y_train, y_test = train_test_split(componentesprincipales_analisis_post, df_1.FoodGroup, test_size=0.4, random_state=0)

### Display the train/test shapes
print( "Predictor - Training : ", X_train.shape, "Predictor - Testing : ", X_test.shape )

### Train the model
classifier = classifier.fit(X_train, y_train)
y_pred = pd.DataFrame(classifier.predict(X_test))
probs = classifier.predict_proba(X_test)
```
- Métricas para evaluar el modelo
```
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, average='macro'))
print("Recall:",metrics.recall_score(y_test, y_pred, average='macro'))
print(metrics.classification_report(y_test, y_pred))

clf_matrix=metrics.confusion_matrix(y_test,y_pred)
f, ax = plt.subplots(figsize=(12,12))
sns.heatmap(pd.DataFrame(clf_matrix), annot=True, fmt="g", linewidths=.5, xticklabels=1,
            cmap="Greens", yticklabels=False, cbar=True)
```
##### 1.2. Usando las variables originales
```
## Select the variables used by the model
X = df.iloc[:, 7:45]
X.describe()

## Scale the variables
X = StandardScaler().fit_transform(X)

df_1 = pd.DataFrame(df.FoodGroup)
df_1.FoodGroup.value_counts(dropna=False)

## Encode the FoodGroup variable using sklearn's LabelEncoder
labelencoder = LabelEncoder()
df_1['FoodGroup'] = labelencoder.fit_transform(df_1.FoodGroup)
df_1.FoodGroup.value_counts(dropna=False)

classifier = DecisionTreeClassifier(random_state=0)

### Split the sample into train and test, 60% and 40% respectively
X_train, X_test, y_train, y_test = train_test_split(X, df_1.FoodGroup, test_size=0.4, random_state=0)

### Train the model
classifier = classifier.fit(X_train, y_train)
y_pred = pd.DataFrame(classifier.predict(X_test))
probs = classifier.predict_proba(X_test)
```
- Métricas para evaluar el modelo
```
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
print("Precision:",metrics.precision_score(y_test, y_pred, average='macro'))
print("Recall:",metrics.recall_score(y_test, y_pred, average='macro'))
print(metrics.classification_report(y_test, y_pred))

clf_matrix=metrics.confusion_matrix(y_test,y_pred)
f, ax = plt.subplots(figsize=(12,12))
sns.heatmap(pd.DataFrame(clf_matrix), annot=True, fmt="g", linewidths=.5, xticklabels=1,
            cmap="Greens", yticklabels=False, cbar=True)
```
### K - Means
- Se utiliza este algoritmo para el análisis de conglomerados.
- Se eligen los dos principales componentes
```
# Keep only the first two principal components for clustering
componentesprincipales_dos = componentesprincipales_analisis_post.loc[:, ['principal_component_1', 'principal_component_2']]
componentesprincipales_dos
```
Por lo que, para el primer componente principal se asocian más la vitamina Riboflavin o vitamina B2, Niacin o vitamina B3 y la vitamina B6. El porcentaje de varianza explicada de este componente es de 23.69%. Mientras que, para el segundo componente principal se asocian los carbohidratos, el azúcar y la vitamina B12. El porcentaje de varianza explicada de este segundo componente principal es de 11.38% (ver notebook [PCA_from_sklearn](https://github.com/123972/PCA-nutricion/blob/master/notebooks/Programacion/PCA_from_sklearn.ipynb)).
```
### Load the clustering utilities
import sklearn as sk
from sklearn import preprocessing
from sklearn.cluster import KMeans
```
- Gráfica que muestra los dos componentes principales.
```
plt.scatter(data=componentesprincipales_dos, x='principal_component_1', y='principal_component_2')
plt.xlabel('Componente 1')
plt.ylabel('Componente 2')
```
- Se obtiene el número de clusters óptimos a partir del punto de corte de la siguiente gráfica, el método que se uso fue el de Elbow. Sin embargo este proceso se tarda mucho y además nos arroja dos puntos de corte (2 y 5), por lo que se decidió paralelizar usando Dask.
```
# Elbow method: within-cluster sum of squares (inertia) for k = 1..10
wcss = []
for i in range(1, 11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(componentesprincipales_dos)
    wcss.append(kmeans.inertia_)

# Plot of the sum of squared distances
plt.plot(range(1, 11), wcss)
plt.xlabel('Número de clusters')
plt.ylabel('Suma de las distancias al cuadrado')
plt.show()
```
#### Cómputo en paralelo usando Dask
```
### Load the libraries
from dask.distributed import Client, progress
import dask_ml.cluster

client = Client()
client
```
- Al dar click en el dashboard de arriba se muestra la página de status mediante bokeh, en el puerto 8787. A continuación se presenta el apartado de workers:
```
from IPython.display import Image, display
display(Image(filename='../../docs/img/dask_workers.png', embed=True))
```
- Se obtiene el número de cluster óptimos paralelizando mediante Dask.
```
# Same elbow scan as above but with dask-ml's parallel KMeans
wcss = []
for i in range(1, 11):
    kmeans = dask_ml.cluster.KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(componentesprincipales_dos)
    wcss.append(kmeans.inertia_)

# Plot of the sum of squared distances
plt.plot(range(1, 11), wcss)
plt.xlabel('Número de clusters')
plt.ylabel('Suma de las distancias al cuadrado')
plt.show()
```
- En la gráfica se observa que el número de clusters óptimos es de 2, aunque se puede ver que también el 4 tiene un poco de inflexión, así que se usaremos K- Means para dos clusters y también se paraleliza.
```
# Final fit with the chosen k = 2 (parallelised via dask-ml)
kmeans = dask_ml.cluster.KMeans(n_clusters = 2, init = 'k-means++', random_state = 42)
k_means = kmeans.fit(componentesprincipales_dos)
centers = kmeans.cluster_centers_ ###cluster centroids
labels = kmeans.labels_

# Scatter coloured by cluster label, centroids marked in red
plt.scatter(data=componentesprincipales_dos, x='principal_component_1', y='principal_component_2', c=labels)
plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.5)
plt.xlabel('Componente 1 \n (vitamina B2, vitamina B3, vitamina B6)')
plt.ylabel('Componente 2 \n (carbohidratos, azúcares, vitamina B12)')
```
##### Referencias:
- Palacios M. Erick, Notas de MNO 2020, [Cómputo en paralelo - Dask](https://github.com/ITAM-DS/analisis-numerico-computo-cientifico/blob/master/temas/II.computo_paralelo/2.2.Python_dask.ipynb)
- Tipología_manejo_agrícola por [Irene Ramos](https://github.com/iramosp/tesis-paisajes/blob/master/Tipologia_manejo_agricola.ipynb)
github_jupyter
# 07.00 - Modeling - Prophet Model & Select Cross Validation Rolling Window Size + We have data for each summer from 1994 to 2018 + We initially decided that the minimum size of the hold out test data is 5 years from 2014 to 2018 + We want to select a rolling window that extracts as much value as possible fom the data, but that leaves as much data as possible as hold-out data + Prophet seems to have good out of the box performance, and runs faster than statsmodels ARIMA + We beleive that there are some underlying structural changes that have changed cause and effect relationships between features and power demand between 1994 and 2018 + The feature data is limited to weather. We do not have data for items such as air conditioner penetration, conserrvation growth (eg LEDs), population growth, housing stock types. + Therefore, I am going to make the assertion that next year's power demand pattern more closely resembles this year's pattern rather than last year's + We could introduce some sort of decay scheme where more recent data is weighted more heavily than older data. But this does not help us maximize the size of the held-out test data #### One approach could be: + We will use only the power data, and run a series of incrementally increasing cross validation windows across the data between 1994 and 2013 + Based on the results we will select a window for the rolling time series cross validation to use in the rest of the modeling process. We will select the window by running prophet on an incremetally increasing sequence of rolling windows, and look for either a best size, or a size beyond which we get diminishing returns. + I realize that this is breaking some rules.If the window proves to be 3 years then to get 10 cross folds, my hold out data will be from 2008 to 2018. But, I will have already "touched" some of this data when I determined the size of the rolling window. 
#### Another approach could be: + Make a judgement as to a reasonable time period #### Making a judgement: + If I had to draw a chart of next year's demand by reviewing a chart of the last 100 years of data, I would draw a chart that looked exactly the same as last year + or - any obvious trend. + We are making a prediction for a single year ahead, using our cross validation scheme i.e. the validation set comprises one year. If we only choose a single year of test data, then our model will miss out on trends, and will be working on a 50/50 train test split. Therefore, our training period should be greater than 1 year. + Two years of training data is not enough because a degree of randomness is introduced by the weather. i.e. if we have a hot summer followed by a cold summer, this could be seen as a trend, but it is really randomness. Therefore, our training period should be greater than 2 years. + Twenty years seems too long because diverse underlying structural changes in the demand patterns mean that year 1 is not really the "same" as year 20 + At this point, I have delayed making this decision long enough, and I am going to (semi-)arbitrarily select a training period of 5 years. This gives a train/validation split of 83/17% which seems reasonable. 
My opinion is that this period is long enough to capture trends, and short enough to give a reasonably close representation of the validation data + I want to keep 10 cross folds in order to capture the uncertainty in the model + Therefore my data split will look like this: + Training Data - 1994 to 2009 with a 10 fold rolling tiome series cross validation + Test Data - 2010 to 2018 - 9 years ## Imports & setup ``` import pathlib import warnings from datetime import datetime import sys import pickle import joblib import gc import pandas as pd import numpy as np # Plotting import matplotlib as mpl import matplotlib.pyplot as plt from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from matplotlib.dates import DateFormatter import matplotlib.dates as mdates # Imports sys.path.append("..") from src.utils.utils import (AnnualTimeSeriesSplit, RollingAnnualTimeSeriesSplit, bound_precision, run_cross_val, run_data_split_cross_val, save_run_results) from src.features.features import CyclicalToCycle from src.models.models import SK_SARIMAX, SK_Prophet, SetTempAsPower, SK_Prophet_1 from src.visualization.visualize import (plot_prediction, plot_joint_plot, residual_plots, print_residual_stats, resids_vs_preds_plot) #b # Packages from sklearn.pipeline import Pipeline from skoot.feature_selection import FeatureFilter from skoot.preprocessing import SelectiveRobustScaler from sklearn.metrics import mean_absolute_error from scipy.stats import norm from statsmodels.graphics.gofplots import qqplot from pandas.plotting import autocorrelation_plot from statsmodels.graphics.tsaplots import plot_acf import statsmodels.api as sm from fbprophet import Prophet # Display pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) figsize=(15,7) warnings.filterwarnings(action='ignore') %matplotlib inline # Data PROJECT_DIR = pathlib.Path.cwd().parent.resolve() CLEAN_DATA_DIR = PROJECT_DIR / 'data' / '05-clean' MODELS_DIR = 
PROJECT_DIR / 'data' / 'models' RESULTS_PATH = PROJECT_DIR / 'data' /'results' / 'results.csv' ``` ## Load Daily Data & Inspect ``` df = pd.read_csv(CLEAN_DATA_DIR / 'clean-features.csv', parse_dates=True, index_col=0) X = df.copy(deep=True) X = X.loc['1994': '2009'] y = X.pop('daily_peak') X.head() y.tail() ``` ## Prophet Model Run using just the y data - the daily peak demand ``` n_splits=10 prophet_model = SK_Prophet(pred_periods=96) ratscv = RollingAnnualTimeSeriesSplit(n_splits=n_splits, goback_years=5) steps = [('prophet', prophet_model)] pipeline = Pipeline(steps) d = run_cross_val(X, y, ratscv, pipeline, scoring=['mae', 'bound_precision']) d # Take a look at the results on the validation data print(np.mean(d['test']['mae'])) print(np.mean(d['test']['bound_precision'])) ```
github_jupyter
**Aim: Implement Decision Tree classifier** - Implement Decision Tree classifier using scikit learn library - Test the classifier for Weather dataset Step 1: Import necessary libraries. ``` from sklearn import preprocessing from sklearn.tree import DecisionTreeClassifier ``` Step 2: Prepare dataset. ``` #Predictor variables Outlook = ['Rainy', 'Rainy', 'Overcast', 'Sunny', 'Sunny', 'Sunny', 'Overcast', 'Rainy', 'Rainy', 'Sunny', 'Rainy','Overcast', 'Overcast', 'Sunny'] Temperature = ['Hot', 'Hot', 'Hot', 'Mild', 'Cool', 'Cool', 'Cool', 'Mild', 'Cool', 'Mild', 'Mild', 'Mild', 'Hot', 'Mild'] Humidity = ['High', 'High', 'High', 'High', 'Normal', 'Normal', 'Normal', 'High', 'Normal', 'Normal', 'Normal', 'High', 'Normal', 'High'] Wind = ['False', 'True', 'False', 'False', 'False', 'True', 'True', 'False', 'False', 'False', 'True', 'True', 'False', 'True'] #Class Label: Play = ['No', 'No', 'Yes', 'Yes', 'Yes', 'No', 'Yes', 'No', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'No'] ``` Step 3: Digitize the data set using encoding ``` #creating labelEncoder le = preprocessing.LabelEncoder() # Converting string labels into numbers. 
Outlook_encoded = le.fit_transform(Outlook) Outlook_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print("Outllok mapping:",Outlook_name_mapping) Temperature_encoded = le.fit_transform(Temperature) Temperature_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print("Temperature mapping:",Temperature_name_mapping) Humidity_encoded = le.fit_transform(Humidity) Humidity_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print("Humidity mapping:",Humidity_name_mapping) Wind_encoded = le.fit_transform(Wind) Wind_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print("Wind mapping:",Wind_name_mapping) Play_encoded = le.fit_transform(Play) Play_name_mapping = dict(zip(le.classes_, le.transform(le.classes_))) print("Play mapping:",Play_name_mapping) print("\n\n") print("Weather:" ,Outlook_encoded) print("Temerature:" ,Temperature_encoded) print("Humidity:" ,Humidity_encoded) print("Wind:" ,Wind_encoded) print("Play:" ,Play_encoded) ``` Step 4: Merge different features to prepare dataset ``` features = tuple(zip(Outlook_encoded, Temperature_encoded, Humidity_encoded, Wind_encoded)) features ``` Step 5: Train ’Create and Train DecisionTreeClassifier’ ``` #Create a Decision Tree Classifier (using Entropy) DT = DecisionTreeClassifier(criterion = "entropy") # Train the model using the training sets final_model = DT.fit(features, Play_encoded) #(features, Class_label) ``` Step 6: Predict Output for new data ``` #Predict Output prediction = DT.predict([[0, 1, 1, 0],[2, 2, 1, 1], [2, 2, 0, 1]]) # last is from training dataset and expected o/p was 0 and it is print("Predicted Values for Playing: ", prediction) #No:0 ; Yes:1 ``` Step 7: Display Decsion Tree Created - This step requires graphviz and tkinter packages installed ``` from sklearn.tree import export_graphviz export_graphviz(DT,out_file='tree_entropy.dot', feature_names=['outlook','temperature','humidity','wind'], class_names=['play_no','play_yes'], 
filled=True) # Convert to png from subprocess import call call(['dot', '-Tpng', 'tree_entropy.dot', '-o', 'tree_entropy.png', '-Gdpi=600'], shell=True) # Display in python import matplotlib.pyplot as plt plt.figure(figsize = (14, 18)) plt.imshow(plt.imread('tree_entropy.png')) plt.axis('off'); plt.show(); from sklearn.tree import plot_tree print(plot_tree(DT, class_names = ["Yes", "No"])) ```
github_jupyter
https://fenderist.tistory.com/168 타자 AVG(Batting Average) : 타율 G (Game) : 참여경기수(경기) PA(Plate Appearances) : 타석수( 타자가 타석에 선 횟수 ), 한게임 평균 3~4타석 슴 AB(At Bat) : 타수 ( 타격을 완료한 횟수, 볼넷, 희생번트, 타격 방해등은 포함 X) R(Runs) : 득점 ( 홈플레이트를 밟아 팀에 점수가 올랐을때 기록됨 ) H(Hits) : 안타 2B(double) : 2루타 3B(Triple) : 3루타 HR(Home Run) : 홈런 TB(Total Bases) : 총 루타, 계산법(H+2B+(2*3B)+(3*HR) or 1B+(2*2B)+(3*3B)+(4*HR)) TBI(Runs Batted In) : 타점 SAC(Sacrifice Bunt) : 희생번트 SF(sacrifice Flying): 희생플라이 BB(Base on Balls) : 볼넷(4구) IBB(Intentional Base on Balls) : 고의 4구 HBP(Bit By Pitch) : 사구(몸에 맞는 볼) SO(Strike Out) : 삼진 GDP(Groinded Into Double Play) : 병살타 (주자와 타자 모두 아웃되는 타격, 2아웃 이상) SLG(Slugging Percentage) : 장타율 OBP(On-Base Percentage) : 출루율 OPS( OBP + SLG ) : OPS MH(Multi Hits) : 멀티히트 RISP(Batting Average with Runners in Scoring Position) : 득점권 타율, 2루나 3루에 주자 진출해있을때 타자가 안타칠확률 PH-BA(Pinch Batting Average) : 대타 타율 투수 ERA(Earned Run Average) : 9이닝당 평균 자책점 G : 경기수 W(Wins) : 승리 L(Losses) : 패배 SV(Save) : 세이브 HLD(Hold) : 홀드 WPCT(Win Percentage) : 승률 IP(Innings Pitched) : 던진 이닝 H(Hits) : 피안타 HR(Home Run) : 피홈런 BB(Base on Balls) : 볼넷 HBP(Hit By Pitch) : 사구(몸에 맞는 공) SO(Strike Out) : 삼진 R(Runs): 실점 ER(Earned Runs) :자책점(야수 실책이 아닌, 투수가 내준 점수) WHIP(Walks plus Hits divided by Innings Pitched) : 이닝당 출루허용률 CG(Completed Games) : 완투승 ( 9이닝까지 투수가 전부 다 던져서 이긴 게임수) SHO(Shutouts) : 완봉승( 9이닝까지 투수가 무실점으로 던져서 이긴 게임수) QS(Quality Start) : 퀄리티스타트 수 , 6이닝이상 3실점 이하로 던졌을때 퀄리티 스타트 라고한다. BSV(Blown Saves) : 블론 세이브, 세이브/홀드 기회에서 동점 또는 역전 허용시 블론 세이브라고함. 
TBF(Total Batters Faced) : 상대 타자수 NP(Number of Pitchs) : 투구수 AVG(Batting Average) : 피안타율 2B(Double) : 2루타 3B(Triple) : 3루타 SAC(Sacrifice Bunt): 희생번트 SF(sacrifice Flying) : 희생플라이 IBB(Intentional Base on Balls) : 고의 사구 WP(Wild Pitches ) : 폭투 , 투구 에러 BK(Balks) : 보크, 주자가 있는 상황에서 투수의 반칙 행위 ``` from bs4 import BeautifulSoup from selenium import webdriver import requests import time import csv driver = webdriver.Chrome("./chromedriver") driver.get("https://www.koreabaseball.com/Record/Player/HitterBasic/Basic1.aspx") # driver.get("https://www.koreabaseball.com/Record/Player/Defense/Basic.aspx") # driver.get("https://www.koreabaseball.com/Record/Player/Runner/Basic.aspx") driver = webdriver.Chrome("./chromedriver") driver.get("https://www.koreabaseball.com/Record/Player/HitterBasic/Basic1.aspx") with open("kbo.csv", "w", encoding="utf-8") as f: writer = csv.writer(f) soup = BeautifulSoup(driver.page_source, "lxml") hitter_table = soup.find("table", attrs={"class": "tData01 tt"}) hitter_thead = hitter_table.find("thead") data_rows = hitter_thead.find_all("tr") for row in data_rows: columns = row.find_all("th") data = [column.text for column in columns] writer.writerow(data) for i in range(1, 3): time.sleep(2) driver.find_element_by_link_text(f"{i}").click() time.sleep(0.5) soup = BeautifulSoup(driver.page_source, "lxml") hitter_table = soup.find("table", attrs={"class": "tData01 tt"}) hitter_tbody = hitter_table.find("tbody") data_rows = hitter_tbody.find_all("tr") for row in data_rows: columns = row.find_all("td") data = [column.text for column in columns] writer.writerow(data) driver.quit() driver = webdriver.Chrome("./chromedriver") driver.get("https://www.koreabaseball.com/Record/Player/HitterBasic/Basic2.aspx") with open("kbo2.csv", "w") as f: writer = csv.writer(f) # --------- thead 과정 soup = BeautifulSoup(driver.page_source, "lxml") hitter_table = soup.find("table", attrs={"class": "tData01 tt"}) hitter_thead = hitter_table.find("thead") data_rows = 
hitter_thead.find_all("tr") for row in data_rows: columns = row.find_all("th") data = [column.text for column in columns] writer.writerow(data) # --------- # --------- tbody for i in range(1, 3): time.sleep(2) driver.find_element_by_link_text(f"{i}").click() time.sleep(0.5) soup = BeautifulSoup(driver.page_source, "lxml") hitter_table = soup.find("table", attrs={"class": "tData01 tt"}) hitter_tbody = hitter_table.find("tbody") data_rows = hitter_tbody.find_all("tr") for row in data_rows: columns = row.find_all("td") data = [column.text for column in columns] writer.writerow(data) # --------- driver.quit() import pandas as pd df_1 = pd.read_csv("kbo.csv") df_2 = pd.read_csv("kbo2.csv") df_3 = pd.concat([df_1, df_2], axis=1, join="inner") df_3.to_csv("kbo3.csv") renew_df = df_3.loc[:, ~df_3.T.duplicated()] renew_df.to_csv("kbo4.csv") renew_df ```
github_jupyter
# Linear Algebra Review ![image.png](attachment:image.png) - **Scalar:** Any single numerical value. - **Vector:** An array of numbers(data) is a vector. - **Matrix:** A matrix is a 2-D array of shape (m×n) with m rows and n columns. - **Tensor:** Generally, an n-dimensional array where n>2 is called a Tensor. But a matrix or a vector is also a valid tensor. ``` import numpy as np ``` ### Creating Vector ``` arr_1 = np.array([1,2,3,4,5]) arr_1 print(f"Type: {type(arr_1)}") print(f"Shape: {arr_1.shape}") print(f"Dimension: {arr_1.ndim}") ``` ### Creating Matrice ``` arr_2 = np.array([[1,2,3,4],[5,6,7,8]]) arr_2 print(f"Type: {type(arr_2)}") print(f"Shape: {arr_2.shape}") print(f"Dimension: {arr_2.ndim}") ``` # Addition and Scalar Multiplication ## Addition Two matrices may be added or subtracted only if they have the same dimension; that is, they must have the same number of rows and columns. Addition or subtraction is accomplished by adding or subtracting corresponding elements. ``` matrice_1 = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 8, 6, 5]]) matrice_2 = np.array([[-1, 4, 3, 5], [1, 4, 7, 9], [-6, 5, 11, -4]]) print(f"Matrice_1: \n{matrice_1}") print(f"\nMatrice_2: \n{matrice_2}") ``` ### Adding two matrices ``` matrice_1 + matrice_2 ``` --- ## Multiplication ### Scalar Multiplication The term scalar multiplication refers to the product of a real number and a matrix. In scalar multiplication, each entry in the matrix is multiplied by the given scalar. ``` matrice_1 * matrice_2 ``` ### Matrix-Vector Multiplication Multiplication between a matrix "M" and a vector "v", we need to view the vector as a column matrix. We define the matrix-vector product only for the case when the number of columns in M equals the number of rows in v. So, if M is an m×n matrix (i.e., with n columns), then the product Mv is defined for n×1 column vectors x. If we let Mv=r, then r is an m×1 column vector. 
``` M = np.array([[ 6, 1 ,3], [ -1, 1 ,1], [ 1, 3 ,2]]) v = np.array([1, 2, 3]) ``` #### Option 1: ``` M.dot(v) ``` #### Option 2: ``` np.dot(M,v) ``` ### Matrix-Matrix Multiplication Matrix-Matrix multiplication, the number of columns in the first matrix must be equal to the number of rows in the second matrix. The resulting matrix, known as the matrix product, has the number of rows of the first and the number of columns of the second matrix. ``` C = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 8, 6, 5]]) D = np.array([[-1, 4, 3, 5], [1, 4, 7, 9], [-6, 5, 11, -4]]).reshape(4,3) C.dot(D) np.dot(C,D) ``` ## Matrix Multiplication Properties 1. The commutative property of multiplication $AB \neq BA$ 2. Associative property of multiplication $(AB)C = A(BC)$ 3. Distributive properties $A(B+C) = AB+AC$ 4. Multiplicative identity property $ IA =A\, \& \, AI=A$ 5. Multiplicative property of zero $ I0 =0 \, \& \, A0=0$ 6. Dimension property # Inverse and Transpose ## Inverse In linear algebra, an n-by-n square matrix A is called invertible (also nonsingular or nondegenerate), if there exists an n-by-n square matrix B such that $ AB=BA=I $ where In denotes the n-by-n identity matrix and the multiplication used is ordinary matrix multiplication. If this is the case, then the matrix B is uniquely determined by A, and is called the (multiplicative) inverse of A, denoted by A−1. ``` x = np.array([[4, 9], [25, 36]]) y = np.array([[8, 5], [1, 2]]) x_inv = np.linalg.inv(x) x.dot(x_inv) ``` ## Transpose In linear algebra, the transpose of a matrix is an operator which flips a matrix over its diagonal; that is, it switches the row and column indices of the matrix $A$ by producing another matrix, often denoted by $A^T$(among other notations). ``` x x_trans = x.T x_trans A = np.random.randint(1, 10, size=(5, 3)) print(f"Matrice: \n{A}") print(f"\nShape: {A.shape}") A_t = A.T print(f"Matrice: \n{A_t}") print(f"\nShape: {A_t.shape}") ```
github_jupyter
## 1. 데이터 불러오기 ``` import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import random data1 = pd.read_csv('C:/Users/Soyoung Cho/Desktop/NMT Project/dataset/train.csv') data2 = pd.read_csv('C:/Users/Soyoung Cho/Desktop/NMT Project/dataset/test.csv') data3 = pd.read_csv('C:/Users/Soyoung Cho/Desktop/NMT Project/dataset/tatoeba_data.csv') print("data1: ", len(data1)) print("data2: ", len(data2)) print("data3: ", len(data3)) data = pd.concat([data1, data2, data3]) data = data.reset_index(drop=True) # 0~20만, 0~20만 이런 인덱스까지 concat 되었던 것을 초기화, 다시 인덱스 주었다 data Kor_list = list(data['Korean']) #모든 한글 문장이 담긴 리스트 Eng_list = list(data['English']) #모든 영어 문장이 담긴 리스트 print(Kor_list[:5]) print("\n") print(Eng_list[:5]) result = list(zip(Kor_list,Eng_list)) random.shuffle(result) result Kor_list, Eng_list = zip(*result) dict_ = {"Korean": [], "English" : []} dict_["Korean"] = Kor_list dict_["English"] = Eng_list data = pd.DataFrame(dict_) data ``` ## 2. 데이터 중복 검사 및 제거 ``` data.describe() data.duplicated().sum() data = data.drop_duplicates() data.duplicated().sum() data = data.reset_index(drop=True) # 0~20만, 0~20만 이런 인덱스까지 concat 되었던 것을 초기화, 다시 인덱스 주었다 data #data.to_csv("datalist.csv", encoding = 'utf-8-sig', index = False, mode = "w") ``` ## 3. 문장별 단어 개수 파악 & 문제 파악 ``` kor_word_cnt = [] eng_word_cnt = [] for i in range(len(data)): kor_word_cnt.append(len(data['Korean'][i].split(" "))) eng_word_cnt.append(len(data['English'][i].split(" "))) data["Korean_word_count"] = kor_word_cnt data["English_word_count"] = eng_word_cnt ``` ### (1) 단어 개수 별 정렬해 데이터 확인 & 문제 수정 ``` kor_sorted = data.sort_values(by=['Korean_word_count'], axis=0, ascending=False) kor_sorted = kor_sorted.reset_index(drop=True) kor_sorted.head() kor_sorted[0:10] kor_sorted[-10:] ``` #### 문제 발견 및 수정, 데이터 재저장 ``` kor_sorted["Korean"][1603523] kor_sorted["Korean"][1603524] kor_sorted["Korean"][1603515] ``` 중간의 \xa0 때문에 한 문장이 한 단어로 인식 되었었다. 전체 dataset의 \xa0 를 " " 로 대체해준다. 다시 word를 카운트 해준다. 
``` data.replace("\xa0", " ", regex=True, inplace=True) #data ``` --- --- ### (2) 한글 문장 단어 개수 파악 ``` kor_sorted = data.sort_values(by=['Korean_word_count'], axis=0, ascending=False) kor_sorted = kor_sorted.reset_index(drop=True) kor_sorted.head() kor_sorted[-110:-90] kor_sorted["Korean"][1603427] kor_sorted[-130:-110] ``` 위와 같이 띄어쓰기가 아예 이루어지지 않은 문장은 어떻게 해야할지 고민 필요. ``` kor_sorted[0:20] ``` ### (3) 영어 문장 단어 개수 파악 ``` eng_sorted = data.sort_values(by=['English_word_count'], axis=0, ascending=False) eng_sorted = eng_sorted.reset_index(drop=True) eng_sorted[:20] eng_sorted['English'][0] eng_sorted['English'][1] eng_sorted['English'][2] eng_sorted['English'][3] len(eng_sorted['English'][3].split(" ")) len('"We will play the role of a hub for inter-Korean exchanges of performing arts and culture," said Kim Cheol-ho, 66, the new director of the National Theater of Korea, at an inaugural press conference held at a restaurant in Jongro-gu, Seoul on the 8th and he said, "We can invite North Korean national art troupe for the festival that will be held in 2020 for the 70th anniversary of our foundation." '.split(" ")) eng_sorted[-30:] ``` 한글 데이터는 짧은 문장의 경우 띄어쓰기가 잘 안 이루어져 있음. 영어 데이터는 긴 문장의 경우 띄어쓰기가 지나치게 많이 들어가 있음. 짧은 문장의 경우 검수 안된 문장들 . 혹은 x 등 많음. ## 4. 박스플롯 그려보기 ``` print("한글 문장 중 가장 적은 단어 개수 가진 문장은 ", min(kor_word_cnt)) print("한글 문장 중 가장 많은 단어 개수 가진 문장은 ", max(kor_word_cnt)) print("영어 문장 중 가장 적은 단어 개수 가진 문장은 ", min(eng_word_cnt)) print("영어 문장 중 가장 많은 단어 개수 가진 문장은 ", max(eng_word_cnt)) fig, ax = plt.subplots(figsize = (12,8)) sns.boxplot(kor_word_cnt) plt.show() fig, ax = plt.subplots(figsize = (12,8)) sns.boxplot(eng_word_cnt) plt.show() ``` ## 5. 데이터 저장하기 ``` del data['Korean_word_count'] del data['English_word_count'] #data.to_csv("C:/Users/Soyoung Cho/Desktop/NMT Project/dataset/datalist_modified.csv", encoding = 'utf-8-sig', index = False, mode = "w") ```
github_jupyter
> **Tip**: Welcome to the Investigate a Dataset project! You will find tips in quoted sections like this to help organize your approach to your investigation. Before submitting your project, it will be a good idea to go back through your report and remove these sections to make the presentation of your work as tidy as possible. First things first, you might want to double-click this Markdown cell and change the title so that it reflects your dataset and investigation. # Project: Investigate a Dataset (Replace this with something more specific!) ## Table of Contents <ul> <li><a href="#intro">Introduction</a></li> <li><a href="#wrangling">Data Wrangling</a></li> <li><a href="#eda">Exploratory Data Analysis</a></li> <li><a href="#conclusions">Conclusions</a></li> </ul> <a id='intro'></a> ## Introduction > **Tip**: In this section of the report, provide a brief introduction to the dataset you've selected for analysis. At the end of this section, describe the questions that you plan on exploring over the course of the report. Try to build your report around the analysis of at least one dependent variable and three independent variables. > > If you haven't yet selected and downloaded your data, make sure you do that first before coming back here. If you're not sure what questions to ask right now, then make sure you familiarize yourself with the variables and the dataset context for ideas of what to explore. ``` #The dataset which is selected is tmdb-movies.csv i.e. movies dataset which contains data on movies and ratings. #Revenue,Runtime and Popularity is tend to be explored.Over a period span reveneue v/s runtime, runtime v/s popularity and popularity v/s revenue is to be explored. #Questions which will be answered are: # 1.Over the decades, what are the popular runtimes? # 2.Spanning the time periods, is revenue proportional to popularity? # 3.Does runtime affect popularity? 
#only visualization and basic correlations are attempted in this project.And any investigation and exploratory are tentative at its best. # Use this cell to set up import statements for all of the packages that you # plan to use. # Remember to include a 'magic word' so that your visualizations are plotted # inline with the notebook. See this page for more: # http://ipython.readthedocs.io/en/stable/interactive/magics.html import pandas as pd import numpy as np import csv import datetime as datetime import matplotlib.pyplot as plt % matplotlib inline ``` <a id='wrangling'></a> ## Data Wrangling > **Tip**: In this section of the report, you will load in the data, check for cleanliness, and then trim and clean your dataset for analysis. Make sure that you document your steps carefully and justify your cleaning decisions. ### General Properties ``` # Load your data and print out a few lines. Perform operations to inspect data # types and look for instances of missing or possibly errant data. df=pd.read_csv('tmdb-movies.csv') df.info() df.head() #df.drop_duplicates() Return DataFrame with duplicate rows removed, optionally only considering certain columns sum(df.duplicated()) df.drop_duplicates(inplace=True) df.info() #isnull Return a boolean same-sized object indicating if the values are NA. #sum of those values are taken df.isnull().sum() ``` > **Tip**: You should _not_ perform too many operations in each cell. Create cells freely to explore your data. One option that you can take with this project is to do a lot of explorations in an initial notebook. These don't have to be organized, but make sure you use enough comments to understand the purpose of each code cell. Then, after you're done with your analysis, create a duplicate notebook where you will trim the excess and organize your steps so that you have a flowing, cohesive report. > **Tip**: Make sure that you keep your reader informed on the steps that you are taking in your investigation. 
Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). Try to make it so that the reader can then understand what they will be seeing in the following cell(s). ### Data Cleaning (Replace this with more specific notes!) ``` # After discussing the structure of the data and any problems that need to be # cleaned, perform those cleaning steps in the second part of this section. df.info() #earlier, we have removed the single duplicate record. # Here, we are removing rows with null values in imdb_id column df.dropna(subset=['imdb_id'], inplace=True) df.info() df.head() # write dataframe 'df' to a csv file 'data_imdb.csv' to use it for the next session df.to_csv('data_imdb.csv', index=False) ``` <a id='eda'></a> ## Exploratory Data Analysis > **Tip**: Now that you've trimmed and cleaned your data, you're ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables. ### Research Question 1 (Replace this header name!) ``` # Use this, and more code cells, to explore your data. Don't forget to add # Markdown cells to document your observations and findings. # Importing data from newly cleaned dataset df_imdb = pd.read_csv('data_imdb.csv') df_imdb.info() df_imdb df_new = df_imdb.groupby('release_year').mean() df_new df_new.plot(kind='bar') df_new.plot(kind='hist') df_new.describe() df_new.hist() ``` ### Research Question 2 (Replace this header name!) ``` # Continue to explore the data to address your additional research # questions. Add more headers as needed if you have more questions to # investigate. 
#The given below are observation Popularity,Revenue and Runtime df_new['popularity'].hist() plt.xlabel('Popularity') plt.title('Popularity Over the Years'); df_new['revenue'].hist() plt.xlabel('Revenue') plt.title('Revenue Over the Years'); df_new['runtime'].hist() plt.xlabel('Runtime') plt.title('Runtime Over the Years'); df_new['popularity'].describe() df_new['revenue'].describe() df_new['runtime'].describe() # Continue to explore the data to address your additional research # questions. Add more headers as needed if you have more questions to # investigate. #We can see that distribution is left skewed. #Most movie revenues fall in the 3.257984e+07 to 4.293171e+07 ranges. ``` <a id='conclusions'></a> ## Conclusions > **Tip**: Finally, summarize your findings and the results that have been performed. Make sure that you are clear with regards to the limitations of your exploration. If you haven't done any statistical tests, do not imply any statistical conclusions. And make sure you avoid implying causation from correlation! > **Tip**: Once you are satisfied with your work, you should save a copy of the report in HTML or PDF form via the **File** > **Download as** submenu. Before exporting your report, check over it to make sure that the flow of the report is complete. You should probably remove all of the "Tip" quotes like this one so that the presentation is as tidy as possible. Congratulations! ``` from subprocess import call call(['python', '-m', 'nbconvert', 'Investigate_a_Dataset.ipynb']) ```
github_jupyter
``` import data_loader as dl import numpy as np import matplotlib.pyplot as plt import seaborn as sns import pandas as pd np.random.seed(0) train_df, valid_df = dl.load_train_data("adult.data") test_df = dl.load_test_data("adult.test") column_names = ['age', 'workclass', 'fnlwgt', 'education', 'education.num', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'capital.gain', 'capital.loss', 'hours.per.week', 'native.country', 'income'] train_df.columns = column_names train_df.replace(' ?', np.nan, inplace=True) from sklearn.impute import SimpleImputer imp_mode = SimpleImputer(missing_values=np.nan, strategy='most_frequent') imp_mode.fit(train_df) imp_train_df = imp_mode.transform(train_df) train_df = pd.DataFrame(imp_train_df, columns = column_names) for col in ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']: train_df[col] = train_df[col].astype('int64') for col in ['workclass', 'education', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income']: train_df[col] = train_df[col].astype('category') train_df['capital.gain.loss'] = train_df['capital.gain'] - train_df['capital.loss'] num_features = ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week', 'capital.gain.loss'] cat_features = ['workclass', 'education', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income'] cat_features.remove("education") cat_features.remove("relationship") cat_features.remove("income") for cat in cat_features: train_df = pd.concat([train_df,pd.get_dummies(train_df[cat], prefix='is')],axis=1) train_df.drop([cat],axis=1, inplace=True) train_df['income.prediction'] = train_df.income.cat.codes train_df.drop(labels=["fnlwgt", "education","relationship", "income"], axis = 1, inplace = True) valid_df.columns = column_names valid_df.replace(' ?', np.nan, inplace=True) imp_train_df = imp_mode.transform(valid_df) valid_df = pd.DataFrame(imp_train_df, 
columns = column_names) for col in ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']: valid_df[col] = valid_df[col].astype('int64') for col in ['workclass', 'education', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income']: valid_df[col] = valid_df[col].astype('category') valid_df['capital.gain.loss'] = valid_df['capital.gain'] - valid_df['capital.loss'] num_features = ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week', 'capital.gain.loss'] cat_features = ['workclass', 'education', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income'] cat_features.remove("education") cat_features.remove("relationship") cat_features.remove("income") for cat in cat_features: valid_df = pd.concat([valid_df,pd.get_dummies(valid_df[cat], prefix='is')],axis=1) valid_df.drop([cat],axis=1, inplace=True) valid_df['income.prediction'] = valid_df.income.cat.codes valid_df.drop(labels=["fnlwgt", "education","relationship", "income"], axis = 1, inplace = True) missing_cols = set( train_df.columns ) - set( valid_df.columns ) for c in missing_cols: valid_df[c] = 0 valid_df = valid_df[train_df.columns] test_df.columns = column_names test_df.replace(' ?', np.nan, inplace=True) imp_train_df = imp_mode.transform(test_df) test_df = pd.DataFrame(imp_train_df, columns = column_names) for col in ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']: test_df[col] = test_df[col].astype('int64') for col in ['workclass', 'education', 'martial.status', 'occupation', 'relationship', 'race', 'sex', 'native.country', 'income']: test_df[col] = test_df[col].astype('category') test_df['capital.gain.loss'] = test_df['capital.gain'] - test_df['capital.loss'] num_features = ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week', 'capital.gain.loss'] cat_features = ['workclass', 'education', 'martial.status', 
'occupation', 'relationship', 'race', 'sex', 'native.country', 'income'] cat_features.remove("education") cat_features.remove("relationship") cat_features.remove("income") for cat in cat_features: test_df = pd.concat([test_df,pd.get_dummies(test_df[cat], prefix='is')],axis=1) test_df.drop([cat],axis=1, inplace=True) test_df['income.prediction'] = test_df.income.cat.codes test_df.drop(labels=["fnlwgt", "education","relationship", "income"], axis = 1, inplace = True) missing_cols = set( train_df.columns ) - set( test_df.columns ) for c in missing_cols: test_df[c] = 0 test_df = test_df[train_df.columns] from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import roc_auc_score from collections import namedtuple Model = namedtuple('Model', 'name model') train = train_df.values X, Y= train[:,:-1], train[:,-1] valid = valid_df.values Xval, Yval = valid[:,:-1], valid[:,-1] test = test_df.values Xtst, Ytst = test[:,:-1], test[:,-1] models = [] models.append(Model('LR', LogisticRegression(solver='liblinear'))) models.append(Model('KNN', KNeighborsClassifier())) models.append(Model('DT', DecisionTreeClassifier())) models.append(Model('RF', RandomForestClassifier())) models.append(Model('GB', GradientBoostingClassifier())) for m in models: m.model.fit(X, Y) print(m.name) Yhat = m.model.predict(Xval) auc = roc_auc_score(Yval, Yhat) print("AUC: {}".format(auc)) kList = [1, 3, 5, 10, 25, 50, 100] tr_auc = np.zeros(len(kList)) va_auc = np.zeros(len(kList)) for i, kValue in enumerate(kList): model = KNeighborsClassifier(n_neighbors=kValue) model.fit(X,Y) Yhat = model.predict(X) tr_auc[i] = roc_auc_score(Y, Yhat) Yhat = 
model.predict(Xval) va_auc[i] = roc_auc_score(Yval, Yhat) plt.plot(kList, tr_auc, 'r', label='Training AUC', marker='o') plt.plot(kList, va_auc, 'g', label='Validation AUC', marker='s') plt.title("Training and Validation AUC vs K") plt.xlabel("K") plt.ylabel("AUC") plt.xticks(kList) plt.legend(framealpha=0.75) plt.show() print("Train AUC: {}".format(tr_auc)) print("Validation AUC: {}".format(va_auc)) maxDepth = [1, 3, 5, 10, 25, 30, 40, 50, 100] tr_auc = np.zeros(len(maxDepth)) va_auc = np.zeros(len(maxDepth)) for i, d in enumerate(maxDepth): model = DecisionTreeClassifier(max_depth=d) model.fit(X,Y) Yhat = model.predict(X) tr_auc[i] = roc_auc_score(Y, Yhat) Yhat = model.predict(Xval) va_auc[i] = roc_auc_score(Yval, Yhat) plt.plot(maxDepth, tr_auc, 'r', label='Training AUC', marker='o') plt.plot(maxDepth, va_auc, 'g', label='Validation AUC', marker='s') plt.title("Training and Validation AUC vs Max Depth") plt.xlabel("Max Depth") plt.ylabel("AUC") plt.xticks(kList) plt.legend(framealpha=0.75) plt.show() print("Train AUC: {}".format(tr_auc)) print("Validation AUC: {}".format(va_auc)) trees = [1, 3, 5, 10, 25, 30, 40, 50, 100] tr_auc = np.zeros(len(trees)) va_auc = np.zeros(len(trees)) for i, t in enumerate(trees): model = RandomForestClassifier(n_estimators=t) model.fit(X,Y) Yhat = model.predict(X) tr_auc[i] = roc_auc_score(Y, Yhat) Yhat = model.predict(Xval) va_auc[i] = roc_auc_score(Yval, Yhat) plt.plot(trees, tr_auc, 'r', label='Training AUC', marker='o') plt.plot(trees, va_auc, 'g', label='Validation AUC', marker='s') plt.title("Training and Validation AUC vs Number of Trees") plt.xlabel("Number of Trees") plt.ylabel("AUC") plt.xticks(trees) plt.legend(framealpha=0.75) plt.show() print("Train AUC: {}".format(tr_auc)) print("Validation AUC: {}".format(va_auc)) nEstimators = [1, 3, 5, 10, 25, 30, 40, 50, 100, 250, 500, 1000] tr_auc = np.zeros(len(nEstimators)) va_auc = np.zeros(len(nEstimators)) for i, n in enumerate(nEstimators): model = 
GradientBoostingClassifier(n_estimators=n) model.fit(X,Y) Yhat = model.predict(X) tr_auc[i] = roc_auc_score(Y, Yhat) Yhat = model.predict(Xval) va_auc[i] = roc_auc_score(Yval, Yhat) plt.plot(nEstimators, tr_auc, 'r', label='Training AUC', marker='o') plt.plot(nEstimators, va_auc, 'g', label='Validation AUC', marker='s') plt.title("Training and Validation AUC vs Number of Boosting Stages") plt.xlabel("Number of Boosting Stages") plt.ylabel("AUC") plt.xticks(nEstimators) plt.legend(framealpha=0.75) plt.show() print("Train AUC: {}".format(tr_auc)) print("Validation AUC: {}".format(va_auc)) ```
github_jupyter
# ```
# import all the dependencies
import os
import csv
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# Read the csv file to view the data.
google_apps = pd.read_csv("googleplaystore.csv")
google_apps.shape
# ```

# # Data Cleaning

# ```
# Check the number of distinct apps in total.
no_apps = google_apps["App"].nunique()
print(f"Total number of unique apps: {no_apps}")

# Drop all the duplicate apps from the dataframe (keeps the first occurrence).
google_apps.drop_duplicates(subset = "App", inplace = True)
google_apps

# App sizes are inconsistent ("19M", "201k", "Varies with device", thousands
# separators), so normalise everything to a float number of megabytes;
# unknown sizes become NaN (float("NAN") parses to NaN).
google_apps["Size"] = google_apps["Size"].apply(lambda x: str(x).replace(",", "") if "," in str(x) else x)
google_apps["Size"] = google_apps["Size"].apply(lambda x: str(x).replace('M', '') if 'M' in str(x) else x)
google_apps["Size"] = google_apps["Size"].apply(lambda x: str(x).replace("Varies with device", "NAN") if "Varies with device" in str(x) else x)
# kilobytes -> megabytes
google_apps["Size"] = google_apps["Size"].apply(lambda x: float(str(x).replace('k', '')) / 1000 if 'k' in str(x) else x)
# convert all the sizes to float
# google_apps = google_apps.drop([10472])  # left commented: known malformed row in the raw dump
google_apps["Size"] = google_apps["Size"].apply(lambda x: float(x))

# The Installs column carries '+' and ',' decorations; strip them so the
# column can be treated numerically.
google_apps["Installs"] = google_apps["Installs"].apply(lambda x: x.replace("+", "") if "+" in str(x) else x)
google_apps["Installs"] = google_apps["Installs"].apply(lambda x: x.replace(",", "") if "," in str(x) else x)
google_apps["Installs"] = google_apps["Installs"].apply(lambda x: float(x))

# Make the Price column consistent: map "Free" to 0 and strip the '$' symbol.
# BUGFIX: str.replace() requires a string replacement argument — the original
# replace("Free", 0) raised TypeError whenever a literal "Free" price
# appeared; replace with the string "0" so float() below still yields 0.0.
google_apps["Price"] = google_apps["Price"].apply(lambda x: x.replace("Free", "0") if "Free" in str(x) else x)
google_apps["Price"] = google_apps["Price"].apply(lambda x: x.replace("$", "") if "$" in str(x) else x)
google_apps["Price"] = google_apps["Price"].apply(lambda x: float(x))
google_apps["Price"].dtype
# ```
# # Exploratory Analysis

# ```
# Basic pie chart to view the distribution of apps across the categories.
fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(aspect="equal"))
number_of_apps = google_apps["Category"].value_counts()
labels = number_of_apps.index
sizes = number_of_apps.values
ax.pie(sizes,labeldistance=2,autopct='%1.1f%%')
ax.legend(labels=labels,loc="right",bbox_to_anchor=(0.9, 0, 0.5, 1))
ax.axis("equal")
plt.show()

# Number of installs in the top 5 categories and their genres.
no_of_apps_category = google_apps["Category"].value_counts()
no_of_apps_category[0:5]
number_of_installs = google_apps["Installs"].groupby(google_apps["Category"]).sum()
print(f"Number of installs in family: {number_of_installs.loc['FAMILY']}")
print(f"Number of installs in Game: {number_of_installs.loc['GAME']}")
print(f"Number of installs in Tools: {number_of_installs.loc['TOOLS']}")

# Simple bar graph of the number of installs in each category.
plt.figure(figsize=(10,8))
sns.barplot(x="Category", y="Installs", data=google_apps, label="Total Installs", color="b")
plt.xticks(rotation=90)
plt.show()
print("Top 3 categories in terms of number of installations are: Communication,Video Players and Entertainment")

# Why does FAMILY, despite having the most apps, not have the most installs?
# Price could be one of the factors.
paid_apps = google_apps[google_apps["Price"] != 0.0]
paid_family_apps = paid_apps[paid_apps["Category"]=="FAMILY"]
paid_family_apps.count()
paid_communications_apps = paid_apps[paid_apps["Category"]=="COMMUNICATION"]
paid_communications_apps.count()

# Visualise this in the form of a simple bar graph.
# NOTE(review): the label reads "Total Paid Apps in Each Category", but
# barplot of y="Price" shows the mean price per category — confirm which
# quantity was intended.
plt.figure(figsize=(10,8))
sns.barplot(x="Category", y="Price", data=paid_apps, label="Total Paid Apps in Each Category")
plt.xticks(rotation=90)
plt.show()

# Ratings of the apps over the various categories (Series.mean skips NaN).
avg_rating = google_apps["Rating"].mean()
print(avg_rating)
plt.figure(figsize=(10,8))
# NOTE(review): positional x/y arguments were removed in seaborn >= 0.12;
# the keyword form is sns.boxplot(x='Category', y='Rating', data=...).
sns.boxplot('Category','Rating',data=google_apps)
plt.title("Distribution of Categorywise Ratings")
plt.ylabel("Rating")
plt.xlabel("Category")
plt.xticks(rotation=90)
# plt.savefig('data_images/plot3a_income.png',bbox_inches='tight')
plt.show();

# Paid vs. free and the number of installs.
installs_greater_1000 = google_apps[google_apps["Installs"]>1000]
installs_greater_1000 = installs_greater_1000.sort_values(['Price'])
plt.figure(figsize=(20,20))
sns.catplot(x="Installs", y="Price",data=installs_greater_1000);
plt.xticks(rotation=90)
# plt.ytick.direction('out')
plt.show()

# Take a deeper look at the apps priced more than $100.
expensive_apps = google_apps[google_apps["Price"]>100]
expensive_apps["Installs"].groupby(expensive_apps["App"]).sum()

# Number of installs vs. price vs. category for the expensive apps.
sns.relplot(x="Installs", y="Price", hue="Category", size="Rating", sizes=(200, 400), alpha=1, height=5, data=expensive_apps)
plt.show()
# ```

# # Conclusions

# ```
print(f"The Top three category of Apps based on the number of Apps are")
print(f" - Family")
print(f" - Game")
print(f" - Tool")
print(f"The bottom three category of Apps based on the number of Apps are")
print(f" - Parenting")
print(f" - Comics")
print(f" - Beauty")
print(f"This is not the case when we look at the number of intalls. Based on number of installs, Communication,Video players and entertainment are the top 3 categories")
print(f"To find out why, I looked at the price of paid apps in each category and clearly, communication was priced less than the family apps. This could be one of the reasons")
print(f"-----------------------------------------------------------------------------------------------------------------------------")
print(f"The Average rating of the apps across all the categories is 4.17")
print(f"-----------------------------------------------------------------------------------------------------------------------------")
print(f"Users tend to download more free apps compared to paid apps. This being said, there are people who are willing to pay more than $100 for an app")
print(f"-----------------------------------------------------------------------------------------------------------------------------")
print(f"Based on the data, Users tend to buy apps which are priced $1 - $30 compared to other expensive apps")
print(f"-----------------------------------------------------------------------------------------------------------------------------")
print(f"There are 20 apps which cost above $100. Finance, Lifestyle and family being the top 3 categories.")
print(f"-----------------------------------------------------------------------------------------------------------------------------")
print(f"Among the most expensive apps, 'I am Rich' is the most popular app with the most number of installs")
# ```
github_jupyter
# Assignment — 2.1 Problem Statement: PYTHON 1

# 1. Install Jupyter notebook, run the first program and share a screenshot
#    of the output.
# Renamed from `str` so the builtin str() is not shadowed; output unchanged.
greeting = "Hello Python. This is my First Program"
print(greeting)

# 2. Find all numbers divisible by 7 but not a multiple of 5, between 2000
#    and 3200 (both included), printed as a comma-separated sequence on a
#    single line.
# BUGFIX: range(2000, 3200) excluded 3200 even though the statement says
# "both included"; and the result is now printed comma-separated on one line
# as required, instead of as a Python list literal.
divisible = [i for i in range(2000, 3201) if i % 7 == 0 and i % 5 != 0]
print(','.join(str(n) for n in divisible))

# 3. Accept the user's first and last name, then print them in the reverse
#    order with a space between first name and last name.
firstName = input("Enter your first Name: ")
lastName = input("Enter your last Name: ")
print(firstName[::-1] + ' ' + lastName[::-1])

# 4. Volume of a sphere with diameter 12 cm: V = (4/3) * pi * r**3.
pie = 3.14  # course approximation of pi (math.pi would be more precise)
diameter = 12
# BUGFIX: use the diameter variable instead of the hard-coded literal 12 so
# the computation follows the variable; the printed value is unchanged.
volume = (4 / 3) * pie * (diameter / 2) ** 3
print("volume: {0:.2f}".format(volume))

# 2.2 Problem Statement: PYTHON 2

# 1. Accept a sequence of comma-separated numbers from the console and
#    generate a list.
lst = input("Enter sequence: ")
print(lst.split(','))

# 2. Print the diamond star pattern (rows grow to 5 stars, then shrink).
for i in range(1, 10):
    if i < 6:
        print('* ' * i)
    else:
        print('* ' * (10 - i))

# 3. Reverse a word after accepting the input from the user.
#    Sample Input - AcadGild ; Sample Output - dliGdacA
input_str = input("Write Something:")
print(input_str[::-1])

# 4. (problem statement continues in the next cell)
# 4. Print the given string in the specified format:
#    WE, THE PEOPLE OF INDIA,
#        having solemnly resolved to constitute India into a SOVEREIGN,
#            SOCIALIST, SECULAR, DEMOCRATIC
#                REPUBLIC and to secure to all its citizens
print("WE, THE PEOPLE OF INDIA,")
print("\thaving solemnly resolved to constitute India into a SOVEREIGN,")
print("\t\tSOCIALIST, SECULAR, DEMOCRATIC")
print("\t\t\tREPUBLIC and to secure to all its citizens")

# 2.3 Problem Statement: PYTHON 3

# 1.1 Implement myreduce() which works exactly like Python's built-in
#     functools.reduce().
def myreduce(function, data):
    """Fold `data` left-to-right with the two-argument `function`.

    Mirrors functools.reduce(function, iterable): accepts any iterable
    (not just indexable sequences, which data[0]/data[1:] required) and
    raises TypeError on an empty iterable, exactly like the builtin.
    """
    it = iter(data)  # GENERALIZED: any iterable, not only sequences
    try:
        result = next(it)
    except StopIteration:
        # functools.reduce raises TypeError here; the old data[0] raised
        # a container-dependent IndexError instead.
        raise TypeError("myreduce() of empty sequence with no initial value")
    for item in it:
        result = function(result, item)
    return result

# driver test (renamed from `sum` so the builtin is not shadowed)
def add(x, y):
    return x + y

print(myreduce(add, [1, 2, 3, 4, 5]))

# 1.2 Implement myfilter() which works like Python's built-in filter()
#     (returning a list, as the original did).
def myfilter(function, data):
    """Return the items of `data` for which `function` is truthy.

    As with filter(), `function` may be None, in which case the truthy
    items themselves are kept.
    """
    if function is None:  # GENERALIZED: match filter(None, ...) semantics
        return [item for item in data if item]
    return [item for item in data if function(item)]

# driver test
def isGreaterthan5(x):
    return x > 5

print(myfilter(isGreaterthan5, [2, 6, 1, 8, 9]))

# 2. (list-comprehension problems continue in the next cell)
# 2. List comprehensions producing the required lists.

# ['A', 'C', 'A', 'D', 'G', 'I', 'L', 'D']
lst = [letter for letter in "ACADGILD"]
print(lst)

# ['x', 'xx', 'xxx', 'xxxx', 'y', 'yy', 'yyy', 'yyyy', 'z', 'zz', 'zzz', 'zzzz']
Char_lst = [ch * count for ch in ('x', 'y', 'z') for count in range(1, 5)]
print(Char_lst)

# ['x', 'y', 'z', 'xx', 'yy', 'zz', 'xxx', 'yyy', 'zzz', 'xxxx', 'yyyy', 'zzzz']
Char_lst = [ch * count for count in range(1, 5) for ch in ('x', 'y', 'z')]
print(Char_lst)

# [[2], [3], [4], [3], [4], [5], [4], [5], [6]]
lst = [[base + offset] for base in range(2, 5) for offset in range(3)]
print(lst)

# [[2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7], [5, 6, 7, 8]]
list_of_lst = [[value + shift for value in [1, 2, 3, 4]] for shift in range(1, 5)]
print(list_of_lst)

# [(1, 1), (2, 1), (3, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3)]
Pairs = [(second, first) for first in [1, 2, 3] for second in [1, 2, 3]]
print(Pairs)

# 3. longestWord(): take a list of words and return the longest one.
def longestWord(word_list):
    """Return the longest word in `word_list`.

    Words are ranked by (length, word), so among equally long candidates
    the lexicographically last one wins — the same rule the original
    tuple-sort implemented. Raises IndexError on an empty list, as before.
    """
    ranked = sorted(word_list, key=lambda word: (len(word), word))
    return ranked[-1]

print(longestWord(["Final", "Finally", "Finalize"]))

# 2.4 Problem Statement: PYTHON 4
# 1.1 (triangle-area problem continues in the next cell)
# 1.1 Area of a triangle via Heron's formula; sides are taken in the parent
#     class, area is computed in the subclass.
class Triangle:
    '''Holds the three side lengths of a triangle.'''

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c


class Area(Triangle):
    """Subclass of Triangle that computes the area via Heron's formula."""

    def __init__(self, *args):
        super().__init__(*args)  # Py3 zero-argument super()
        # NOTE: the (misspelled) attribute name is kept for compatibility;
        # it holds Heron's semi-perimeter s = (a + b + c) / 2, not the
        # full perimeter.
        self.parimeter = (self.a + self.b + self.c) / 2

    def getArea(self):
        """Return the area: sqrt(s * (s-a) * (s-b) * (s-c))."""
        s = self.parimeter
        return (s * (s - self.a) * (s - self.b) * (s - self.c)) ** 0.5


C = Area(2, 4, 5)
print("Area of Triangle: {0:.2f}".format(C.getArea()))


# 1.2 filter_long_words(): words strictly longer than n.
def filter_long_words(words_list, n):
    """Return the words of `words_list` whose length is greater than `n`."""
    return [word for word in words_list if len(word) > n]


# driver test
words = ["Apple", "Orange", "PineApple", "Guava"]
length = 5
print("Words in {} greater than length {} are {}".format(words, length, filter_long_words(words, length)))


# 2.1 Map a list of words to the lengths of the corresponding words.
def maplength(words_list):
    """Return the length of each word in `words_list`, preserving order."""
    return [len(word) for word in words_list]


# driver test
words = ["Apple", "Orange", "PineApple", "Guava"]
print("length of words in {} are {}".format(words, maplength(words)))


# 2.2 checkVowel(): True iff the argument is a single vowel character.
def checkVowel(char):
    """Return True if `char` is exactly one character and a vowel (any case)."""
    # Empty or multi-character strings are never vowels — same truth table
    # as the original len()/elif chain, just condensed.
    return len(char) == 1 and char.lower() in "aeiou"


# driver test
char = 'a'
print("Is {} vowel?: {}".format(char, checkVowel(char)))
char = 'b'
print("Is {} vowel?: {}".format(char, checkVowel(char)))

# 2.5 Problem Statement: PYTHON 5
# 1. (division-by-zero problem continues in the next cell)
# 1. Compute 5/0 and use try/except to catch the exception.
def Compute():
    """Attempt 5 / 0 and report the ZeroDivisionError instead of crashing."""
    try:
        result = 5 / 0
        print(result)
    except ZeroDivisionError:
        print("You cannot divide by 0")
    else:
        print("Division done successfully")


Compute()


# 2. Generate every subject-verb-object sentence from the three word lists.
subjects = ["Americans", "Indians"]
verbs = ["play", "watch"]
objects = ["Baseball", "Cricket"]

for subject in subjects:
    for action in verbs:
        for thing in objects:
            print(" ".join((subject, action, thing)))
github_jupyter
###### ECE 283: Homework 2 ###### Topics: Classification using neural networks ###### Due: Monday April 30 - Neural networks; Tensorflow - 2D synthetic gaussian mixture data for binary classification ### Report ---------------------------------------- ##### 1. Tensorflow based neural network - 2D Gaussian mixture is synthesized based on the provided mean, covariances for class 0 and 1. - Training, validation and test sample counts are 70, 20, and 10 respectively ##### (a) One hidden layer: Implementation code below In[7] : oneHiddenNeuralNetwork() Below are the parameters that are used to run training for this network. The validation data is used to compute loss/accuracy in order to tune the hyper parameters. ``` Hyper Parameters learning_rate = 0.001 num_steps = 1000 batch_size = 1000 display_step = 100 reg_const_lambda = 0.01 Network Parameters n_hidden_1 = 9 # 1st layer number of neurons num_input = 2 # data input (shape: 2 * 70) num_classes = 1 # total classes (0 or 1 based on the value) ``` ###### Execution: 1. Without input preprocessing: Single Layer Network > Log > - Trn Step 1, Minibatch Loss= 2.3662, Accuracy= 49.500 > - Val Step 1, Minibatch Loss= 2.4016, Accuracy= 48.800 > - Trn Step 100, Minibatch Loss= 1.8325, Accuracy= 58.437 > - Val Step 100, Minibatch Loss= 1.8935, Accuracy= 57.050 > - Trn Step 1000, Minibatch Loss= 0.6166, Accuracy= 79.854 > - Val Step 1000, Minibatch Loss= 0.6331, Accuracy= 79.000 > - Test Accuracy: 80.800 > - Diff Error: 192/1000 2. With input preprocessing: Single Layer Network > Log > - Trn Step 1, Minibatch Loss= 1.3303, Accuracy= 30.100 > - Val Step 1, Minibatch Loss= 1.6977, Accuracy= 33.150 > - Trn Step 100, Minibatch Loss= 1.0398, Accuracy= 36.600 > - Val Step 100, Minibatch Loss= 1.2065, Accuracy= 37.400 > - Trn Step 1000, Minibatch Loss= 0.5143, Accuracy= 80.700 > - Val Step 1000, Minibatch Loss= 0.5572, Accuracy= 76.700 > - Test Accuracy: 77.100 > - Diff Error: 229/1000 ###### Observations: Q 1,2,3,4 1. 
The number of neurons here are 10 which provided more accuracy over single neuron. Upon changing the number of neurons from 1 to 10 we see a jump of accuracy from 50% to 75%. However growing neurons beyond 10 does not provide much benefit/accuracy change on the validation data. Which says that training further may be overfitting to the training dataset. 2. Training samples are 70% and validation samples are 20%. When we run for 1000 steps/epoch with batch size 1000 on a learning rate of 0.001. We see that training loss converges towards 0.5572, while training accuracy converges from 30% to 80%. The validation values appear to be peaks at 77%. Training was stopped when we saw consistent convergence and similar accuracy on the validation and the test dataset. > - Upon changing the learning rate to a higher value like 1 we see that convergence is an issue. This was observed since the data kept alternating between two values consistently, irrespective of the iterations ran. When learning rate is of the order 10**(-3) then we see the convergence in the data. > - The L2 regularization constant will penalize the square value of the weights and it is set to 0.01 here. When we changed the value to say 10 it will allow for a higher order coefficient to affect and may cause over fitting. However, it does not seem to affect the results here and it may be due to the fact that the higher order coefficient do not affect this data. > - Final Test Accuracy: 77.1% 3. Input pre-processing and Weight Initialization > Normalization/input-preprocessing is achieved by subracting the mean and scaling with standard deviation. > - The function getNextTrainBatch() was without normalization and gave the results in sections 1 and 2 above. 
Upon using normalized training batches from the getNextNormalizedTrainBatch() function, we have the following observations: > - The convergence was relatively faster than before (1 Step) > - The batch loss reduced to ~0.5572 while the accuracy on test was around 77% > Weight initialization has a major impact since these multipliers lead to the vanishing or exploding gradients issue. > - In the current scenario we have used random_normal distribution for initialization. In the current scenario, since the convergence is fast and the data is separable, we do not see any difference by using uniform initialization. However there are datasets that demonstrate the empirical benefit of using a uniform distribution for initializing weights. 4. Comparing the performance of the neural network with HW1 (MAP, Kernelized Logistic Regression and Logistic Regression with feature engg) > - We observed a probability of error around 23% here. > - Misclassification rate in MAP was around 16% for class0 and 47% for class1 > - Misclassification in Kernelized Logistic regression was slightly better than the MAP > - Misclassification for Logistic regression by feature engineering was around 56% for class0 and 10% for class1 However if we see the overall misclassification error rate we get a great accuracy of about 77%-81% using the neural network technique here. We can be sure that upon learning more data this technique will provide a better accuracy. ##### (b) Two hidden layer: Implementation code below In[11] : twoHiddenNeuralNetwork() Below are the parameters that are used to run training for this network ``` Hyper Parameters learning_rate = 0.001 num_steps = 1000 batch_size = 1000 display_step = 100 reg_const_lambda = 0.01 Network Parameters n_hidden_1 = 4 # 1st layer number of neurons n_hidden_2 = 4 # 2nd layer number of neurons num_input = 2 # data input (shape: 2 * 70) num_classes = 1 # total classes (0 or 1 based on the value) ``` ###### Execution: 1.
Without input preprocessing: Two Layer Network > Log > - Trn Step 1, Minibatch Loss= 1.8265, Accuracy= 67.295 > - Val Step 1, Minibatch Loss= 1.9003, Accuracy= 66.800 > - Trn Step 100, Minibatch Loss= 1.2101, Accuracy= 80.126 > - Val Step 100, Minibatch Loss= 1.2648, Accuracy= 80.550 > - Trn Step 1000, Minibatch Loss= 1.0394, Accuracy= 83.812 > - Val Step 1000, Minibatch Loss= 1.0760, Accuracy= 83.750 > - Test Accuracy: 83.600 > - Diff Error: 164/1000 2. With input preprocessing: Two Layer Network > Log > - Trn Step 1, Minibatch Loss= 2.0676, Accuracy= 30.800 > - Val Step 1, Minibatch Loss= 2.1635, Accuracy= 27.600 > - Trn Step 100, Minibatch Loss= 0.8971, Accuracy= 51.700 > - Val Step 100, Minibatch Loss= 1.0530, Accuracy= 51.000 > - Trn Step 1000, Minibatch Loss= 0.6649, Accuracy= 80.600 > - Val Step 1000, Minibatch Loss= 0.6496, Accuracy= 83.100 > - Test Accuracy: 81.900 > - Diff Error: 181/1000 ###### Observations: Additional observations for two layer network only 1. In this case we see a better result when compared to the single network but that may not always the case. Upon increasing the number of layers to two we add more capacity. By doing this we allow for complex fitting of the weights which leads to good results on the training data and I.I.D. test data. 2. On increasing the learning rate we see that the convergence is quick around the loss value of 0.6496 > - Final Test Accuracy: 81.900 > - All the same observations as described for a single layer network as above. However here we see that due to higher capacity a better linear accuracy is observed at every neuron count. 3. The number of neurons here are 4 each and provides a peak accuracy at that value. An overfitting may be occurring beyond that. > - The test accuracy is 82% but we also see that the training and validation accuracy are 81% and 83% respectively. This is data specific and can be improved by increasing the size of the training data. 
# 10000 test samples is not a great sample to compute a general accuracy for
# the entire dataset.
# ![Num. of Neurons VS. Accuracy](network_perf.png)

# # Code Section

# ```
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from math import *
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import norm
from IPython.display import Image, display, Math, Latex

# Params
n_inpoints = 10000  # total number of 2-D samples; each class draws half

def generateClass0():
    """Draw n_inpoints/2 samples for class 0 from a single 2-D Gaussian.

    The covariance is assembled from its eigen-decomposition: rank-1 outer
    products u*u.T of the unit eigenvectors at angle theta0, weighted by the
    eigenvalues lmb01 and lmb02. Returns the x and y coordinate arrays.
    """
    theta0 = 0
    lmb01 = 2
    lmb02 = 1
    m0 = (0, 0)
    # computing u * u.T and later multiplying with lambda
    cov01 = [[(cos(theta0))**2, cos(theta0)*sin(theta0)], [(sin(theta0))*cos(theta0), (sin(theta0))**2]]
    cov02 = [[(sin(theta0))**2, -(cos(theta0)*sin(theta0))], [-(cos(theta0)*sin(theta0)), (cos(theta0))**2]]
    cov0 = lmb01*np.matrix(cov01) + lmb02*np.matrix(cov02)
    cov0_det = np.linalg.det(cov0)  # computed but unused
    x0, y0 = np.random.multivariate_normal(m0, cov0, int(n_inpoints/2)).T
    return x0,y0

x0, y0 = generateClass0()
plt.scatter(x0, y0, color = 'r',marker='x', label = 'Cl 0')
plt.legend()
plt.title('Distribution of Class 0')
plt.show()

def generateClass0_doc_boundary():  # (no such helper — see generateClass1 below)
    pass

def generateClass1():
    """Draw n_inpoints/2 samples for class 1 from two component Gaussians.

    NOTE(review): the components are combined by *averaging coordinates*
    (x1 = x1a/3 + 2*x1b/3), which yields one Gaussian whose mean/covariance
    blend A and B — not a 1/3-2/3 Gaussian *mixture*. A true mixture would
    pick component A with probability 1/3 per sample. Confirm which was
    intended before relying on the "mixture" description in the report.
    """
    # Mixture A
    theta1a = -3*pi/4
    lmb1a1 = 2
    lmb1a2 = 1/4
    m1a = (-2, 1)
    cov1a = [[(cos(theta1a))**2, cos(theta1a)*sin(theta1a)], [(sin(theta1a))*cos(theta1a), (sin(theta1a))**2]]
    cov2a = [[(sin(theta1a))**2, -(cos(theta1a)*sin(theta1a))], [-(cos(theta1a)*sin(theta1a)), (cos(theta1a))**2]]
    # note: cov1a is rebound from the raw list to the weighted matrix here
    cov1a = lmb1a1*np.matrix(cov1a) + lmb1a2*np.matrix(cov2a)
    cov1a_det = np.linalg.det(cov1a)
    x1a, y1a = np.random.multivariate_normal(m1a, cov1a, int(n_inpoints/2)).T
    #print('Shape: ',x1a.shape,', ',y1a.shape,', ',cov1a)
    # Mixture B
    theta1b = pi/4
    lmb1b1 = 3
    lmb1b2 = 1
    m1b = (3, 2)
    cov1b = [[(cos(theta1b))**2, cos(theta1b)*sin(theta1b)], [(sin(theta1b))*cos(theta1b), (sin(theta1b))**2]]
    cov2b = [[(sin(theta1b))**2, -(cos(theta1b)*sin(theta1b))], [-(cos(theta1b)*sin(theta1b)), (cos(theta1b))**2]]
    cov1b = lmb1b1*np.matrix(cov1b) + lmb1b2*np.matrix(cov2b)
    cov1b_det = np.linalg.det(cov1b)
    x1b, y1b = np.random.multivariate_normal(m1b, cov1b, int(n_inpoints/2)).T
    #print('Shape: ',x1b.shape,', ',y1b.shape,', ',cov1b)
    # Class 1 (A * 0.33 +B * 0.66)
    y1 = np.array(y1a)* (1 / 3)+np.array(y1b)* (2 / 3)
    x1 = np.array(x1a)* (1 / 3)+np.array(x1b)* (2 / 3)
    return x1,y1

x1, y1 = generateClass1()
plt.scatter(x1, y1, color = 'b',marker='^', label = 'Cl 1')
plt.title('Distribution of Class 1')
plt.legend()
plt.show()

# Pool both classes and show the combined scatter.
x = np.concatenate((x0, x1))
y = np.concatenate((y0, y1))
print('Shape; X:',x.shape,', Y:',y.shape)
plt.scatter(x0, y0, color = 'r',marker='x', label = 'Cl 0')
plt.scatter(x1, y1, color = 'b',marker='^', label = 'Cl 1')
plt.legend()
plt.show()

c0 = np.vstack((x0, y0)).T
c1 = np.vstack((x1, y1)).T
# ----------------------------------------
# Set up the [xi, yi] training data vector
# ----------------------------------------
# X is (n_inpoints, 2); Y is the (n_inpoints, 1) column of 0/1 labels,
# first half class 0, second half class 1 (shuffled below).
X = np.concatenate((c0,c1), axis = 0)
Y = np.array([0]*int(n_inpoints/2) + [1]*int(n_inpoints/2)).reshape(n_inpoints,1)
# ```

# ### Training, test and validation sets (70:20:10)
# ##### Without Normalization

# ```
# Divide the data into Train Valid, Test
tot_count = n_inpoints
trn_count = int(0.7 * tot_count)
val_count = int(0.2 * tot_count)
tst_count = int(0.1 * tot_count)
# Shuffle X & Y values (one shared permutation keeps samples and labels aligned)
sfl_idx = np.arange(0,tot_count)
np.random.shuffle(sfl_idx)
Xc0 = X[:,0]
Xc1 = X[:,1]
Xc0 = Xc0.reshape(tot_count,1)
Xc1 = Xc1.reshape(tot_count,1)
print(Xc1.shape)
train_X0 = Xc0[sfl_idx[np.arange(0,trn_count)]]
train_X1 = Xc1[sfl_idx[np.arange(0,trn_count)]]
train_Y = Y[sfl_idx[np.arange(0,trn_count)]]
n_samples = train_X1.shape[0]
valid_X0 = Xc0[sfl_idx[np.arange(trn_count,trn_count+val_count)]]
valid_X1 = Xc1[sfl_idx[np.arange(trn_count,trn_count+val_count)]]
valid_X = np.vstack((valid_X0.T, valid_X1.T))
valid_Y = Y[sfl_idx[np.arange(trn_count,trn_count+val_count)]]
tests_X0 = Xc0[sfl_idx[np.arange(trn_count+val_count, tot_count)]]
tests_X1 = Xc1[sfl_idx[np.arange(trn_count+val_count, tot_count)]]
tests_X = np.vstack((tests_X0.T, tests_X1.T))
tests_Y = Y[sfl_idx[np.arange(trn_count+val_count, tot_count)]]

# Shared cursor into the (pre-shuffled) training set used by both batch
# generators below.
batchIndex = 0

def getNextTrainBatch(size):
    """Return the next (size, 2) batch of raw training inputs and its labels.

    Walks sequentially through the training set, wrapping back to the start
    when fewer than `size` samples remain.
    BUGFIX: the original set size = trn_count - 1 on wrap-around, handing the
    caller almost the entire training set as a single "batch".
    """
    global batchIndex
    size = min(size, trn_count)
    if (batchIndex + size) > trn_count:
        batchIndex = 0  # recycle the batches from start
    start = batchIndex
    batchIndex = batchIndex + size
    trn_X = np.vstack((train_X0[start:batchIndex].T, train_X1[start:batchIndex].T))
    return trn_X.T, train_Y[start:batchIndex]

print('Train: ',train_X0.shape, train_Y.shape)
print('Valid: ',valid_X.shape, valid_Y.shape)
print('Tests: ',tests_X.shape, tests_Y.shape)
# ```

# ##### With Normalization

# ```
# -------------------
# Normalize the data
# -------------------
# Mean
train_X0_mean = np.mean(train_X0)
train_X1_mean = np.mean(train_X1)
# Standard deviation
train_X0_stddev = np.std(train_X0)
train_X1_stddev = np.std(train_X1)
# Normalization by scaling using standard deviation
train_X0_nrm = (train_X0 - train_X0_mean)/train_X0_stddev
train_X1_nrm = (train_X1 - train_X1_mean)/train_X1_stddev
print(train_X0_nrm.shape)
print(train_X1_nrm.shape)
train_X_nrm = np.vstack((train_X0_nrm.T, train_X1_nrm.T))

def getNextNormalizedTrainBatch(size):
    """Return the next batch of normalized training inputs and labels, with
    the rows of the batch freshly shuffled.

    BUGFIX: the original unconditionally reset batchIndex to 0 on every call
    and double-indexed its shuffle vector, so training only ever saw the
    same first `size` samples. The cursor now advances through the whole
    normalized training set (wrapping at the end), matching the behaviour
    of getNextTrainBatch, and each returned window is shuffled.
    """
    global batchIndex
    size = min(size, trn_count)
    if (batchIndex + size) > trn_count:
        batchIndex = 0  # recycle the batches from start
    # Shuffle the sample order inside the current window each time.
    pick = np.arange(batchIndex, batchIndex + size)
    np.random.shuffle(pick)
    batchIndex = batchIndex + size
    trn_X = np.vstack((train_X0_nrm[pick].T, train_X1_nrm[pick].T))
    return trn_X.T, train_Y[pick]
print('Train: ',train_X_nrm.shape, train_Y.shape)
#print('Valid: ',valid_X.shape, valid_Y.T)
#print('Tests: ',tests_X.shape, tests_Y.T)

def linearRegression():
    """Fit a one-feature linear model y = W*x + b to (train_X1, train_Y)
    with full-batch gradient descent on a squared-error cost, then plot
    the data points and the fitted line.

    Reads the module-level globals train_X, train_X1, train_Y,
    x0/y0/x1/y1 and n_samples; returns None.
    """
    # Hyper-parameters
    learning_rate = 0.01
    training_epochs = 500
    display_step = 50  # log the cost every `display_step` epochs
    rng = np.random  # NOTE(review): unused local, left over from an earlier revision
    # tf Graph Input: one scalar feature and one scalar target per row
    Xtf = tf.placeholder(tf.float32, [None, 1])
    Ytf = tf.placeholder(tf.float32, [None, 1])
    # Set model weights (zero-initialized)
    # figure tf.rand
    # tf.keras.initializer
    Wtf = tf.Variable(np.zeros([1,1]), dtype=tf.float32, name="weight")
    btf = tf.Variable(np.zeros([1,1]), dtype=tf.float32, name="bias")
    # Construct a linear model: pred = X @ W + b
    predtf = tf.add(tf.matmul(Xtf, Wtf), btf)
    # Mean squared error. n_samples is a module-level global
    # (presumably the training-set size, defined in an earlier cell — TODO confirm)
    costtf = tf.reduce_sum(tf.pow(predtf-Ytf, 2))/(2*n_samples)
    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(costtf)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # Start training
    with tf.Session() as sess:
        sess.run(init)
        # Fit all training data (full batch each epoch — no mini-batching)
        for epoch in range(training_epochs):
            sess.run(optimizer, feed_dict={Xtf: train_X1, Ytf: train_Y})
            # Display logs per epoch step
            if (epoch+1) % display_step == 0:
                c = sess.run(costtf, feed_dict={Xtf: train_X1, Ytf:train_Y})
                print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
                      "W=", sess.run(Wtf), "b=", sess.run(btf))
        print("Optimization Finished!")
        training_cost = sess.run(costtf, feed_dict={Xtf: train_X1, Ytf: train_Y})
        print("Training cost=", training_cost, "W=", sess.run(Wtf), "b=", sess.run(btf), '\n')
        # Graphic display: raw points, the two classes, and the fitted line
        plt.plot(train_X, train_Y, 'ro', label='Original data')
        plt.scatter(x0, y0, color = 'r',marker='x', label = 'Cl 0')
        plt.scatter(x1, y1, color = 'b',marker='^', label = 'Cl 1')
        plt.plot(train_X1, sess.run(Wtf) * train_X1 + sess.run(btf), label='Fitted line')
        plt.legend()
        plt.show()
        sess.close()  # NOTE(review): redundant — the `with` block already closes the session

# Run Linear Regression
linearRegression()
```

### Neural Network implementation
- 1.(a) One hidden layer

```
def oneHiddenNeuralNetwork():
    """Build and train a binary classifier with one hidden layer of 9
    units on the normalized training batches; print training/validation
    metrics every `display_step` steps and final test accuracy.

    NOTE(review): the ReLU on layer_1 is commented out below, so the
    hidden layer is linear and the whole model collapses to a linear
    classifier — confirm this is intentional.
    """
    # Parameters
    learning_rate = 0.001
    num_steps = 1000
    batch_size = 1000
    display_step = 100
    reg_const_lambda = 0.01  # L2 regularization strength
    # Network Parameters
    n_hidden_1 = 9 # 1st layer number of neurons
    num_input = 2 # data input (shape: 2 * 70)
    num_classes = 1 # total classes (0 or 1 based on the value)
    # tf Graph input
    X = tf.placeholder("float", [None, num_input])
    Y = tf.placeholder("float", [None, num_classes])
    # Store layers weight & bias (initialized from a random normal)
    weights = {
        'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
        'out': tf.Variable(tf.random_normal([n_hidden_1, num_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    # Create model
    def one_neural_net(x):
        # Hidden fully connected layer, a1
        layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
        #layer_1 = tf.nn.relu(layer_1)
        # Output fully connected layer with a neuron for each class
        out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
        return out_layer
    # Construct model
    logits = one_neural_net(X)
    output = tf.sigmoid(logits) # Convert output to a probability
    # Define loss and optimizer: sigmoid cross-entropy plus L2 penalty
    cel_loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=Y))
    reg_loss = tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(weights['out']) # L2 regularization
    loss_op = tf.reduce_mean(cel_loss_op + reg_const_lambda*reg_loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)
    # Evaluate model (with test logits, for dropout to be disabled)
    # keep in mind boolean to float32 tensor output
    #correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    correct_pred = tf.cast(tf.greater(output, 0.5), tf.float32)  # threshold at p=0.5
    accuracy = tf.reduce_mean(tf.cast(tf.equal(correct_pred, Y), tf.float32))
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # Start training
    # NOTE(review): this binds a function-LOCAL batchIndex; it does not
    # reset the module-level cursor that getNextNormalizedTrainBatch uses
    # (that function declares `global batchIndex`).
    batchIndex = 0
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        for step in range(1, num_steps+1):
            batch_x, batch_y = getNextNormalizedTrainBatch(batch_size)
            # Run optimization op (backprop)
            # print(batch_x)
            # print(batch_y)
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            if step % display_step == 0 or step == 1:
                # Training batch loss and accuracy
                loss, acc, pred = sess.run([loss_op, accuracy, correct_pred], feed_dict={X: batch_x, Y: batch_y})
                print("Trn Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Accuracy= " + \
                      "{:.3f}".format(100*acc))
                #print("actuals:", batch_y.T)
                #print("predict:", pred.T)
                print("differr:", (pred.T != batch_y.T).sum())
                # Validation accuracy
                loss_v, acc_v, pred_v = sess.run([loss_op, accuracy, correct_pred], feed_dict={X: valid_X.T, Y: valid_Y})
                print("Val Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss_v) + ", Accuracy= " + \
                      "{:.3f}".format(100*acc_v))
                #print("actuals:", valid_Y.T)
                #print("predict:", pred_v.T)
                print("differr:", (pred_v.T != valid_Y.T).sum())
        print("Optimization Finished!")
        # Calculate accuracy for test data
        acc_t, pred_t = sess.run([accuracy,correct_pred], feed_dict={X: tests_X.T, Y: tests_Y})
        print("Test Accuracy:", "{:.3f}".format(100*acc_t))
        print("actuals:", tests_Y.shape)
        print("predict:", pred_t.shape)
        print("differr:", (pred_t.T != tests_Y.T).sum())
        sess.close()  # NOTE(review): redundant inside the `with` block

# Execute
oneHiddenNeuralNetwork()
```

- 1.(b) Two hidden layer

```
def twoHiddenNeuralNetwork():
    """Build and train a binary classifier with two ReLU hidden layers
    (4 units each) on the normalized training batches; print
    training/validation metrics every `display_step` steps and the final
    test accuracy.
    """
    # Parameters
    learning_rate = 0.01
    num_steps = 1000
    batch_size = 1000
    display_step = 100
    reg_const_lambda = 0.01  # L2 regularization strength
    # Network Parameters
    n_hidden_1 = 4 # 1st layer number of neurons
    n_hidden_2 = 4 # 2nd layer number of neurons
    num_input = 2 # data input (shape: 2 * 70)
    num_classes = 1 # total classes (0 or 1 based on the value)
    # tf Graph input
    X = tf.placeholder("float", [None, num_input])
    Y = tf.placeholder("float", [None, num_classes])
    # Store layers weight & bias (initialized from a random normal)
    weights = {
        'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
        'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
        'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_hidden_1])),
        'b2': tf.Variable(tf.random_normal([n_hidden_2])),
        'out': tf.Variable(tf.random_normal([num_classes]))
    }
    # Create model
    def two_neural_net(x):
        # Hidden fully connected layer, a1
        layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
        layer_1 = tf.nn.relu(layer_1)
        layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
        layer_2 = tf.nn.relu(layer_2)
        # Output fully connected layer with a neuron for each class
        out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
        return out_layer
    # Construct model
    logits = two_neural_net(X)
    output = tf.sigmoid(logits) # Convert output to a probability
    # Define loss and optimizer: sigmoid cross-entropy plus L2 penalty
    cel_loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=Y))
    reg_loss = tf.nn.l2_loss(weights['h1']) + tf.nn.l2_loss(weights['h2']) + tf.nn.l2_loss(weights['out']) # L2 regularization
    loss_op = tf.reduce_mean(cel_loss_op + reg_const_lambda*reg_loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    train_op = optimizer.minimize(loss_op)
    # Evaluate model (with test logits, for dropout to be disabled)
    # keep in mind boolean to float32 tensor output
    #correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
    correct_pred = tf.cast(tf.greater(output, 0.5), tf.float32)  # threshold at p=0.5
    accuracy = tf.reduce_mean(tf.cast(tf.equal(correct_pred, Y), tf.float32))
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()
    # Start training
    # NOTE(review): function-local; does not affect the global batchIndex
    # used by getNextNormalizedTrainBatch.
    batchIndex = 0
    with tf.Session() as sess:
        # Run the initializer
        sess.run(init)
        for step in range(1, num_steps+1):
            batch_x, batch_y = getNextNormalizedTrainBatch(batch_size)
            # Run optimization op (backprop)
            # print(batch_x)
            # print(batch_y)
            sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
            if step % display_step == 0 or step == 1:
                # Training batch loss and accuracy
                loss, acc, pred = sess.run([loss_op, accuracy, correct_pred], feed_dict={X: batch_x, Y: batch_y})
                print("Trn Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss) + ", Accuracy= " + \
                      "{:.3f}".format(100*acc))
                #print("actuals:", batch_y.T)
                #print("predict:", pred.T)
                print("differr:", (pred.T != batch_y.T).sum())
                # Validation accuracy
                loss_v, acc_v, pred_v = sess.run([loss_op, accuracy, correct_pred], feed_dict={X: valid_X.T, Y: valid_Y})
                print("Val Step " + str(step) + ", Minibatch Loss= " + \
                      "{:.4f}".format(loss_v) + ", Accuracy= " + \
                      "{:.3f}".format(100*acc_v))
                #print("actuals:", valid_Y.T)
                #print("predict:", pred_v.T)
                print("differr:", (pred_v.T != valid_Y.T).sum())
        print("Optimization Finished!")
        # Calculate accuracy for test data
        acc_t, pred_t = sess.run([accuracy,correct_pred], feed_dict={X: tests_X.T, Y: tests_Y})
        print("Test Accuracy:", "{:.3f}".format(100*acc_t))
        print("actuals:", tests_Y.shape)
        print("predict:", pred_t.shape)
        print("differr:", (pred_t.T != tests_Y.T).sum())
        sess.close()  # NOTE(review): redundant inside the `with` block

# Execute
twoHiddenNeuralNetwork()
```

### Results

```
# Accuracy (%) observed when sweeping the hidden-layer width 0..14
# for each architecture (values recorded manually from earlier runs).
num_neurons = np.arange(0, 15)
accuracy_1_net = [50,66,57,72,75,72,74,69,77,75,74,70,70,74,75]
accuracy_2_net = [74,67,78,82,73,78,79,75,78,79,80,80,80,78,80]
plt.plot(num_neurons, accuracy_2_net, c = 'red' , label = 'Two Layer Network')
plt.plot(num_neurons, accuracy_1_net, c = 'blue' , label = 'One Layer Network')
plt.legend()
plt.title("Number of Neurons vs Accuracy")
plt.show()
```
github_jupyter
``` import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import StandardScaler from sklearn.neighbors import KDTree from sklearn.decomposition import PCA #### Visulization imports import pandas_profiling import plotly.express as px import seaborn as sns import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot df_april_19 = pd.read_csv('../data/SpotifyAudioFeaturesApril2019.csv') df_nov_18 = pd.read_csv('../data/SpotifyAudioFeaturesNov2018.csv') df = pd.concat([df_april_19, df_nov_18], ignore_index=True) print(df.shape) assert df.shape[0] == (df_april_19.shape[0] + df_nov_18.shape[0]) df = df.drop_duplicates(subset = 'track_id', keep='first') print(df.shape) # number_of_songs = 200 # remove categoricals df_numerics = df.drop(columns=['track_id', 'track_name', 'artist_name']) # Scale Data To Cluster More Accurately, and fit clustering model df_scaled = StandardScaler().fit_transform(df_numerics) df_modeled = KDTree(df_scaled) # Querying the model for the 15 Nearest Neighbors dist, ind = df_modeled.query(df_scaled, k=(number_of_songs+1)) # Putting the Results into a Dataframe dist_df = pd.DataFrame(dist) # Calculating the Distances scores = (1 - ((dist - dist.min()) / (dist.max() - dist.min()))) * 100 # Creating A New Dataframe for the Distances columns = ['Searched_Song'] for i in range(number_of_songs): columns.append(f'Nearest_Song{i}') dist_score = pd.DataFrame(scores.tolist(), columns = columns) # An Array of all indices of the nearest neighbors ind[:(number_of_songs+1)] # Making an array of the Track IDs song_ids = np.array(df.track_id) # A function that creates list of the each song with its nearest neighbors def find_similars(song_ids, ind): similars = [] for row in ind: ids = [song_ids[i] for i in row] similars.append(ids) return similars # using the above function nearest_neighbors = find_similars(song_ids, ind) # putting the results into a dataframe 
nearest_neighbors_df = pd.DataFrame(nearest_neighbors, columns=columns) ``` ## 3D Representation of a Random Sample From Dataset, Visualized Spacially ``` fig = px.scatter_3d(df.sample(n=5000, random_state=69), x='acousticness', y='liveness', z='tempo', color='loudness', size='popularity', opacity=.7, hover_name='track_name', color_discrete_sequence=px.colors.sequential.Plasma[-2::-1], template="plotly_dark") fig.show() ``` # A variety of Song Selections along with 200 Song recommendations ## Notice how they generally follow the same trajectory along the path across the features This helping to Visually convey how Songs are recommended based on songs nearest to in terms of quantifable Audio Features such as accoustiness, danceability, energy etc. ``` id_numbers = ''' 16UKw34UY9w40Vc7TOkPpA 7LYb6OuJcmMsBXnBHacrZE 0Lpsmg0pmlm1h1SJyWPGN2 6T8CFjCR5G83Ew3EILL60q 5ba3vTyegTVbMoLDniANWy 6VK3ZdppJW3Q6I1plyADxX 47nZUjQa9NZb7Nheg8gSj0 5P42OvFcCn5hZm8lzXqNJZ 77RsQL1RDECVnB3LL7zhTF 2vqZnmBn0REOMmNp5pMTJz 1dLHaoG70esepC2eC0ykV4 4SUQbrebZgvSX8i3aYHMB6 4D0Xgaln0O8K8LK2gjwpr8 5ipjhrirlnBV7BMY7QV3H5 2lvkak4Ik64c4vlAQyek12 0t4JgAUj8ZCbWOwSU9h4nt 1RjYRvWpZeh9vMjjKzpH3w 0YELRuijk4XsKWvyoWY7jI 3Xn791JUhuITZdLsIuKuQQ 1Y2wWhbLCHW0WfTczmuA2X 65CE7YGQzGY4p1MqnfWYZt 6a6zG2o8geJvBVJkDkFCHQ 4Vcqv8zsfoNpxr7dWEJi48 2sfcE3uPqDObs5COsvk7QJ 2gz8HI5hZew7abJ9gcLY7J 2UFpXorq5JOIctCwcmDyZ5 7pNNFcYN2N1T0lOKMHL8u9 7deuaj4pjJqxWVky0jcFrd 2eCdpRpnYLp4fj0iMNra3p 5WyXaXmMpo1fJds5pzmS4c 2HLNwAHYH7Ejs2rZLLyrmj 0wXjzthQdMd7SZu2kNwsVC 3EnzqTwdFWe68x0OTxR9T5 50rPhDfxSL2kmEovmXqTNf 3VY3JjW7T0f49JqdFlvqIV 458Cn793jgrNc6miDUSAiK 40XOJ16Zc7pqgqYq9o7wjS 0QuuDvOB9fZ49pZ2cIdEdw 1f5aQjgYy4mKjA7EgJJvLY 1QJjIWHLf05mUQPq3N2hxZ 0wrhAauh8QSw2DFDi6ZHFV 2K55wT0q49n54mZmA3hqS8 6glST22VPJZRTKvxecHSp6 0lvEyZrkTDg0vK9luhcjZg 5YaV62mxj62GSlXvwzgG3J 6yC44aQAf9AALUyJPimZ11 1frCKo4D3lktaPHfkyEuHo 3hXsGl1WdOuKye1aHo6pF7 40NAjxDw25daUXVt1b0A0D 0bkPHOwWOIG6ffwJISGNUr 6w3401sQAMkeKdQ3z3RPXt 56UwCbkvU1p3vHTnlbv3kS 04MkdoV7vxprPhtYA0Cx5y 
7AesCHBrKOy4Npkxt907mG 5B7w6neMDX6BYPJdb6ikRE 4AowP9TvejSnEpxxJigpyn 4M9onsaj8IxHJEFVezMRoA 2DRNLTuiZr3MdFNfEHzWfz 4Wo5LyWddbPCogBIBrkhlt 0UJmSMFB05CyY3dTps6g2c 7nZR4x2aHeIyzAtrMi4Wua 6UZVW9DjfRKrcIVco5uwc1 2O1FwU85kgG0SJGJhszkB0 4OK4tHSUnCXpBfIusCOdAo 0MfWpTp3GrJ51bNxLanyy1 5DVsV3ZetLbmDUak9z0d1E 3ki056t9qL4g9GHWkPFJYe 4WCNiW7DJFE6h94q5NPZmZ 3N0Q5ce0Q3v6MmcNwaGG2p 7rQFDOKqUEaXE6X6Of4HTw 0wi0Hn8puUPmYdZ0JvpG2H 5wMD46niyehV3y5HfeQpNf 1nTn4pZhcgfRPobs43xrvL 0NxPZvt6UYWLgTbvjCJd2n 7fdHvtur1uLx5crFzAfWJ2 5AZt6HoqpUdHyhia36Khtc 1exbNAnvvYLYsEFESsCjDO 27ZfYwqic7RnwuitxJZiE9 2iPvO3ctXFGlkzOsx6iWyn 2w8g5LJzKqez8mENuk2pbL 3aBmFnfx9QfLB3knrKr1Mo 4UUA76EBTJzcICr2nNyhnV 4aV1txuotqBFGLB2jwiogo 7ASmnEp32JgxgH76TAaWwo 344WuUSk6SRQd9849fkAct 7aXH7YjPAixvHIPxCKxwIo 1CakWoqY0bPK9Ov8UocFTR 2B9VQlYlq6CUH0VXdQqB4y 3gCPlZpymjidx564rWcPHX 691J2jGivJasVLkWU11dpU 0ulEzQTIdtZGvYH3mkK84G 2XpxTgvloEbIIVfEt4XUKt 4dqcedp9451K9DvxYugrTt 2Y6IAs1aCdb4rzFfGjONUo 7LDtRLCz9D5DOR31jQZ65m 0oliuZWC43aafuxqNlGuxy 0Ks2NJH2PCxyWAFPlI4p9B 7oLqoswT2hfCG90crbiToe 11wZ39zESerUTPXKWhx7QE 4HWfA0iD0gXuL6gVreNYTL 5EFw2MVleUknhnPzfrCrTq 2drp4ajf2V2xUvV79EmzMw 6KL8uR3Y3JjFpzzLQFBzQa 0SYo2aRh2MYfBoJAFOYtNs 6Iq5a3BvMSx6X7auul0yDE 6TZUjNnW4qHI9wPrO54L5o 4v3s1AdtPSBxFK93PNMFSg 7FM6VwHNF3EWQTyiloogTV 3FNbf1Qt2ycepS4fasCuOm 2qK9xZkbBrTRiDw2dnJul8 5ozbdCZw5MZmJryCOyDYO1 0M82DdRxHFedS7fg7Gk2qB 6k1Epe9JbePsbdq0EZCc4i 63TMt7zR9YLpNBpzRYLG5I 6tbdFaJWas52BT8DZH76Xj 4V7gH33fKlEhX4d1uk2xYB 6jY7PeOZ4P6ww8XuzCyGfO 3m4nvQbC1n3dm6SbYIDbDR 6J5ArwJqeLHFKNfHcDP6OG 4RlzULwFEYBjTJNhc7frWm 1kZ0mav2lhlhXf4fWjw5Nc 0gJBsp5q8Ro6zXxKzT4DiQ 0CWuF6SrEXmfM0EDIERBS1 0ogRPfqHhhZuaeeVt02L0Z 4AEJ6dqjb3uo7K9R2xKGJ0 0b4akisi6edx4RkU3VO1XW 2xLzmImDWvk0jw92tTsnHk 2PFvERcsENO2mSXV2abmMW 57miVDdQOiOx7ZNaEjGaFC 0LdkVfBGmZUOKf8sway0tM 5GtQkJTQ01zxZ9xRuIBRyY 1LX7SGrc4FIE6LnzV498Ow 2l3OlYqGIiJrPByZNx8Ll6 1yCb0FSeO48efDRg80Turo 3r5OR32RDkcp3eIQ2ylF5o 3grKLoUX87NaEkvouW0vmz 7ts8ZBKNCtJvd0ijGxTgCw 6LSlTgBUF1T8rBrTKtzbWB 0VCTFk3PtHHTbCdiI2SNf6 
5flKCotkqTK0SRHyu9ywOE 7FNVvZKIFb5VIwyY4tCMXt 1mc6PrRRhSipTHKSLRuv5B 1s7X6ZKOMhP2luohWVXNNP 5WPjMN7nxk2HqcPfewseyz 2rX3PbfV6OrObng2YL9Osd 6ahWJqh8GQag4OWmyRbcnE 3ZYN2cfyCFn4NuWxEW9tuh 3DchJOgF4JUzQJyoAVePa7 1fhnlsDdCLs1Oi5X3oVCTD 3T0UOBcMTeytq7RmFDZMbu 14gtLymOStY8niLakJlbf8 677SnHIc0M92Nb6XUnaSCT 1t2hs48AduLr9wik6nF0pw 3QavdjzqIxMUPeSXgoA4Di 4LK5o7buDJB9A3aL86y5dR 1JAGP2PPls6WXahoN9IM14 0uteQpEpt2XpZ99ZT7m0eA 0zm5v1li5HwBcFJZzXz2Iq 7epZd4ZUwXGq5CTOwW9EO7 1R8ihhEOnbscF8kheDNC0H 5gYUBAE3o6k5yBv2Ni7KwQ 4EuW6g3eq56jUDqdNbUryM 727FY7suhFAVmwP3tsg6uG 2j9tX4ubo2WISo9GIJLySx 3QUtbFgjjnAHTtLup31xVa 6viaOSezCxDApUQlIc8mhA 3J0ZbecfqYszqlQJKYswVV 10aAr61dsWKA9RRdAmk2CM 7gE8QvR9Pxl7G2ey8XFtwa 6RF6zRVTz1FUYzBhop3jen 2stJA4LcpvwPHIRa1Gxp2P 0yrFVbIvtPU6bb4YMD2Vcr 68Hwxn8KEb3cXjv3w3eHtV 6aTdoiCwo5eYrl6ik4jRYH 3FWU0Aq3QHHkslDWD5sXvJ 3ckyP4jOXNBskOGeM1E4WY 137Lgw0gey9uw6hDKI6Los 4FrbvIGxud4J9DeWC5OYrd 0d29ZVNUaxWOtUFzElL3B9 7AvTgaX6gs7L0f1O0qSlDf 3C3pZzGJJR8wwuh6npPvHv 3YcmUK7BiWMBJoRWC5p0vi 3gBPhTsYDm9xtuOt4iFjMW 6QotxMJ0VE8eh1rvm2alsC 1fh5YKCSpo4OvC6usURns4 11bs6ROtD5D1VfDcCje9Sy 2DLcXvfFrQRm9D1GzMbgMg 1HqOKMf8bNLaEPvd8NXx3c 3tN1favTAEXAadxfygjNmG 7F8ip8rt5cfD18wUTgE7us 08pFqsZZZYeFbiTGPQj1J8 512JyhHrndIxZ81JmYZLmP 5Df1IuQ5AqKIrK1Rplsr9p 52MsPDozAb8oy9IjsndB6v 4tYja8TMtjBAejK7pzP2y4 3s9BUjzYDIesX8PXqcWno3 4jAbuuhObXbHrJP5ShVOZ8 7ezSDJfiOAmSt5nYe00VaQ 1p6BhKjxF03jOd00W6io6O 56b6kZuturLKiFl9v29tEp 3YGG0dmOCgA60bQts3J0C2 '''.split(' ') to_be_parallel_coordinated = df.query('track_id == @id_numbers') len(to_be_parallel_coordinated) px.parallel_coordinates(to_be_parallel_coordinated, template="plotly_dark") id_numbers = ''' 3Rx1zM3nDFQzAOYg9Hd0D4 67AHtK2jFq8dlOLSSSbQ7T 2ystp6xiBASPJkFR16gJON 5VNGj3qgKC1n28B9etIoJv 6OarwT6HBT8jW6MsnVwn58 61VbbeUd8bXxzyZYrg4djH 21rvKibsH3WmojUZh5H3Gm 11wxWExHmqBNKIo6zK9NEn 5ZGXAHp0YPYFUMbyMqDQH9 4BMPGmOzi4H3S31B2Ckx0u 1VcVGJ4sqRv2Iruxc8CfYf 1xOoqWTv2wLhUeLtXZTm9q 4SV8h3RlcuQc9jE9MUQfFF 5c1Hz72Bc8VMbghi4MJQus 0iZOviuGDLFc8vSrB4RI2T 7JRV17HtiiXksxDdTdpYTy 
7apGuGr4Zf6t9JkATkolAI 0Mw9dLno600aQgA0Gf9Usr 6jUXJaXtxOhBLeWbpR2kN5 1nASmYf1d9HiiIgEOPhYQR 5LAe0lSl7rMle11o6Et9WI 5LZu2syDoQNaA0NptU1YIs 0lz57CGwAyuYdMk7BO72XI 3MDnGMGGC00nbpkLP1r6cN 4QZpmKzjC5t1OxEKCvL7Ft 15sVDXzpwJLfHM99VeP7mR 3Yeb5nDeWTvXfJ4TdlTtIP 56Tuc3GqQrByXDZu82TfN2 2jyrDZbZoScSdiTxVRlzb3 5RHZg80sV4QFq3alySZABa 3IYkFudbmV1sgbz4riV73P 0xtEwGTNW1fjQVJM6PZ3U2 5zllzp3gvXWq2cmyBZReij 43hjTh4WF2cICX1JhwfE9x 7BCPy7FIt6MIZwIYjgwHUc 3HRLlKWdmzXfAmbcrOkevH 5zTE3LjI0vXoNs5sXe1wBd 5ijr9nCHXMTb9qYvn3taSg 0R9HIKNmfmn44AYsSux8Qs 4AtiPcMHA5VPbNlO4EdB4T 0Ica23299eon0SQ5GMcJYc 2xkcKjB8CYW1wXiZ4haZfu 1kcNoS77udN6sSUWq9mI60 2kWUZwAhXDQmEvxv6zAxsx 6a5vpD5O3gMZH7G8xwOv5X 2mg15L7RUwpaymfobUFHOM 6HMKAeNDeWkPaHVEwvf6OJ 6zZeZcCSnugaVt5mCiCCP0 58xiGZhGtgJGCBDlXwCTbe 5O4MkYjbKpC3UH7oE7YRQa 6NBheB7uq3KuwjrriafhSy 6Tdyv7xZrcnHmO9iQoysKS 6GJh9XXO7e9D16Eyw0RIuz 3ayOojGZYT6wNtFC0NDQTm 79wTeGSVlONiNfZTdyGUNq 43w1mfDBN6MHueSkUjN7D8 4HqgpQdgUT12xACerT4yS6 3XRfdbb65XE1bfrAwlRu28 3Cv56grsf8F5iWn4BHtZx8 3YG5WGhUOj8Qzj4q9hF4TE 2MpCXZtBR02QWKP6xwRqy8 1WmKw3lMhA5YU869ilylyn 0vOSZ7hAUxocDU7qPh0VCo 3rnjAdt1duHuVV5AjavYk2 3uUzHjzRxKewzg1bE4TJwq 7M3e3QMHiGgWaGqwaRS0oH 6JtZVLdOzT6GeTgPzSoGAA 5u7UqEwOyaEIoA1TLLFpz9 0TWdTb7si8hunDhLmynRsr 0fzEYa7EiGDTU9wz976bAX 1HybrAhpKs9bm4ol6UR8bZ 4dp22919ccLK9SpvAEfTbA 4dhR3lLe5XLiR1TDNuGJ25 2Ovrl3OYjw4Ys4UJJQZaVT 0KU1n705y9CXC2F6fBOWej 4sPQHt3Tk3zz2TxBv6iSwu 1IdFop8kheQ8DF0rFhHiqa 4Ex2Fk2vc5JOsYptDUBtJA 1slZlNfFpMAfNiqtf9uYto 5ykg5P1kKcYCVqF5cHXjYu 6IGRNK7vC8fuhncF7YXXg9 1gZRSXSFGgZ2FfTClxI2A9 46BanJsjr1hqamrvLBYkng 5IwncSTQf2nC5aTktUNJFQ 58iaGunPax6nehU5K3AlCO 5vEwDx78onSExtBl8Q44Qf 65fd6IOZZjFYkuApCdbGxR 0G69NybuKLFtOulxwW348d 1z0b8KGrWldcZLakynC9Hc 2iaJ69ql68l3uCFtP6Rz0w 525g3ZvALoI6eTwOnE0dvh 54Amn3maW5gDB20vIkOzMK 3ZSj7F0vNEUmr0pJX3ROcD 0DbubpYjXBCGCrbcVl6YCY 6gdYVynIAdcSMWIaK3x7lW 23NI7LEZNcNgqMQ4MtNZPf 3sVNfmjOawrMVBxZ5HR992 4CCFVqakDhrAqEBbIeebgw 4VRoNouo8soGhl3GaFLmdr 5Mtb2rpcBkZEbNqLx06qfp 2m2Si8RtoOGPfbIjDx9Ug7 64SrUvSXvi2DCqwnScNQ87 
7boSAJxzyyCJbP3LcDzssT 0SgncrTJSvH5xrvkllBZWj 23ptyiin2PKgaHZW6F0mMa 6gpomTTKog3SU0if4XT8E3 71jN5pqWqS1Gq2UXg8IabB 0yItuTAWCQ4JRvo9a081uD 0TSzNyWeCGVz9VdwFLWc2k 4gq34v5gzCtdaL4o8drPBx 3IR6Za6YHTAeikVF8w1DvK 2pkluglrMGfygP1yVADsX6 6sQyFRXaDU3MmLORr6EdNv 4QtS332yh4ex5KFgcMA40E 5t6GgWRjcigpk0pXpcwzSO 1bHaP4ZOPgtpoZ3CN6bIML 2zT9xdBcvSo1CO8RZ8Tcqj 0GgFwGjaAdqVga8j3ZKCtl 7m5LVVSaWzik4h332VqvbN 1P3RGzIqmcHKvH68e5nkBW 6uIYA3RVNgr1btPAtr1XXy 79pqKla5Q9IiAQfK4jalAO 3KDZxrjgFLKWs7ds2rvVcW 3yiT9hyDinSAvubb3XZ8S5 4byppJf1BVIEYj0FV48uN7 1PihJ1fLjU2wkTatRudSyE 1rVYJMGey3MZapQwCx6xXn 3X1MK1cg0in1bV5s8BvI4O 6xDEZCZm0Ehbzgj1HAqLIe 5fDXSKPlZQlaq1jC3izCkd 3JOdpt3Msi1e20Nxmor4o5 7gLSX6HlNso7WkoWPCGNGr 0PswjCzT2lZY8EDjVRPrPc 3XXbyMFA9F4adfcnEjMKHM 5jM3bDFV7UuyhHA5264QAs 1KRiMLHjthCAhWqDunAJOV 79ojwy5zomoWoQNuaOWbKh 7qbUjczokcnGFIwx68aBqV 5IKtH5C078QBjDSniwdTXj 2LfM9NwbQkBFV8XKAwhuTo 7A2lPmhXhtlZlsRMz0ShNs 3nSvqC1W3IEhdubx1538g6 5pFoVXWo5sCBfC5pLZu1Gg 1XCccHjyDRUdOVrEOpLzoH 6LeiYw9DsrS6fTGG329tK4 7md22n0LputBo41lYOG7tA 6YPafAdayjyjcoPoKIxn6y 5Tpbw8WbGEwI2pzjxXrGvm 6ummA8cVxCDnjT9382Ui8G 3m9yfMVIpEYvNLQZl2f8YF 37S7watyULcdUTc7z8Opha 2uOPEftUSMDJK4UpsUjGPO 2Xv0TmNKxLIV0cVRwM2HFz 246dN8gCiMv5nHi5wR2Anr 6i05cmZT3PHtSriKFWxTPn 06M77pQeFWvFiVn1Be6XsI 6WW4VgC1CHJjrWxYOtvayZ 06qD1C1Tcd0mYdRBBmYuTx 02ZFCSXPFgFPEahuN88kOQ 06QqCHpEStp7fwJYK4qoB1 3XuQifZguMGzjZJ7zHw7O8 7bXHynjjhieyUVyq8PfjHg 5WGOhaEiVJzjeUbjgPK2ww 4FXamUtTru5LlMNoCjlBRH 5oi0T9CsacaGLVECLBKWq5 5ulm5IhULY27ehqTSrQeLB 4L0RXCGs4SP8CkrBbZxsfS 5jYACoLz1e0r07W9G7oqOi 5PbIFyF34gCASgnG7yi0AG 0iZU8XzmveXaRiWBpE1ZTI 4pvwyXkwtXdrKIXpOc0keI 4wILZuKMKmJZIQxW30u960 3DrjcLyxLSG3aOh3MvXnUF 6Zm6DJFgghFMnMw7xBIwyn 02MMgyaLCvnIBw4skXmZ9V 1kVyvQzqxOZz4BgAWOY8ps 6U3j5OkhwwHlVeVgZlyl7n 6wdOphejlm1hNfFhXmzT0l 5rNFuymSOcCW8nTfd3vYJn 7kfZsjQgEApwNuceCzJIp8 4AhUSi91kDdC4G51qwvDlD 5Oi4T8e7vZK1xfJgBEWDdd 5Q5POfYGAdWGSSYLtkVQ4T 1KgOw1rCe9YWTFbFJYuYjD 2Z40xmLbAGbv1vQno1YMvJ 4PgpYEtlH6VfWmds9jVDoT 0ERjKxvwU91tthphZGgLFn 45b5fAvIFHBWmEcBGytul1 
5biNqsTCkccqUfmzRFVIPO 1fdwOBuqrsjf95i8rAMUCC 0Sm76b6hQobYvHebmCa49H 73A5MOZ2MJyKw5sigQe64R 56rBa1McCcF8Q6cyPOAWji 76B1zH5bbarUGH4CYLfvbS 1bUQorCYDuyQhIyDYWzNyz 0eOAeqbD5sxU77qdHSYLOY 26VXbBYVzPXvl0wAAEppnr 5DK7vMKUkq3ejNQK1SP2I0 1E3e15pztQETb3hysHnuDy 6yl56wrtGJVrnhFJjQvIVS 1xWDs7mhV3YbENkbEkmvH8 '''.split(' ') to_be_parallel_coordinated = df.query('track_id == @id_numbers') len(to_be_parallel_coordinated) px.parallel_coordinates(to_be_parallel_coordinated, template="plotly_dark") id_numbers = ''' 6bwTuNxmVEOQw0dXdmgLjC 4rTVdzMKkbRtcJtbHCtTKm 09m4moKIXDyQNZDkoDqjNk 74VJWMSZHMcvkHQhyFmsXk 6CE0gR4USBQnxKj9vWiotk 3REJFRU6OZmqWk5neOLPXd 1jEH3K14qOijd64Sa052fn 5Z5YYYAFiSsfwOm3EMmWJY 58bs4VQUlgyZcMKJVjpZ6o 78EsU5Njik3K2b1Os6zwLV 0BdUgqNA6b63BXGDu4PeKN 4PdEXwNLZrPK0BxuJwr0nJ 4kKREED4rj50B72mZFuIip 14houuG4FrK5ZHlzVccj3I 5gH7dn57qXFVoeY2IKULtY 2bJs4cwj40fPxm3m94ORe7 0KE6mugI11bbF8kBYC41R3 2PWUpPMK2GeLxLm6boZjto 60bhcR1KCbE3KXx0zDv0XY 1zl1cnISd42IeaGjcnQNAD 07jABQKHpIpXKCOcqWtDpV 1kdgim6R7kqUAOOakjyWGq 5NiqIB4BwRpoU1V6U195OU 1oNvNkTsX2YtpPpYQHL9Zv 038Cff0ZD16m5byH6ohfVM 0dgHfb4WaQAzBdS7n4SPmN 2Us0EFBMreM3VlE8AS9srv 6K3E77Wxm5oH9kEI7Qb6rv 2IAvDrAdvPDiz7Z9ABphO5 2m0pE0vX5h4NahhFsPMwnr 2jaKU9jN3X2auwOGjukuE3 5MtAIjUBeWqQ4ZUsb66vEZ 4CvRCtSjUTYksvMiHsT0CV 537UFrFPasLdnwe4Rk0ROO 2UBg1GC3tMTnw0VzwmLelz 4dVWz5zq7XXigjOfrAfI19 3Ek6sWpamhmmtk032Uhg2V 7oYH3VjR13Kmtj7o7xLEZr 5wZxmzrLNDTcw2JNyaKHS1 7EsGSHSaobePkf3Lsqre6s 1pe3AGBuipdklcKbJKDP9u 4IDNf4oDocAj6dufznifao 0rjX0ul1dfUmtNDAUXIPup 46Pk9K4Ta26lFiUs5thsU0 2OP7W1lsZkSWGBPdnO3mgk 3jrcoA3eEMZGKzF11VzxO2 1XbzwdyDW4YohbntjCdso4 78XVcxI67oXSzfV6YAODtr 3BWTnYtojgn68TZSkGeaZw 6pVGYwDiMSfrEAMdIVSoLt 0S3f2G3nuCWHmmSbck4i9C 58yF5Yqokn4NxABBmpK8Yi 0cEL1Cg68zorMS2hFq0JJI 536PcP6LHChvhsH64QVBhq 4gRH3vcS741pSZW66LQK4P 6ULiCxVUaWBG0Gw2UAg8Dz 5QkHEhAJcVrsTKSZFJDzwX 5bQygUkLEUYEWSk6rA59QU 4XdhTfbWbD11U3fTW4EHcj 1rS24VudoY628mdFumzVcI 32iYiowgoEfTsWQkcwTRlX 7HcbJJxIaZbbPIRb1CyZ3m 27do8NxmUa0D1O9Mfi7qJN 4MpCSQSpk2yLnfrOSHsZxq 0PkKfT55z3nNSVhII0tZdN 
20QnKWlncgqaX5NYOybhgy 5gFjlxAUKTqM1GUlFNKw0S 0CkMQnSzNWzx30BaLnllr9 30ZIabSNa8EbZT49b6HdFO 0hrdCoV5LPC0ni1ahSbAID 3FfWjwjwjVDZWlddoQ7jP9 1RDif5mDdaGro37AxOVYoJ 5rfLztZGbpbF2qC2sU0LZq 6bcIIzSu0niVuplUk7t7LB 4khYVmGHZz4JWpFlOMXanb 3xXqlPnnVXRsxfz7UGVi71 5a26fblCJE2O4kEJSJxU5h 3up1JsYa4JNZBakiWP41s0 3WOFMQnYvfcGFxA13J1e55 6On8OnESrMsfScviCLu0ac 2vVVMFMLolbasmvpkyEF8K 2GgiRBztrAUC3SHmBxAgdB 0aCwjJMzkOdxUZfAjKtmuY 5k3DQ5XZGBc5a0Rwbwc8hW 3DOm109bpm8LVlGrPj8601 6uSQ61RK297rMcatNDbUqW 4kcM8vye44jgsRMus1UjER 3umDgMGgONpKVH6KzpCcho 6CqEVY16aBgIMzKmHOBLAy 3x2Xk59n3Ey2703JJX8ss7 0ajlXtd6JWlrEGt1Cb2gRH 5YE0jwzEgR55ngUvtAzEG3 31Z3tkTDOaYAIJt37DG7lW 0v5tTD8cCbNsuSPdZq4ppU 62tQ11UnK9za7j0dyqT7Hs 5h53e771faNluczmIdNTqd 2lhWPS4vdx7F0kkwfLmAwG 7oLLKRFfOyE6FnIbbpXsyR 16Hf2J1HuPbNPWFvNZzYPs 6i1fuTteHcDcO64tGAnGeh 0URolWwoi4SSkoNHXDrTpO 6KiZqNhZtkdB219BIJkxNJ 1XKMWyhXlzu54mHfQuLUlf 064OyTlK7wUeK3D0OcCNcp 53APvcivoxGrAmK2b0Givf 2qKCyrQ61bmJqoV0cCl6eW 2mpINSrBUHvmP5oYSZ1ZFV 5K7gKm344eKOkDPHQPKAzd 0utSnGPZthEAuKH2kUfTcj 1FC2CEy48qcygiudnhS11x 2uGcDgpKyKBIIOfGwTd6bu 3CgPWIPgiLM0fuYQSPV3Vb 3cQCiT1PvddSKI8pRk4ygK 7rPm8nyaZMDzrt7HDFC1IA 6FS6mOlzpyIWMz9o7pZoWo 5bOGB5m6V5yWR0tGhbBhX6 6HnJLuczohJYWkDGgYmm0u 1BZe0OJ0eEjJloBAvg6aJJ 5avuMjb46hBDucxFvxn0zo 2Z0q1138jfn6aSMB7O8o4w 1sVtiUcsOJTWYjucbPoVnN 1QSdwCcfv00YVFjlMFzlo9 4IRGT4KQBDfevJfYgUuZvP 3zM11n3Po3s6eBH9QAqcNr 5w6y38iH5HdSNk0EtjAdW9 5BZNTeEo1t1HXVucObfYSp 66bWbHHVd9Zi5xNAKQjTmS 4NlYgUpDS3K7m7mw4lsTM0 1NBksoTuYxMACF2v9OVDMB 4jomQr6ARl89f4ZguNlIQm 3lQ1IPdzulBHfTrqLYH4vX 7gsd2pg4vXfmAnMuXRxTEE 56Sz3MTf0cGyjYwTJOZVRY 7aw7h5j6BK5KvzSPNpKNRj 3woUcMUIeew0PfIlEAGUcH 3j1jNAZIgr4vhBfI6sgfxC 7zhc7NI9JHyPmcOaDcHCVn 6lGe38gKVRfF6cKeXmhidF 0XUZDGgOioOehdcstP1hU6 4aILeLn5yHT6AsB1W7bEHG 6DdGyHy8hlqylxfaDRpVcK 2Kt3W0rl0PjPCOjAsf9mjX 0sAuFhtMq2SKZ3jZeU59Yn 6ldSXWJYVt1Qig7mDm3fXv 2YlIQsylMAOcqI7aLas6zj 4G96MmIt9XmoVPn9XzgtSy 4gPw3HZ18KN0UOniw4UEm3 5n0mpjpvR5iWWkiQL4kgRX 2pX3YMabAIjH2yQxb56n9l 4p3zss13iYj3TcxUgjmrKM 3QuoES16r0kfiewaKeYYnJ 
6Cz0v9MHjAdviUGTtzO3Dq 0DdCjDmCzioT6W6nIhMOgA 4ZNj2L44lvkGZ58SaSql7O 04ENoZKEACEkrcc7v9EjnY 3xYgJpdnAuKPBSA0LHtg4I 4Xds70hJW0HNo0K7OKJbl7 1AIYotQAJnVXpyfAznXK8y 1Ez2SpFr05CspgDgHSja91 0si5v3WiNFDgQUcbkgRp3o 0HRQMiz9Ua969JXOPVLlcB 51XnpBsO8S8utaHscyhOnP 5myMjEVTHoBQrvatNM0kyy 58b7PzFbREarz0Os8GRBZK 4sX6evSOdSL04HR40EcEN1 4fubn0dRFW1WMa7yiYIZSs 1OKVJpL9RPeLjFGJUzeXv6 33gjPr3rzp1dylPMPgvLYV 2qeEyuDUaucAe63BoqJqoS 5v44Md1bcJYN0rL5kpWfd7 6PSyaM5jEbwLXm1RsKZyWE 0hLPDVYwODPeJfkHSol5aI 4OPPSKaowfmIiUEVNyh0l2 682gIKe9M4YJeDbw0Uqimn 5aGZpag8gyQf8bYu1RhYZe 42o454bTsMf9g1A0cwGxke 40vqauqc0VQpvTGYYH8ad1 6oxVrlxeTwhmOroYJkrAad 3AVBA0GTpnMFh1Rv6Xqymu 1VZmjJ3WV1nc3ojykNVxFa 4Nclo8xnQeuX54AGKOybbM 7Dba82QckMfi9xvgeePc72 6PFiq41950kSI58ILz7uGO 2jJUHXFaFdvtxCOVW7q8bd 2lEmjaR8rQqsQqe6CLXtdz 3lPO5WuqFNY12UGkZzZ4Xf 1o1tRS1Vzt9RZDJSDJUzSC 5D7erlQmTndO42J9VuvBW0 1kjxPdNwFKldrMVxlO7lio 3l7DVkePu6bBxBXTl8cIDc 6pTMJuynSqNQXuGar4Skno 7oGEP1UfFPnJOFeE38Erjr 6tIXXMXvOi3XNHdRTwYFOl 5lYAexg45DfNm7LfJNYMva 2wgL4gIm8InPw4IPaOBp8h 1CzXfJbCKcHb33F28SyGv2 4nHMoGnvsDsCMHmwfSVWop 2R3ifU5sK0FygVOZpk1yJW 7yeO78qI0fxnz6gjTZEp7i 68SS7wcjzSTXcifbplZztH 6fbTH5few6yjRaQuD0tqfA '''.split(' ') to_be_parallel_coordinated = df.query('track_id == @id_numbers') len(to_be_parallel_coordinated) px.parallel_coordinates(to_be_parallel_coordinated, template="plotly_dark") id_numbers = ''' 16VsMwJDmhKf8rzvIHB1lJ 4DdgOzDiK3VocRlOpgyZvI 5smmdqbHwTdVJI1VlnBizP 6lyFgQE2nJwT34DYJO0gr9 6C7oT5ZSNyy7ljnkgwRH6E 4YSO3y5EkzXDiBW2JSsXyk 2PktIwDOLDNRntJHjghIZj 2OKbnAB4LIw93b8IXJr34m 6drCDhqlK6cZ7LKDi3SB18 0ZsWvJXGaHqKUHrvBjZxSy 4hnq2TnTGgiLG1qFAFQtQG 40OCjuNPJQUTjSnTqFc9u5 2J3vblLOe0NKOJvHXxmvuu 2NGl2ljBxtvl5duT5U0Rgc 07iwjTrXQsfQRJ65rEConJ 4Mjn1iv3fhTtDt1ZRnUvn7 77MM047j6loQsPsUFntTiC 1oTmjppGp1ITPZCKsYNqs9 1DJUNsDTNuMWGrxfJmNGnm 5ZTiNyy1YtvyBEwDWoVOsa 20iBwNgEMH8b63MZ7wmN2F 6HgNAjt5zvGy3YQfib9hbC 4zG58gSipyazhsiVdS84lM 4NDw0ExQPFKQNkkFKvPh32 5ghFFUCCEspRulW23d3Awc 6FCl5VIhI3c6StmRgieLKu 1IeEYWlLBatGhtSTVRdOgJ 5MzQStKKOo666peyPoltxh 
6D2KvMGxjFMk47D6CbCEaT 0DVnlsmBltpcWafM3TScIu 6jwmlu44QMMDesyUIFLQS9 4lUz3IxMsXYpsrbV6SVQAM 01y9jiO8FHCzv9iLmYpw4F 5XIkSMJ9sODfZoHUJYoi1g 7atUBpdQv34PNmYix84wzR 6vhOg0jBNyCzQo7nlotVeH 0m0ndzeNd7bTNWpgeGoQcP 1NBBs5Ym76El2gojyE4EvP 0R5S8PHmsl3TzHdMUx1oiM 1b35m5XbZpyNAx9atEDaDH 3aCIbAoc0CTE46enUrDmuu 2Y88xiM3oe4DFYX0jLLSON 7DcVWzeud5tqtNTZKQWvhz 6DdG99q2hNKrSHZ7hL6pBt 7ESz0yGdmhiWp85j5z09Ub 3xmwsqwkhI9gbvmapDO9S0 2N9LsBQMtLyMZL0LeydiLW 1sGGodtsPFq1JC2w3vXZLv 150NZIcOF5CtN93dp72A6g 1COgmyz8tnpvBoZvqqZqCL 314QsKiXd2SgDXPYNsKu0N 57p3QcWwIjVwvAcQpu4hkr 5IYNm9xiOZkLjGJYH0kqsR 6z2Rtx1CjQGaEEC1xzqtIT 247ye33xXOEhnjN2rCdj8I 32ccjDeiYYtombISVtse9U 5eEZLIu17HRBwt0Beldd0j 30DnQCN64v8xBpGZpLgb6l 0PrPfp5FbP87rTk39MUKcc 14EblrVdzyjpAWaedKO7x8 1l5CriNdYpEL3NoJxKA9uA 45ZTQl9GbmdM418qgLZvQZ 3dgf8JT9Ya3QAfWaJTNuI6 6ga6wioJAkB7MtOwremcSe 3HUsmE6j4afm7zWM3bprkW 7Jcf74UJvImsHrGOqSS0tG 7he1eOKQBxz1JK66afUzzD 2jtaAeW1k3qgbpQxT8Y4lm 3C9ZhZSSd2ki6Ko4Zj4sOo 3KuP7KttXAKmsjCLx9gKeM 6I5FyefGR36b9OF8rFkxVK 6YNIvsHK5fdy0ROHDuFpm4 0M7ZzCZ75sAUBq6Rkwpu09 5soDoRuEEmx9BriBtoWbr4 0zjLqMGvY7j7TuBkh2MIVd 4YfWZTRKOt0Lp1x1TkgsJz 3xhxhvEYDY0Txl8jUqbH0p 05FSDW170E4Brk3Et2Tsn9 64sixBk8xj9Eaz1VmdbenU 2KcO2wBpD9kfEUq7K5L8NU 5lpIW3pxLBGZ47LhXmHuH7 3aayFmSl21VgL3vybq2EAe 1nhZ34zdByR7TKRNLi6jXH 1WU3fG5GlEsQSsxj4SlGn2 6mAMDridbMDlW2ovdyPDUy 4yKqq31wiiTYlzsTspc9bF 5BgjDdJGaa7iB3kQfj6QMh 0AYTA3nevKu9S6LpeJwG7B 2q1mQzjkmrUINRWiyvctSi 2OIGt6nkvpYyTCsgqgosut 4nHpPnnYddn9KhXWKcVcPS 1aeKIPo431ykCa62MFpVxO 6J0LsDeQEMbXNCJCsPEnPx 4U4UKccQf96YM2pVVehbDd 0iInUMrkWaGGUkPwIY1Ntk 5kM4TGc7A3VyX1AmnIznGx 5ByZw9BY1See6eYgqUiB1x 1odwlrTdOkOVUoJhlE25Dx 4zsYOCkDiS14hdCc7gJX1Q 3XnpqyDY1Jo53Tgod58Mxf 5w3peXuUoDQIRWJbtK4kYi 1LWhjl461aekeNdmQk2JuJ 18zmtkXBaSHd7G3xobWIEJ 45vdRv1YwLbpbVeJ8BO2pR 1K6WHHqLXlqyGxX2lUMQr3 7gIS4JjropHYqNq3UzjHNB 2wklaFrsGnIfvLggxQhwQB 68WhMF4gKml7wKQcpILei6 2NVoGLBsrbQrH9c8bRDQu7 5gxxz91fYTlkR2cqmDkPWP 0tewjlNbotxqF2obibsg36 55hoUnXPjk2xma2eYSbltW 2iGTayx2t62y1J0XOInyfX 6ScbJrUjGIWS76VXsK8UEp 
6M1W8DojBHXnjenYcn7H7M 4VyvzQoIfG49xiNuYVYBiv 1dMabx7tqxUpeDYQAu8c7S 2bQN2bSNXxpGTnVKpKXl2R 1FCueyFK8jtU0zmxQZyVtJ 0sMph7dbpLD4DlzEEfJlpX 5rW3anmLNKDA81nVJvW50H 0w71NjrPNzBsa6yO0of2CZ 76hmKWewz3vGnKLbY2nPRh 3BIyzKK2U5O4Ij19G9z51J 5OLQw1i9uk8Je39V0SJ2GR 6FAPlqbXTuXOPM1UmJj1X3 1kAJBuEhXnXHNA64DDO0Bq 2H5cbxbGjC00Zqe8IqKHm7 6wd1MrcFIjgblPkTvm0veJ 2BfTod61ST4H3K9jxPg9mp 4Uq8jQxsADt7piVcuwYgVJ 3z8VNabIASkrBxq94cP3TL 4c86vSmmzcIO4x21LuD7XM 6gqoJC9MUub1AbISMFCuWr 7s4SSLsUwBjEJzNVODbV8z 1zXA806qSJVWnHpGWQ3UUC 57E1gf3WclWxUuLcwYYyU4 33azw14HJcaClFGZ5kW6Nn 1izLAQzCTkTCTpu3l9TFzB 754UYs1LuDtaEKKfaDkx7Y 6sNMSl0MAqzvlGEt4Y072v 4aAZVfU1M4cm7XqTnzhCnr 28Val6Yko2x2iJQ9YlG789 4RwLQseJrBm0Pjl6vQcY5D 4TZvXowrJenK3OCEbmJzUT 1I3iCPuCId7Vkg5rlqYDrp 7hWa53fOj9Fh0X790Bl32B 1JMkYhhLa7KPDd8i3sPGOL 355ezvqbe2QtgMf70xXBE6 0KlGGlCwuBw9cPcjq7xjgf 5kwDBRZrCvDtN27XtT2wzA 7oMJTXLhm8TAkk6K3j8u1E 0ELWm49HJEJqIvqzTdZK3n 6VziOL8abdt5gchEEBCMRg 0XUHYxHOOctkSXReILAaJV 3wMVhcD7YbfOFqhgYiN9hp 30VCkYXm8pkZ1rOg5yC4LL 1NE1ljBeJzmk6wZZ4uUdRT 6FWhcFQApH24r8AgaOLrFw 5z4mf1xZt0z0u89ntbWN5z 05Tz6QuSWq66WaqpHGK6iw 6xq7BAoiGiXC27rW6RH3ww 47AJA4geNelnpulvvfZjdn 0BOhco72YhbPpJIqDEZNmA 1ciJCLzKzezhHbBtii28UD 63IkPNf3Z4xHLASIyhxS1R 0BNWj55u3tfVB3hozoC5lY 55FD4r3EgXRMKP79hDbt5y 3SatXFFuUyX2IlV9JbaWp2 0L4u2qg18ieitQkA2HBXgq 5OmUVlZP8zQ5zGCX9wsD3p 38ueylzenb5JK5JHDGnWuO 7FLUgR5esAR2m8kl6CSQ32 7KOOHzDAxzl87i8VYk1iO2 47jAQrNH7CLIcYu1lqE7pZ 7ve96Lk22N2ZGVqVq8EJOf 6F6MrtUbHqf7AASOXDMlMp 78E3QFSTlLijRUrukdbXK8 5wMlr2ncg0SoPOKEs0Pc85 0rfSwqjq0k20rVZLzATVwP 0PYPlbP5Vdz5ivIfC0jAmf 4UWkS1obHdt123rtx5v9cx 5RpMFAJcf116DGFBcK5Ny8 6i4o7jn033PDiNab3Yc3jY 6FCWOKBTjzHsHpa0cF0br6 2b3Xo30P9KFEqBvsTRQTM6 1b903k5gadxEFXhbGHAoWD 5tA3oQh58iYSdJWhSw0yJV 4f01YssEopYUrYIO6YZmjZ 3960gvUO5yuDJtI6VtPqYS 7fc3kOECAsJoCbsV2p64rt 3CboU4vdisSItbjfbx6SqO 745VS3h8id3zcLh7Gd6gGa 5JQlQR9REVJmP34AqI7Tpc 5K4LPGFKqKO7YSbUdSQAZH 18vjAkuAMaSxfAf2EAcjP5 7is6wEBQ4zPEcjust2rB7u 1PxJV79Px9gFHPLvFO9ZOS 7cgt4TZJH3HDdmHQhfVmzx 3bl6n1sBma0Lp7etqjx5j6 
76rLK2XhT6waumcLkLNTID '''.split(' ') to_be_parallel_coordinated = df.query('track_id == @id_numbers') len(to_be_parallel_coordinated) px.parallel_coordinates(to_be_parallel_coordinated, template="plotly_dark") id_numbers = ''' 6eZ4ivJPxbK7I6QToXVPTU 6V37apVtCiUpEKcAUyUjoA 5SxlhL1idBgsfYBfR1KEcR 0C0XJ2JYr9jEGAt89JyZqJ 1XsqZ0mMrIRMAktdnEuFF8 5SUMNsXNVtR4ujz84sWEWe 1xfTdLDg10CJfhcR4Yis0z 5zHgA4J4CrOaUvQ9UD219j 1XO9zgpDMkwhmAijuYBCxb 1U6vwXAvc7VvbhqNyedGEG 2T9ZyRnW6omzsVDLo4I72l 0UBDke5y1kqTgTkgmyHiwj 23tftAc7uJnxEfy5AGS9lr 0n2gtAOGT6Pxu5cEeaugym 0nqRtO4jdv4K6AJ7hYmDW6 2wsVeO1Hqx6IqM48UXGWSO 7mmqxoKWTFZB8tHXfQpmk4 336ihMIODpi6nlL1ytSEm6 4w2lb0V0qHGwj1GR2f52c5 7cKSdtwLEayFd8MuLdZR85 44q1XQgawoP50HHMiMMWCq 4iPaNKCg8kY3rwUK3CnUw3 5EvsUz8wsUh0dP7HaixMh8 6A1prRyHlB113go9En4cX7 7iylYXaOUTO3BixPecSjhP 52pvmjSRaV7k0TCqJK5sKn 5ATIMj2gOKsj06UvoTkFxe 6Isu6pTUwBa3ftiyOpKf7s 6lajHnTKM9Fiv10kzUpD90 37VDfyF70jTo1HqGQOsrRR 3RYMOo7YF9gCkVZomhOPrK 1ZIQ5girZEdA70xIkevkrt 76C7vN5uEcuF1BXvUJMvjk 3v8Zu57HCIauve733J6PjR 0KfjaQSlDL0r7dLaXNDMv5 7sRTfvTV5EUhDY4e4LjlVS 5wI6LhywYSgmHNMVERAJpe 4K0hPQgmWzx4jGM2Q4tNQN 0WmyLH7XemypvsAHuIOCp7 2YbZbmqqxrCysQDc4AkIIX 1UegIYDIgDicEBuHhWY026 3gdHLVZqeU2mHNggC6Tzwr 1uYAog8LWWeVnqNWItZaHc 4LpsUDYp9D7VvzU0iRTCq3 2akKNicOhUSp1QHQEQDTbC 4zHo8J0WbUDDiHTAURs6kO 32Q6wqR85WhBeoqZwMRwnV 5iofFSJRoRDyiKD4kWTpf9 7owI1qTHoXGBVznJod7yuh 6rbiT8DV9h50NBjPxkDygF 5twkCu1ET6objhnLfQtgJQ 7gGLo0dwMbJhRy0JVJP00p 2ZWv2tklegv3gwKeLD35o9 7sLsIr2vhjYeR6rniJj5dj 5IOozjD7gJOOhTV1lDXrXl 2cC2PIXKFjnY8sbuS8spzw 4PHM9PG5J6IQ8fumsJuSYJ 0WcGdMWl75v33B27KafycK 6K4pZ32MorbsHeqtAwaWHW 0h0jNccol3eyMQ2mIcNcBp 2MfFjRh4gv4lU0vtYH0GaZ 3uEFKAtU1hdfcgFC60yt84 0slfqpTh3q10bNfAYb73RS 7dg0pRcn7R5VVekBryq583 082bDyzPxizG0gIqArJoQ7 73OC95krAM3n1u2LcKraBX 3qpm5w0qS99qUN0q8MzvlL 1NywSw2TUrdnpnNtGu8KL8 1zSqLFmuL6mDCVbZNj7hTR 7kPsDSN7eFLbzNF0xEchjc 2qw3xeuKWfsV8GynO2peHr 6tEeqhvdmOVU2iQqnLk2zg 5K7VRObcsBDfKnyVbVhwTx 78WeKIDpoVu6r0TziQwl3y 4ZYir67KzcmiNKTmFVqNf8 22BJjJeknJ7ff8vGGzPB98 0b81xIMQLSdUpeGv1oStXH 
4u00iLhEPkbLlclQDYuIHV 1p8QusGejMBctlhsZ3jtSF 2FzI0rp4FsSvx7N1GFs4HB 1XKqzLGxhIcpEXv8SoA8tu 6T3yaivZB0v5AODCyaR67G 4WOPKEtVmSAZvWXtyApl3h 3xvtJJiFdTR6d5N8PaFb8f 4ZAjZHxvrzKZMXdHmg0DFz 3ekvh2GPv2ebjPHYKhuIXG 0bv1k0dLjgp9f9rj5dBScM 1MQio3srmAmDC0c32Xh56A 0BZ7rkI4prRAbfkO3jo2OB 5Vu5DPFMNAJc0eoq7i8skM 1zE9o1WK0Vpocnf1H5nssQ 3zdIn3IbbJAddtf9Qo6i0D 3huj9hX9ECvhipWIGNObFl 1rFMpIUb6Hs66ypS32MOOb 1Qmb5p0mK08hxMjWJvCfBw 3C6fiBrM14YAynsEeRZXWv 4t8WpwzDLTYwMulJBavljv 7vqMKsg985FFLyK5DN9uq1 5yqoXxgDIQ9fPOcSAQUjUq 2D0FmjFP7dxrin4XanSnbo 4Yuux4zVxXI0KVHil24U9L 5MzGtEojUtMsLueJ55hRn3 2RDFWx08YULhklhS0DyVtj 4yEdofTvNsL7PnBJNDN1Sf 4n9SsVwbc7Y4tn5UfPTNn4 29ldunhjkUfuB5k1gXlqFS 6VFAILGN7uOz24elIyt4vB 2361cLjSnEpolPC3Mb0yv1 0T19N334CPKgpMpxh36KiE 3RjuP7n7x8DaOVN62TXFke 3V5LrENP5AgplQwvGeTIIU 4SNbrw7KNj3rupRnXzV31d 5XdtGPF22knBwy1fAzjSCK 3GE6KLTgmCxsNzhp0nI3Zf 75iGW6GTfBU7j6ldQNAvu4 1FvxqWCDg1xYdg0eXOr9FU 3NmVag0g3N0B4nDT0ypVk4 07jMNENLpJ60ej30L1BFPD 4KVybsvg26UiPJEVynN3qE 4k304lkj8Ga9Kp0p82cii2 1HVwhAQMU71rg7GVlQVxNz 6nYTfmQEE9ZYYFzdLRWP8Z 5QdTBAXXaFZDhsBqPT0GBI 3QElxQCbZjCqAG8yLRwLsm 5yvF3kvaX2ufVt3VvWbGP2 52uwpMhSoReK5wQ3Yxr2eC 1awdo11NQFC6THLXQAaDjV 6n6Wrf6HRSgTXwyWugKDwf 5MXF8IhBY1z63VZVRvFZUK 6NjMv3rcXwyQg4Dtr3WpoE 0JsAUsmagEqYQo8FZUkpBE 36Kumm8Qj49ABflKCvltIH 078Sr3upDQIPRIAc2IpSxy 2wJdo21bsx5HfTnwPJ3p92 0WWk0UiErQiR8EAnSjll1o 1Fs2986kJPeJR94vCqRGha 5eImJYwPyrdhUqZ4gTO6Qs 6bXr647nkFkrphCoA3L2KK 1counClRuzpBxsb8gkTCmO 7yCtrkXdQEVJQyk7pFxGyq 4sGN5db8sJsecYNWoxLPky 4EbVxLV394SADIDf5zFTHY 0tZvlW8YxwnPS7Ui7pzF9q 69LAIJUcPbsw6G8F1vCv1y 4wzeevLrnqs87z6FrcFNKu 2fKvOnZPwh4gz24MjM5hWp 3Hbl4FnRkj8TK88Jg37Omt 2mSHfW689yTYIZCu0k1Frb 00MLppbVubwv4Rbf46CCfg 1MvhXhNkwRJDH94ZloFU4c 7oM8U222NuBLUun8aFjhKu 2veD2T9UElKuePBt6FW4nO 4Bulfi18OkBRXehhVg1SzI 6M9bTZutc2QtXWl2p5TQ1I 4fM8cupzQbc6qNeDK9FXu3 7xktbw9wyJyJbwS3y4LZFg 63PP8XGwgRI7gIruMO7IG3 3C0Kxh2lnOTmlSCD1rB15W 0YFoUawskWM6iKHSyQgeNZ 1HEzYfexDpgfwyceOWvNz8 2zKB5hjGfqoYZUi7B3LAK0 3mEnnPSXvKoVouByyUqhUX 0dC2glrlKpld5xY5BAX9lK 
0XXvMZGbrz60taMwPbVGgK 2y2xE0gB5lVIGbdAnHNUIz 6Ech2zanuCQ2ihfXDOLtID 6rEcPr1jbReCGcT7LD2cB1 0gn77iNwUHN2pScHbqttN8 5NH0w0LSvcjiMjWnTwhm2u 19HDqVwakevUkynlB1Ztut 0g5kny7FqZlnS1bGMPQFWR 02PBxJsA9YIhdbiXMNN9Cd 0tpRok1p8ooccX7DQqy1BZ 1P5uhYSYMDxXpcYgpMYnkg 3UTt7dSBf9MG6833z9gNUV 0Si0HsULu8gFAtYm0BwqXI 4sO0deplZf1WJnXwrEVNUt 1fTuKuiLtYmckVKwtoT812 0hMOYGKQK3m2ipKTZKUbrI 6nsyzCRGHluwU3QIDSQr6d 5y3HyzqdypXCRFz2V8OpOF 0mPvAhvAA0IyrcbUh9KEQv 3n5N1ECcHzZDvAzHLpJULT 5Wo8dHK8N9pMyDdXI4WWsZ 7KvGuebu3RAtH0FSY8RG6l 6XEfmMikJLYbYZ3ZL4l7yK 5ijg8Z5M9WNI2VLXDaxrAz 0FGiZTL9LSSzdO05Vtgg9U 1tYLrptJ56VWore4o9Mj50 4EI3t79hsPIQJLdHitvB2A 0uwIsRVkvzZTzxqCQHlgiz 4dM9Vju1O76L2V79EebLsj 20XscF3HtxEGo8ghFhOgCx 0QPSeBG4P39z9KOihZARLf 7wbsdw0VnVe421V68sNwDk 75nO71NiNoIaGVIqYTqSvN 6Jk8VFFPoUyr7zCXIGcUQS 1UdTsJcI4MwzKIxCP5HHXG 53oWCQ8bcFSFzcQd0Xggl8 4iFYF17QReVxN6bQoKE4NM 4uAg8KXLiGu0kIvICmdUR0 '''.split(' ') to_be_parallel_coordinated = df.query('track_id == @id_numbers') len(to_be_parallel_coordinated) px.parallel_coordinates(to_be_parallel_coordinated, template="plotly_dark") fig = px.line_polar(df.sample(n=1000, random_state=42), theta = 'tempo', color_discrete_sequence=px.colors.sequential.Plasma[-2::-1], template="plotly_dark") fig.show() # Make a PCA like the one I did on the Iris, but make it 2d and 3d because that's cool pd.set_option('display.max_columns', None) nearest_neighbors_df.iloc[[69000]] ```
github_jupyter
<a href="https://colab.research.google.com/github/Shaheer-Khan/AISem3/blob/master/HW/Home_Credit_default_risk_HW.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

```
# Install modelling and Kaggle-CLI dependencies.
!pip install xgboost
!pip install kaggle

# Upload kaggle.json (the Kaggle API token) and register it with the CLI.
from google.colab import files
files.upload()
!mkdir -p ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json

# Pin a known-good kaggle CLI version.
!pip uninstall -y kaggle
# FIX: the original used a Unicode em dash ("—upgrade"), which pip rejects;
# the correct flag is "--upgrade".
!pip install --upgrade pip
!pip install kaggle==1.5.6
!kaggle -v

# Download and unpack the competition data.
!kaggle competitions download -c home-credit-default-risk

from zipfile import ZipFile

file_name = "home-credit-default-risk.zip"
with ZipFile(file_name, "r") as archive:
    archive.extractall()
    print('done')

import numpy as np
import pandas as pd

# Load the main application table plus all secondary tables.
train = pd.read_csv('application_train.csv')
bureau = pd.read_csv('bureau.csv')
bureau_balance = pd.read_csv('bureau_balance.csv')
credit_card_balance = pd.read_csv('credit_card_balance.csv')
installments_payments = pd.read_csv('installments_payments.csv')
previous_application = pd.read_csv('previous_application.csv')
POS_CASH_balance = pd.read_csv('POS_CASH_balance.csv')
test = pd.read_csv('application_test.csv')
sample_sub = pd.read_csv('sample_submission.csv')

print('Train data', train.shape)
print('bureau data', bureau.shape)
print('bureau_balance data', bureau_balance.shape)
print('previous_application data', previous_application.shape)
print('POS_CASH_balance data', POS_CASH_balance.shape)
print('installments_payments data', installments_payments.shape)
print('credit_card_balance data', credit_card_balance.shape)
print('Test data shape: ', test.shape)

# Quick look at the target balance and column cardinalities per dtype.
train['TARGET'].value_counts()
train.info()
train.select_dtypes('float64').apply(pd.Series.nunique, axis = 0)
train.select_dtypes('int64').apply(pd.Series.nunique, axis = 0)
train.select_dtypes('object').apply(pd.Series.nunique, axis = 0)
```

no null values in variables with dtype int64

```
train.select_dtypes('object').isnull().sum()
# Percentage of missing values per object column.
((train.select_dtypes('object').isnull().sum())/train.select_dtypes('object').shape[0])*100
train['NAME_CONTRACT_TYPE'].unique()
train['ORGANIZATION_TYPE'].unique()
```

As the secondary tables have a many-to-one relationship with the main table (application train/test), the hierarchy is: <br>
1. application train/test | key = SK_ID_CURR to | (bureau and previous_application) <br>
2. bureau | key = SK_ID_BUREAU to | bureau_balance <br>
3. previous_application | key = SK_ID_PREV to | (POS_CASH_balance, installments_payments and credit_card_balance) <br>
<br>
Merge the tables

2. bureau_merge

```
bureau.dtypes
bureau.isnull().sum()
((bureau.select_dtypes('float64').isnull().sum())/bureau.select_dtypes('float64').shape[0])*100
# Impute missing float columns with the column mean.
bureau.fillna(bureau.select_dtypes('float64').mean(), inplace=True)
bureau.isnull().sum()
bureau_balance.isnull().sum()

# FIX: the original joined with right_index=True, i.e. against the right
# table's positional index, which is meaningless here — SK_ID_BUREAU is an
# ordinary column of bureau_balance (the set_index calls were commented
# out), so join column-to-column instead.
bureau_merge = pd.merge(left=bureau, right=bureau_balance, how="left",
                        left_on="SK_ID_BUREAU", right_on="SK_ID_BUREAU")
print(bureau.shape)
print(bureau_balance.shape)
print(bureau_merge.shape)
```

3. previous_application_merge

```
credit_card_balance.info()
credit_card_balance.isnull().sum()
credit_card_balance.fillna(credit_card_balance.select_dtypes('float64').mean(), inplace=True)
credit_card_balance.isnull().sum()

POS_CASH_balance.info()
POS_CASH_balance.isnull().sum()
POS_CASH_balance.fillna(POS_CASH_balance.select_dtypes('float64').mean(), inplace=True)
POS_CASH_balance.isnull().sum()

installments_payments.info()
installments_payments.isnull().sum()
installments_payments.fillna(installments_payments.select_dtypes('float64').mean(), inplace=True)
installments_payments.isnull().sum()

# FIX (same issue as above): join on the SK_ID_PREV column of the right
# table, not on its positional index.
installments_payments_POS_merge = pd.merge(left=POS_CASH_balance,
                                           right=installments_payments,
                                           how="inner",
                                           left_on="SK_ID_PREV",
                                           right_on="SK_ID_PREV")
print(installments_payments.shape)
print(POS_CASH_balance.shape)
print(installments_payments_POS_merge.shape)
installments_payments_POS_merge.dtypes

installments_POS_credit_merge = pd.merge(left=installments_payments_POS_merge,
                                         right=credit_card_balance,
                                         how="left",
                                         left_on="SK_ID_PREV",
                                         right_on="SK_ID_PREV")
print(credit_card_balance.shape)
print(POS_CASH_balance.shape)
print(installments_payments.shape)
print(installments_POS_credit_merge.shape)

previous_application_merge = pd.merge(left=previous_application,
                                      right=installments_POS_credit_merge,
                                      how="left",
                                      left_on="SK_ID_PREV",
                                      right_on="SK_ID_PREV")
print(previous_application.shape)
print(credit_card_balance.shape)
print(POS_CASH_balance.shape)
print(installments_payments.shape)
print(previous_application_merge.shape)
```

1. application train/test (bureau and previous_application) <br> i.e. bureau_merge and previous_application_merge

```
# FIX (same issue as above): both sides carry SK_ID_CURR as a column.
bureau_pre_application_merge = pd.merge(left=bureau_merge,
                                        right=previous_application_merge,
                                        how="inner",
                                        left_on="SK_ID_CURR",
                                        right_on="SK_ID_CURR")
print(bureau_merge.shape)
print(previous_application_merge.shape)
print(bureau_pre_application_merge.shape)

application_merge = pd.merge(left=train,
                             right=bureau_pre_application_merge,
                             how="inner",
                             left_on="SK_ID_CURR",
                             right_on="SK_ID_CURR")
print(train.shape)
print(bureau_pre_application_merge.shape)
print(application_merge.shape)
application_merge.head(10)
```

I was able to merge the files, but because of the size of the result, processing anything on the final dataframe crashes the Colab server, so I model on the train set instead.

```
train.info()

from sklearn.preprocessing import LabelEncoder

# Label-encode binary categorical columns; one-hot encode everything else.
le = LabelEncoder()
for col in train:
    if train[col].dtype == 'object':
        if len(list(train[col].unique())) <= 2:
            le.fit(train[col])
            train[col] = le.transform(train[col])
            test[col] = le.transform(test[col])

train = pd.get_dummies(train)
test = pd.get_dummies(test)
# NOTE(review): train and test can end up with different dummy columns here;
# aligning them (train.align(test, join='inner', axis=1)) would be required
# before predicting on the Kaggle test set — confirm if that path is needed.

x = train.drop(columns=['TARGET'])
y = train['TARGET']

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=10)

from sklearn.preprocessing import MinMaxScaler
from sklearn.impute import SimpleImputer

# Median-impute, then scale to [0, 1]; fit on the training split only
# to avoid leaking test statistics.
imputer = SimpleImputer(strategy='median')
x_train = imputer.fit_transform(x_train)
x_test = imputer.transform(x_test)

scaler = MinMaxScaler(feature_range=(0, 1))
x_train_scale = scaler.fit_transform(x_train)
x_test_scale = scaler.transform(x_test)

from sklearn.linear_model import LogisticRegression
model = LogisticRegression(solver='liblinear', random_state=0)
model.fit(x_train, y_train)
pred = model.predict(x_train)
model.score(x_train, y_train)

from sklearn.metrics import classification_report
print(classification_report(y_train, pred))

from sklearn.ensemble import RandomForestClassifier
randomforest = RandomForestClassifier(max_depth=4, random_state=10)
randomforest.fit(x_train_scale, y_train)

from sklearn import metrics
# FIX: the forest was fit on the *scaled* features, so it must also predict
# on the scaled test features (the original predicted on the raw x_test).
y_pred = randomforest.predict(x_test_scale)
print(metrics.accuracy_score(y_test, y_pred))

import xgboost as xgb
from sklearn.metrics import mean_squared_error

dmatrix = xgb.DMatrix(data=x, label=y)
xg_reg = xgb.XGBRegressor(objective='reg:linear', colsample_bytree=0.3,
                          learning_rate=0.1, max_depth=5, alpha=10,
                          n_estimators=10)
xg_reg.fit(x_train, y_train)
pred = xg_reg.predict(x_test)

r_mean_square_error = np.sqrt(mean_squared_error(y_test, pred))
print("RMSE: %f" % (r_mean_square_error))

from sklearn.metrics import roc_auc_score
score = roc_auc_score(y_test, pred)
print(score)
```
github_jupyter
# Measuring PROV Provenance on the Web of Data

* Authors:
  * [Paul Groth](http://pgroth.com), [Elsevier Labs](http://labs.elsevier.com)
  * [Wouter Beek](http://www.wouterbeek.com), Vrije Universiteit Amsterdam
* Date: May 11, 2016

One of the motivations behind the original charter for the [W3C Provenance Incubator group](https://www.w3.org/2005/Incubator/prov/charter) was the need for provenance information for Semantic Web and Linked Data applications. Thus, a question to ask, three years after the introduction of the [W3C PROV family of documents](https://www.w3.org/TR/prov-overview/), is what is the adoption of PROV by the Semantic Web community. A proxy for this adoption is measuring how often PROV is used within Linked Data. In this work, we begin to do such a measurement.

Our analytics are based on the [LOD Laundromat](http://lodlaundromat.org/) (Beek et al. 2014). The LOD Laundromat crawls and cleans over 650 thousand linked data documents representing over 38 billion triples. LOD Laundromat has been used in the past to do large scale analysis of linked data (Rietveld et al. 2015). Here, we focus on core statistics based around what [PROV-DM](http://www.w3.org/TR/prov-dm/) refers to as core structures. We only look at directly asserted information about resources in the dataset (i.e. no inference was performed before calculating these statistics).

```
from IPython.display import HTML

# Hide the input cells by default so the notebook reads as a report;
# the link below toggles the raw code on and off.
# (Typo "embeded" in the displayed text fixed to "embedded".)
display(HTML('''<script>
code_show=true;
function code_toggle() {
 if (code_show){
 $('div.input').hide();
 } else {
 $('div.input').show();
 }
 code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
We note that the code for our analysis is embedded within this document but is by default hidden for easier reading. To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.'''))
```

Additionally, all code is available [online](https://github.com/pgroth/prov-wod-analysis)

```
import requests

# Ask the LOD Laundromat index which documents declare the PROV namespace:
# first fetch the total count, then fetch the full result list.
nsr = requests.get("http://index.lodlaundromat.org/ns2d/",
                   params={"uri": "http://www.w3.org/ns/prov#"})
total_prov_docs = nsr.json()["totalResults"]
nsr = requests.get("http://index.lodlaundromat.org/ns2d/",
                   params={"uri": "http://www.w3.org/ns/prov#",
                           "limit": total_prov_docs})

import io
from rdflib.namespace import RDFS, RDF
from rdflib.namespace import Namespace
from rdflib import Graph
from rdflib import URIRef

PROV = Namespace('http://www.w3.org/ns/prov#')

# Subclasses of the three PROV core classes, collected across documents.
entitySubclasses = []
activitySubclasses = []
agentSubclasses = []
# Direct instance counts of the core classes.
totalNumberOfEntities = 0
totalNumberOfActivities = 0
totalNumberOfAgents = 0
# Occurrence counts of the core PROV relations (edges).
numWasDerivedFrom = 0
numUsed = 0
numWGB = 0
numWAW = 0
numWasAttributedTo = 0

for doc in nsr.json()["results"]:
    # Fetch the cleaned document from the LDF endpoint and parse it as Turtle.
    headers = {'Accept': 'text/turtle'}
    x = requests.get("http://ldf.lodlaundromat.org/" + doc, headers=headers)
    tmpGraph = Graph()
    tmpGraph.parse(io.StringIO(x.text), format="turtle")

    # Collect declared subclasses of each core class.
    entitySubclasses.extend(tmpGraph.subjects(RDFS.subClassOf, PROV.Entity))
    activitySubclasses.extend(tmpGraph.subjects(RDFS.subClassOf, PROV.Activity))
    agentSubclasses.extend(tmpGraph.subjects(RDFS.subClassOf, PROV.Agent))

    # Count directly-typed instances of each core class.
    totalNumberOfEntities += sum(1 for _ in tmpGraph.subjects(RDF.type, PROV.Entity))
    totalNumberOfActivities += sum(1 for _ in tmpGraph.subjects(RDF.type, PROV.Activity))
    totalNumberOfAgents += sum(1 for _ in tmpGraph.subjects(RDF.type, PROV.Agent))

    # Count the core relation edges.
    numWasDerivedFrom += sum(1 for _ in tmpGraph.triples((None, PROV.wasDerivedFrom, None)))
    numUsed += sum(1 for _ in tmpGraph.triples((None, PROV.used, None)))
    numWGB += sum(1 for _ in tmpGraph.triples((None, PROV.wasGeneratedBy, None)))
    numWAW += sum(1 for _ in tmpGraph.triples((None, PROV.wasAssociatedWith, None)))
    numWasAttributedTo += sum(1 for _ in tmpGraph.triples((None, PROV.wasAttributedTo, None)))

from IPython.display import display, Markdown

# Render the collected statistics as a Markdown report.
output = "### Statistics \n"
output += "We first look at how many times both the namespace is declared and how many resources are of a given core type.\n"
output += "* The PROV namespace occurs in " + str(total_prov_docs) + " documents.\n"
output += "* Number of Entites: " + str(totalNumberOfEntities) + "\n"
output += "* Number of Activities: " + str(totalNumberOfActivities) + "\n"
output += "* Number of Agents: " + str(totalNumberOfAgents) + "\n\n"
output += "We also looked at the number of PROV edges that were used with the various documents.\n"
output += "* Number of wasDerivedFrom edges: " + str(numWasDerivedFrom) + "\n"
output += "* Number of used edges: " + str(numUsed) + "\n"
output += "* Number of wasGeneratedBy edges: " + str(numWGB) + "\n"
output += "* Number of wasAssociatedWith edges: " + str(numWAW) + "\n"
output += "* Number of wasAttributedTo edges: " + str(numWasAttributedTo) + "\n\n"
display(Markdown(output))
```

We also note that PROV has been extended by 8 other ontologies as calculated by manual inspection of the extensions of the various core classes as listed in the appendix.

### Conclusion

This initial analysis shows some uptake within the Semantic Web community. However, while PROV is widely referenced within the community's literature, it appears that direct usage of the standard could be improved (at least within the dataset represented by the LOD Laundromat). It should be noted that our analysis is preliminary and there is much room for further work. In particular, we aim to look at the indirect usage of PROV through usage by ontologies that extend it (e.g. The Provenance Vocabulary) or that map to it such as Dublin Core or [PAV](http://pav-ontology.github.io/pav/). Understanding such indirect usage will help us better understand the true state of provenance interoperability within Linked Data. Likewise, it would be interesting to perform network analysis to understand the role that PROV plays within the Linked Data network.

### References

* Beek, W. & Rietveld, L & Bazoobandi, H.R. & Wielemaker, J. & Schlobach, S.: LOD Laundromat: A Uniform Way of Publishing Other People's Dirty Data. Proceedings of the International Semantic Web Conference (2014).
* Rietveld, L. & Beek, W. & Schlobach, S.: LOD Lab: Experiments at LOD Scale. Proceedings of the International Semantic Web Conference (2015).

### Appendix: Classes that subclass a PROV core class

```
print("Subclasses of Entity")
for i in entitySubclasses:
    print(i)

print("Subclasses of Activity")
for i in activitySubclasses:
    print(i)

print("Subclasses of Agent")
for i in agentSubclasses:
    print(i)
```
github_jupyter
```
try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  # FIX: the requirement must be quoted — an unquoted ">=2.0.0" is taken by
  # the shell as an output redirection (it creates a file named "=2.0.0")
  # instead of being passed to pip as a version constraint.
  !pip install -q 'tensorflow-gpu>=2.0.0'
!pip install --quiet neural-structured-learning

from __future__ import absolute_import, division, print_function, unicode_literals

import neural_structured_learning as nsl
import tensorflow as tf

# Resets notebook state
tf.keras.backend.clear_session()

print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
# NOTE(review): tf.test.is_gpu_available() is deprecated in newer TF 2.x in
# favour of tf.config.list_physical_devices('GPU') — kept for compatibility.
print("GPU is", "available" if tf.test.is_gpu_available() else "NOT AVAILABLE")

# Download the Cora citation dataset and the preprocessing script, then
# build neighbor-augmented train/test TFRecord files.
!wget --quiet -P /tmp https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz
!tar -C /tmp -xvzf /tmp/cora.tgz
!wget https://raw.githubusercontent.com/tensorflow/neural-structured-learning/master/neural_structured_learning/examples/preprocess/cora/preprocess_cora_dataset.py

!python preprocess_cora_dataset.py \
--input_cora_content=/tmp/cora/cora.content \
--input_cora_graph=/tmp/cora/cora.cites \
--max_nbrs=5 \
--output_train_data=/tmp/cora/train_merged_examples.tfr \
--output_test_data=/tmp/cora/test_examples.tfr

### Experiment dataset
TRAIN_DATA_PATH = '/tmp/cora/train_merged_examples.tfr'
TEST_DATA_PATH = '/tmp/cora/test_examples.tfr'

### Constants used to identify neighbor features in the input.
NBR_FEATURE_PREFIX = 'NL_nbr_'
NBR_WEIGHT_SUFFIX = '_weight'


class HParams(object):
  """Hyperparameters used for training."""

  def __init__(self):
    ### dataset parameters
    self.num_classes = 7
    self.max_seq_length = 1433
    ### neural graph learning parameters
    self.distance_type = nsl.configs.DistanceType.L2
    self.graph_regularization_multiplier = 0.1
    self.num_neighbors = 1
    ### model architecture
    self.num_fc_units = [50, 50]
    ### training parameters
    self.train_epochs = 100
    self.batch_size = 128
    self.dropout_rate = 0.5
    ### eval parameters
    self.eval_steps = None  # All instances in the test set are evaluated.


HPARAMS = HParams()


def parse_example(example_proto):
  """Extracts relevant fields from the `example_proto`.

  Args:
    example_proto: An instance of `tf.train.Example`.

  Returns:
    A pair whose first value is a dictionary containing relevant features
    and whose second value contains the ground truth labels.
  """
  # The 'words' feature is a multi-hot, bag-of-words representation of the
  # original raw text. A default value is required for examples that don't
  # have the feature.
  feature_spec = {
      'words':
          tf.io.FixedLenFeature([HPARAMS.max_seq_length],
                                tf.int64,
                                default_value=tf.constant(
                                    0,
                                    dtype=tf.int64,
                                    shape=[HPARAMS.max_seq_length])),
      'label':
          tf.io.FixedLenFeature((), tf.int64, default_value=-1),
  }
  # We also extract corresponding neighbor features in a similar manner to
  # the features above.
  for i in range(HPARAMS.num_neighbors):
    nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'words')
    nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i, NBR_WEIGHT_SUFFIX)
    feature_spec[nbr_feature_key] = tf.io.FixedLenFeature(
        [HPARAMS.max_seq_length],
        tf.int64,
        default_value=tf.constant(
            0, dtype=tf.int64, shape=[HPARAMS.max_seq_length]))

    # We assign a default value of 0.0 for the neighbor weight so that
    # graph regularization is done on samples based on their exact number
    # of neighbors. In other words, non-existent neighbors are discounted.
    feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(
        [1], tf.float32, default_value=tf.constant([0.0]))

  features = tf.io.parse_single_example(example_proto, feature_spec)
  labels = features.pop('label')
  return features, labels


def make_dataset(file_path, training=False):
  """Creates a `tf.data.TFRecordDataset`.

  Args:
    file_path: Name of the file in the `.tfrecord` format containing
      `tf.train.Example` objects.
    training: Boolean indicating if we are in training mode.

  Returns:
    An instance of `tf.data.TFRecordDataset` containing the
    `tf.train.Example` objects.
  """
  dataset = tf.data.TFRecordDataset([file_path])
  if training:
    dataset = dataset.shuffle(10000)
  dataset = dataset.map(parse_example)
  dataset = dataset.batch(HPARAMS.batch_size)
  return dataset


train_dataset = make_dataset(TRAIN_DATA_PATH, training=True)
test_dataset = make_dataset(TEST_DATA_PATH)


def show_first_batch(dataset):
  """Prints the features, neighbor features/weights, and labels of one batch.

  Factored out of the original copy-pasted train/test inspection loops so
  both datasets are described by the same code.
  """
  for feature_batch, label_batch in dataset.take(1):
    print('Feature list:', list(feature_batch.keys()))
    print('Batch of inputs:', feature_batch['words'])
    nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, 0, 'words')
    nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, 0, NBR_WEIGHT_SUFFIX)
    print('Batch of neighbor inputs:', feature_batch[nbr_feature_key])
    print('Batch of neighbor weights:',
          tf.reshape(feature_batch[nbr_weight_key], [-1]))
    print('Batch of labels:', label_batch)


show_first_batch(train_dataset)
show_first_batch(test_dataset)
```
github_jupyter