code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="A0-sUApS7MIs" # _Lambda School Data Science — Tree Ensembles_ # # # Decision Trees Assignment # - # ## Part 1: House Price Regression # # Apply decision trees to the Ames housing dataset you've worked with this week! # # - Try multiple features # - Try features you've engineered # - Try different `max_depth` paramaters # - What's the best Test Root Mean Squared Error you can get? *Share with your cohort on Slack!* # - What's a cool visualization you can make? *Share with your cohort on Slack!* # + [markdown] colab_type="text" id="kWm7mMlH9sl2" # ## Part 2 / Stretch: "Play Tennis" Classification # + [markdown] colab_type="text" id="2f5-S87kg6gJ" # We'll reproduce the "Play Tennis" example from <NAME>lan's 1986 paper, [Induction of Decison Trees](https://link.springer.com/content/pdf/10.1007%2FBF00116251.pdf). # # [According to Wikipedia](https://en.wikipedia.org/wiki/Ross_Quinlan), "<NAME> is a computer science researcher in data mining and decision theory. He has contributed extensively to the development of decision tree algorithms, including inventing the canonical C4.5 and ID3 algorithms." # + [markdown] colab_type="text" id="Qfv6Zwdoje7_" # #### "Table 1 shows a small training set" # + colab={} colab_type="code" id="oUE-G0pgg58u" import pandas as pd columns = 'No. 
Outlook Temperature Humidity Windy PlayTennis'.split() raw = """1 sunny hot high false N 2 sunny hot high true N 3 overcast hot high false P 4 rain mild high false P 5 rain cool normal false P 6 rain cool normal true N 7 overcast cool normal true P 8 sunny mild high false N 9 sunny cool normal false P 10 rain mild normal false P 11 sunny mild normal true P 12 overcast mild high true P 13 overcast hot normal false P 14 rain mild high true N""" data = [row.split() for row in raw.split('\n')] tennis = pd.DataFrame(data=data, columns=columns).set_index('No.') tennis['PlayTennis'] = (tennis['PlayTennis'] == 'P').astype(int) tennis # + [markdown] colab_type="text" id="BvFu9kvJj9kk" # #### "A decision tree that correctly classifies each object in the training set is given in Figure 2." # # <img src="https://i.imgur.com/RD7d0u0.png" height="300"> # + [markdown] colab_type="text" id="kHkkeALqjNiS" # In this dataset, the tennis player decided to play on 64% of the days, and decided not to on 36% of the days. # + colab={} colab_type="code" id="qeLDinBihMDQ" tennis['PlayTennis'].value_counts(normalize=True) * 100 # + [markdown] colab_type="text" id="Ype2-apnlaG2" # The tennis player played on 100% of the overcast days, 40% of the sunny days, and 60% of the rainy days # + colab={} colab_type="code" id="LfBMB0Soh58T" tennis.groupby('Outlook')['PlayTennis'].mean() * 100 # + [markdown] colab_type="text" id="jx3-MFvalrQC" # On sunny days, the tennis player's decision depends on the humidity. (The Outlook and Humidity features interact.) # + colab={} colab_type="code" id="FmVN7hwEiHE5" sunny = tennis[tennis['Outlook']=='sunny'] sunny.groupby('Humidity')['PlayTennis'].mean() * 100 # + [markdown] colab_type="text" id="y0it0HsFl47M" # On rainy days, the tennis player's decision depends on the wind. (The Outlook and Windy features interact.) 
# + colab={} colab_type="code" id="CeOd7rGeivJp" rainy = tennis[tennis['Outlook']=='rain'] rainy.groupby('Windy')['PlayTennis'].mean() * 100 # - # #### Before modeling, we will ["encode" categorical variables, using pandas](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html?highlight=get_dummies#computing-indicator-dummy-variables) # + colab={} colab_type="code" id="VCyn34nEi0nz" y = tennis['PlayTennis'] X = pd.get_dummies(tennis.drop(columns='PlayTennis')) X # + [markdown] colab_type="text" id="UvOkHk8o-h_r" # ## Train a Decision Tree Classifier # Get a score of 100% (accuracy) # # https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html # - # + [markdown] colab_type="text" id="9uNPrbfS-vzL" # ## Compare to Logistic Regression # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html # - # + [markdown] colab_type="text" id="EwF0phe6-oR8" # ## Visualize the tree # https://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html # -
module4-decision-trees/decision-trees-assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### This notebook reproduces the urban accessibility map for Munich, from this tutorial: # https://towardsdatascience.com/measuring-pedestrian-accessibility-97900f9e4d56 # # ## Steps # * Step 1: get the POIs for Munich, including GPS coordinates # * Step 2: create Pandana network # * Step 3: assign POIs, compute shortest distances # * Step 4: plot the shortest distances on the accesibility heatmap # # + # libraries needed import pandas as pd import numpy as np import datetime as dt import time import os import pandana as pdna import geopandas as gpd import osmnx as ox from shapely.geometry.polygon import Polygon from shapely.geometry import box from geopy.geocoders import Nominatim from geopy.extra.rate_limiter import RateLimiter # to display plot in the notebook # %matplotlib inline # - # ## Step 1: get the POIs for Munich # Get network map and POIs from the network # # Use the documentation from here: # https://osmnx.readthedocs.io/en/stable/osmnx.html # # get a GeoDataFrame from Munich place = ox.gdf_from_place('Munich, Germany') bounds_city = place['geometry'][0].bounds print(bounds_city) # order bounds: 'east','north','south','west' polygon_city = Polygon((list(box(bounds_city[0],bounds_city[1],bounds_city[2],bounds_city[3]).exterior.coords))) # set POIs of interest amenity = 'hospital'#['hospital','cafe','school','pharmacy']#'restaurant','clinic','bank','park', # + # get POIs from Polygon pois = ox.pois.pois_from_polygon(polygon_city, [amenity]) print('Number of POIs') print(len(pois)) print(pois[['amenity','name']].head()) # a long list of all the info about the POIs: (maybe useful in the future) # print(list(pois.columns)) # - ''' # If we want to plot the network: # city graph G = ox.graph_from_address('Munich, Germany') ox.plot_graph(G) 
''' # + ''' # IF we want to plot the POIs on the map: import matplotlib.pyplot as plt world = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres')) # We restrict to South America. ax = world[world.continent == 'Europe'].plot( color='white', edgecolor='black') # We can now plot our ``GeoDataFrame``. pois.plot(ax=ax, color='red') plt.show() ''' # - # Now we need to transform the addresses in GPS coordinates, because they are not available in the POIs data frame # ### First test geocoding works with Nominatim (number of requests is limited) # If the next cell fails, a geocoding alternative to extract the coordinates from POI names may be needed. # A list of alternatives is here: https://wiki.openstreetmap.org/wiki/Search_engines # Failed attempts (adddresses not recognized): # * use Nominatim rate_limiter.RateLimiter to introduce delays and retries # * geocoder Python: https://geocoder.readthedocs.io/ # * overpass API: https://wiki.openstreetmap.org/wiki/Overpass_API/Python_Wrapper # Code not working on POI addersses # import overpass # api = overpass.API() # response = api.get('node["name"="6 Gartenstadtstraße München DE"]') # response['features'][0]['geometry']['coordinates'][0] # # Working but need to call from Python: # * http://photon.komoot.de/ # * https://www.ideeslibres.org/GeoCheck/ # basic test we can encode a location by address: geolocator = Nominatim(user_agent="y") location = geolocator.geocode("6 Gartenstadtstraße München DE") print((location.latitude, location.longitude)) # + t1 = time.time() pois['complete address'] = pois['addr:housenumber'] + ' ' + pois['addr:street'] + ' ' + pois['addr:city'] + ' ' + pois['addr:country'] # this takes long for all POIs (several minutes or more) (alternatively take less POIS # limited to first 100 points for now pois_test = pois.reset_index().copy() def fct(x): try: location = geolocator.geocode(x) except: return np.nan return location t2 = time.time() pois_test['loc'] = pois_test['complete 
address'].apply(lambda x: fct(x)) print('time geocoding') print(t2-t1) pois_test = pois_test.dropna(subset=['loc']).reset_index() # extract coordinates pois_test['lat'] = pois_test['loc'].apply(lambda x:x.latitude) pois_test['lon'] = pois_test['loc'].apply(lambda x:x.longitude) pois_test.loc[pois_test['complete address'].isnull(),'lat'] = np.NaN pois_test.loc[pois_test['complete address'].isnull(),'lon'] = np.NaN pois_test = pois_test[['name','lat','lon','complete address']].dropna() print(pois_test.head()) # always cache geocoded addresses because there a limit per day: if not os.path.exists('pois_coord'): os.makedirs('pois_coord') pois_test.to_pickle('pois_coord/' + amenity + '_' + str(dt.datetime.now()).replace(':','').replace(' ','-').replace('.','-') + '.pkl') # - # check coordinates fine: pois_test.head() # Keep the POIs for later, next create the Pandana network we want to compute the shortest paths on # ## Step 2: create Pandana network, assign POIs, compute shortest distances # Create Pandana network, precomute, initiatilize, set POIs and calculate nearest POIs as in the tutorial: # https://udst.github.io/pandana/tutorial.html # # First need to transform the nodes and edges from the shapefile to match the Pandana format: # * extract x,y columns for coordinates # * extract weight column of edges # * make the nodes index int, from the osmid, by reseting the index of nodes and merging # # ### only run the next cell if no shape file already available # + # reorder the bounds_city to match the funciton description: # from longitude min, latitude min, longitude max, latitude max to # north, south, east, west # this takes long! 
several minutes or longer G = ox.graph_from_bbox(bounds_city[3], bounds_city[1], bounds_city[2], bounds_city[0], network_type='walk') # get road network and save as .shp ox.save_graph_shapefile(G, filename='sample', folder='data', encoding='utf-8') # - # ### from here on run cells normally nodes = gpd.read_file('data/sample/nodes/nodes.shp') edges = gpd.read_file('data/sample/edges/edges.shp') nodes['x'] = nodes['geometry'].apply(lambda x:x.coords[0][0]) nodes['y'] = nodes['geometry'].apply(lambda x:x.coords[0][1]) edges['weight'] = edges['length'].astype(float) nodes['x'] = nodes['x'].astype(float) nodes['y'] = nodes['y'].astype(float) nodes['ix']=nodes.index test = pd.merge(nodes[['osmid','ix']].rename(columns={'osmid':'from','ix':'from_ix'}),edges,on='from') test = pd.merge(nodes[['osmid','ix']].rename(columns={'osmid':'to','ix':'to_ix'}),test,on='to') nodes.head() net=pdna.Network(nodes["x"], nodes["y"], test["from_ix"], test["to_ix"], test[["weight"]]) # check number of nodes in the network net.node_ids print('this takes also some minutes') net.precompute(3000) # ## Step 3: calculate shortest distances to POIs # (continue tutorial from here: https://udst.github.io/pandana/tutorial.html) # net.init_pois(2,10000,10) # if not already existing load it from cached data pois_test = pd.read_pickle('pois_coord/hospital_2020-01-30-123302-273052.pkl') print(len(pois_test)) # set pois coordinates net.set_pois(amenity, pois_test['lon'], pois_test['lat']) # ## Step 4: plot the shortest distances as accesibility heatmap # ## Still need to debug! 
# # accessiblity df are the distances from POIs to nearest restaurant # # need to check why they are mostly = max distance (2000m) # + amenity = 'hospital' def plot_nearest_amenity(network,amenity,n, distance, num_pois, bbox): accessibility = network.nearest_pois(distance=distance, category=amenity, num_pois=num_pois) # important to rearrange the coordinates: network.plot(accessibility[n], [bounds_city[1],bounds_city[0],bounds_city[3],bounds_city[2]]) #ax.set_facecolor('k') #ax.set_title('Pedestrian accessibility in Casablanca (Walking distance to {}, meters (n = {}))'.format(amenity,n), fontsize=14); plot_nearest_amenity(net,amenity,1, distance=1000, num_pois=3, bbox = bounds_city) # - # ### Extensions # * compare walking distance to 1st vs 5th nearest POI -> the 5th nearest POI is a better measure of walkability # * define and calculate a metric which weights more POI types (school, clinic, restaurant) (as suggested in the article) # * extend as in the tutorial to plot time to travel using public transport, road or waling - unclear how this is done exactly: "let’s build a grid of 250 square meter cells behind our road network, attach each cell to its closest node, and translate the distances into walk, drive or public transit time.") # * extensions from tutorial: # * "Good quality, locally produced GIS data can be used instead of OSM (and indeed is preferable)." # * "An important extension: this method is a great input for property price modelling or building-level predictive models. Hedonic price models assume that, for example, home buyers pay more for a home where they can easily walk to a Starbucks or drive to a hospital. Snapping tax lot boundaries (or OSM building footprints) to the network, the same way as we just attached grid squares to it, allows us to derive this data for any given property." 
# * "Likewise, machine learning models to predict crime or fire risk require as many attributes as possible to characterize each building’s place in the urban fabric. Rapid network-constrained queries through this kind of framework can answer many questions "
accessibility/plot_urban_accessibility.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + id="esFQHHUNKbqb" colab_type="code" colab={} import numpy as np import pandas as pd from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split import random, math, os, time # + id="zdRJ672gKgOc" colab_type="code" colab={} SEED = 1234 random.seed(SEED) # + id="cT4xzZAYXv5y" colab_type="code" colab={} import matplotlib.pyplot as plt # + id="nf-OIx9CKhjC" colab_type="code" colab={} filepath = 'https://raw.githubusercontent.com/rani700/SSIM/master/SSIM/data/simplified_PM25.csv' df = pd.read_csv(filepath, dayfirst=True) # + id="OJqQV7NVLS3w" colab_type="code" colab={} train_sampling_params = { 'dim_in': 11, 'output_length': 5, 'min_before': 20, 'max_before': 25, 'min_after': 20, 'max_after': 25, } # + id="_iEGE-UkMVsj" colab_type="code" colab={} test_sampling_params = { 'dim_in': 11, 'output_length': 5, 'min_before': 20, 'max_before': 25, 'min_after': 20, 'max_after': 25, } # + id="gQb36AFFKj-q" colab_type="code" outputId="931afd24-8dc2-4dc0-d39f-5aa47d5b66f9" colab={"base_uri": "https://localhost:8080/", "height": 195} df.head() # + id="GCRV1BXYKnI6" colab_type="code" colab={} len(df[:5000]) df = df[:5000] # + id="FEZoD24g3OJo" colab_type="code" outputId="0bbc2b3c-25e5-49b5-911d-cf919463a0b0" colab={"base_uri": "https://localhost:8080/", "height": 34} df.iloc[0, 1] # + id="Xa-Wu-jrKo2a" colab_type="code" outputId="49c7ca9b-4a01-4a7e-fa5d-8df6ca2e4c6f" colab={"base_uri": "https://localhost:8080/", "height": 87} print(df.iloc[0, 0]) print(df.iloc[4000, 0]) print(df.iloc[4001, 0]) print(df.iloc[4999, 0]) df.set_index('date', inplace=True) # + id="0iDB97mVKqA6" colab_type="code" outputId="d2be866e-9d03-4dc0-b261-e788ec377300" colab={"base_uri": "https://localhost:8080/", "height": 225} df.head() # + id="IqtM_6wwKrih" 
colab_type="code" colab={} pm25 = df['pm2.5'].values.copy().reshape(-1, 1) # + id="nw8nedAGKtV4" colab_type="code" outputId="9fc199e4-0000-46d5-ff2e-43bda251c8c5" colab={"base_uri": "https://localhost:8080/", "height": 34} scaler_x = StandardScaler() scaler_x.fit(df[['pm2.5', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']]) # + id="Sg3EabLGKvNQ" colab_type="code" colab={} df[['pm2.5', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']] = scaler_x.transform(df[['pm2.5', 'DEWP', 'TEMP', 'PRES', 'Iws', 'Is', 'Ir']]) # + id="wsCFEBY6KxUX" colab_type="code" colab={} scaler_y = StandardScaler() scaler_y.fit(pm25) y_all = scaler_y.transform(pm25) # + id="aiHuv1SMK0EP" colab_type="code" colab={} df_train = df.loc['2/01/2010 0:00': '17/06/2010 16:00'].copy() df_test = df.loc['17/06/2010 17:00': '29/07/2010 7:00'].copy() # + id="g-Aje_VNK2op" colab_type="code" colab={} y = y_all # + id="SE2nqexWK4Q3" colab_type="code" colab={} def train_val_test_generate(dataframe, model_params): ''' :param dataframe: processed dataframe :param model_params: for input dim :return: train_x, train_y, test_x, test_y with the same length (by padding zero) ''' train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples = pad_all_cases(dataframe, dataframe['pm2.5'].values, model_params, model_params['min_before'], model_params['max_before'], model_params['min_after'], model_params['max_after'], model_params[ 'output_length']) train_val_test_y = np.expand_dims(train_val_test_y, axis=2) return train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples # + id="nMrsAqS2K6V_" colab_type="code" colab={} def generate_samples(x, y, model_params, seq_len_before=7, seq_len_after=7, output_seq_len=9): """ Generate samples, input past and future, target middle :param x: input dataframe :param y: target variable to impute :param seq_len_before: :param seq_len_after: :param output_seq_len: :return: (inputsequence, targetsequence) """ total_samples = x.shape[0] total_len = seq_len_before + 
seq_len_after + output_seq_len input_batch_idxs = [list(range(i, i + seq_len_before)) + list( range(i + seq_len_before + output_seq_len, i + seq_len_before + output_seq_len + seq_len_after)) for i in range((total_samples - total_len + 1))] input_seq = np.take(x, input_batch_idxs, axis=0) z = np.zeros((output_seq_len, model_params['dim_in'])) input_seq = np.array([np.concatenate((i[:seq_len_before], z, i[seq_len_before:])) for i in input_seq]) output_batch_idxs = [list(range(i + seq_len_before, i + seq_len_before + output_seq_len)) for i in range((total_samples - total_len + 1))] output_seq = np.take(y, output_batch_idxs, axis=0) return input_seq, output_seq def pad_all_cases(x, y, model_params, min_len_before=7, max_len_before=9, min_len_after=7, max_len_after=9, targetlength=9): """ variable length inputs, fix length outputs :param x: input dataframe :param y: target variable to impute :param min_len_before: :param max_len_before: :param min_len_after: :param max_len_after: :param targetlength: :return: inputsequence with same length, outputsequence with same length """ total_x = [] total_y = [] total_len_x = [] totle_len_before_x = [] for l_before in range(min_len_before, max_len_before + 1): for l_after in range(min_len_after, max_len_after + 1): case_x, case_y = generate_samples(x.values, y, model_params, l_before, l_after, targetlength) # npad is a tuple of (n_before, n_after) for each dimension len_x = np.full(case_x.shape[0], case_x.shape[1]) len_before_sequence_x = np.full(case_x.shape[0], l_before) npad = ((0, 0), (0, max_len_before - l_before + max_len_after - l_after), (0, 0)) same_length_x = np.pad(case_x, pad_width=npad, mode='constant', constant_values=0) total_x.append(same_length_x) total_y.append(case_y) total_len_x.append(len_x) totle_len_before_x.append(len_before_sequence_x) ## total x,y concatenated_x = np.concatenate(total_x, axis=0) concatenated_y = np.concatenate(total_y, axis=0) len_all_case = np.concatenate(total_len_x).ravel() 
len_before_all_case = np.concatenate(totle_len_before_x).ravel() return concatenated_x, concatenated_y, len_all_case, len_before_all_case # + id="zRUwUr7TK8Pm" colab_type="code" colab={} # x_samples, y_samples, x_len, x_before_len = train_val_test_generate(df_train, train_sampling_params) # def train_val_test_generate(dataframe, model_params): dataframe = df_train dataframe1 = df_test model_params = train_sampling_params # + id="AG-U3p3HMAY4" colab_type="code" colab={} x = dataframe x1 = dataframe1 y = dataframe['pm2.5'].values y1 = dataframe1['pm2.5'].values # model_params = model_params min_len_before = model_params['min_before'] max_len_before = model_params['max_before'] min_len_after = model_params['min_after'] max_len_after = model_params['max_after'] targetlength = model_params['output_length'] # + id="3o6hAfNUMEHG" colab_type="code" colab={} total_x = [] total_y = [] total_len_x = [] total_len_before_x = [] # + id="xOl--2TIMHE6" colab_type="code" colab={} def generate_samples(x, y, model_params, seq_len_before=7, seq_len_after=7, output_seq_len=9): """ Generate samples, input past and future, target middle :param x: input dataframe :param y: target variable to impute :param seq_len_before: :param seq_len_after: :param output_seq_len: :return: (inputsequence, targetsequence) """ total_samples = x.shape[0] total_len = seq_len_before + seq_len_after + output_seq_len input_batch_idxs = [list(range(i, i + seq_len_before)) + list( range(i + seq_len_before + output_seq_len, i + seq_len_before + output_seq_len + seq_len_after)) for i in range((total_samples - total_len + 1))] input_seq = np.take(x, input_batch_idxs, axis=0) z = np.zeros((output_seq_len, model_params['dim_in'])) input_seq = np.array([np.concatenate((i[:seq_len_before], z, i[seq_len_before:])) for i in input_seq]) output_batch_idxs = [list(range(i + seq_len_before, i + seq_len_before + output_seq_len)) for i in range((total_samples - total_len + 1))] output_seq = np.take(y, output_batch_idxs, axis=0) 
return input_seq, output_seq # + id="PsUmTa7VLs4a" colab_type="code" colab={} # + id="4vkM1dp0Levx" colab_type="code" colab={} def train_test_split_SSIM(x, y, x_len, x_before_len, model_params, SEED): ''' :param x: all x samples :param y: all y samples :param model_params: parameters :param SEED: random SEED :return: train set, test set ''' ## check and remove samples with NaN (just incase) index_list = [] for index, (x_s, y_s, len_s, len_before_s) in enumerate(zip(x, y, x_len, x_before_len)): if (np.isnan(x_s).any()) or (np.isnan(y_s).any()): index_list.append(index) x = np.delete(x, index_list, axis=0) y = np.delete(y, index_list, axis=0) x_len = np.delete(x_len, index_list, axis=0) x_before_len = np.delete(x_before_len, index_list, axis=0) x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=None, random_state=SEED, shuffle=False) x_train_len, x_test_len = train_test_split(x_len, test_size=None, random_state=SEED, shuffle=False) x_train_before_len, x_test_before_len = train_test_split(x_before_len, test_size=None, random_state=SEED, shuffle=False) return x_train, y_train, x_train_len, x_train_before_len # + id="7YDds1CoK-XW" colab_type="code" colab={} # + id="IxUwqdpgMLez" colab_type="code" colab={} for l_before in range(min_len_before, max_len_before+1): for l_after in range(min_len_after, max_len_after+1): case_x, case_y = generate_samples(x.values, y, model_params, l_before, l_after, targetlength) len_x = np.full(case_x.shape[0], case_x.shape[1]) len_before_sequence_x = np.full(case_x.shape[0], l_before) npad = ((0, 0), (0, max_len_before - l_before + max_len_after - l_after), (0, 0)) same_length_x = np.pad(case_x, pad_width=npad, mode='constant', constant_values=0) total_x.append(same_length_x) total_y.append(case_y) total_len_x.append(len_x) total_len_before_x.append(len_before_sequence_x) concatenated_x = np.concatenate(total_x, axis=0) concatenated_y = np.concatenate(total_y, axis=0) len_all_case = np.concatenate(total_len_x).ravel() 
len_before_all_case = np.concatenate(total_len_before_x).ravel() # ############### train_val_test_x = concatenated_x train_val_test_y = concatenated_y len_x_samples = len_all_case len_before_x_samples = len_before_all_case # + id="118a-68IwQSY" colab_type="code" colab={} x_samples, y_samples, x_len, x_before_len = train_val_test_x, train_val_test_y, len_x_samples, len_before_x_samples x_test, y_test, x_test_len, x_test_before_len = train_test_split_SSIM(x_samples, y_samples, x_len, x_before_len, test_sampling_params, SEED) # + id="iyHn9zNWMwFQ" colab_type="code" outputId="735532ba-456b-4eaf-a910-b97e19fdd31d" colab={"base_uri": "https://localhost:8080/", "height": 52} print('X_samples:{}'.format(x_samples.shape)) print('y_samples:{}'.format(y_samples.shape)) # + id="z1IKka41LXX0" colab_type="code" colab={} X = x_samples y = y_samples # + id="Nkiw9giDlEcN" colab_type="code" outputId="26401c67-7fc6-4a45-c4b4-72bdd034d7de" colab={"base_uri": "https://localhost:8080/", "height": 34} X.shape # + id="aot0ZvLInG4P" colab_type="code" colab={} X = X.reshape((X.shape[0], X.shape[1], X.shape[2], 1)) # + id="dxykLW_inSVG" colab_type="code" colab={} # + id="BonfMV6DlJZX" colab_type="code" outputId="a6f1e69c-fe23-4ba2-b24e-cb984dc73e49" colab={"base_uri": "https://localhost:8080/", "height": 34} X[0].shape # + id="yy0E6CKkN63v" colab_type="code" outputId="3ad7e83b-7485-40a3-a82c-10fcf61b1149" colab={"base_uri": "https://localhost:8080/", "height": 34} y.shape # + id="mA8dbaipN_ss" colab_type="code" colab={} # X[0] # + id="KLEV5I8ONLn_" colab_type="code" colab={} # + id="PrIP3xmBM_ob" colab_type="code" colab={} from keras.models import Sequential from keras.layers import Dense, Flatten from keras.layers.convolutional import Conv2D # + id="80SDJqv1NKn9" colab_type="code" colab={} model = Sequential() model.add(Conv2D(filters=64, kernel_size=3, activation='relu', input_shape=(55,11,1))) model.add(Conv2D(filters=32, kernel_size=3, activation='relu')) model.add(Flatten()) 
model.add(Dense(50, activation='relu')) model.add(Dense(5)) model.compile(optimizer='adam', loss='mse', metrics=['accuracy']) # + id="eepaWdR5xvn4" colab_type="code" colab={} import tensorflow as tf # + id="QWOb03AsNbI9" colab_type="code" outputId="9b7d7b6b-f653-4308-80b1-3171e8e6c4f3" colab={"base_uri": "https://localhost:8080/", "height": 1000} model.fit(X, y, epochs=20, verbose=1) # + id="_wsVpvbZO-Rj" colab_type="code" colab={} cnn_loss = model.history.history['loss'] # + id="L9aWg6ftDjFQ" colab_type="code" outputId="674c6412-eda4-4196-dcc6-154739e7aefb" colab={"base_uri": "https://localhost:8080/", "height": 369} cnn_loss # + id="HNDKjWifXeuD" colab_type="code" outputId="409c845c-090d-4dcc-87bf-b6e8433e8c39" colab={"base_uri": "https://localhost:8080/", "height": 265} plt.figure(figsize=(8,4)) plt.xticks(np.arange(0,25,1)) plt.plot(range(len(cnn_loss)),cnn_loss); # + id="JhIa-AuUXsId" colab_type="code" colab={} y_hat = model.predict(X) # + id="qPuGdSOYcG2R" colab_type="code" outputId="a371aeff-c249-462e-b80d-53c69394ad85" colab={"base_uri": "https://localhost:8080/", "height": 34} y_hat[:, 0].shape # + id="bL7KjI7HcKGz" colab_type="code" outputId="8c744b6b-245c-4875-d2bc-9741f97caef3" colab={"base_uri": "https://localhost:8080/", "height": 34} y[:, 0].shape # + id="10QrDm9Rcq7c" colab_type="code" outputId="b6096693-9356-405a-d8dd-35510e269f6a" colab={"base_uri": "https://localhost:8080/", "height": 265} plt.plot(y[:500, 0], 'r-', label='actual') plt.plot(y_hat[:500, 0], 'b-', label='predicted') plt.legend() plt.show() # + id="4VxyXa6_dCSn" colab_type="code" colab={} # + id="-8Vh3IrMdL_U" colab_type="code" colab={}
cnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + tags=["remove-input", "active-ipynb", "remove-output"] # try: # from openmdao.utils.notebook_utils import notebook_mode # except ImportError: # !python -m pip install openmdao[notebooks] # - # # Sanity Checking Your Model # # In the first two tutorials we showed you the basics of how to build up a model from a set of components, # group them together, connect them together, and optimize them. # # Sometimes you put your model together and things don't work quite the way you would expect. # When this happens, OpenMDAO has a number of [debugging](../../features/debugging/debugging) features to help you # understand the structure of your model better and sort out the issue. # Many debugging features are all accessed via a [command line script](../../other_useful_docs/om_command) that is installed along with OpenMDAO itself. # There are a lot of different tools that are accessible from that script, but in this tutorial we'll focus on the most important one: # [check setup](../../other_useful_docs/om_command). # # # ## Check Setup # # Check setup runs through a host of different tests to make sure your model is setup correctly and warn you about things that commonly cause problems. # It will: # # 1. identify any unconnected inputs (forgetting to connect things is one of the most common mistakes). # 2. look for any cycles in your model that indicate the need for solvers (did you mean to create that cycle?). # 3. recurse down the model hierarchy and give every group and component a chance to perform its own custom checks. # # For example, if you tried to build the [sellar problem using connections](sellar-connect), # but forgot to issue one of the connections then your problem wouldn't run correctly and you'd get the wrong answer. 
# + tags=["remove-input", "remove-output"] from openmdao.utils.notebook_utils import get_code from myst_nb import glue glue("code_src1", get_code("openmdao.test_suite.components.sellar.SellarDis1"), display=False) # - # :::{Admonition} `SellarDis1` class definition # :class: dropdown # # {glue:}`code_src1` # ::: # + tags=["remove-input", "remove-output"] from openmdao.utils.notebook_utils import get_code from myst_nb import glue glue("code_src2", get_code("openmdao.test_suite.components.sellar.SellarDis2"), display=False) # - # :::{Admonition} `SellarDis2` class definition # :class: dropdown # # {glue:}`code_src2` # ::: # + import numpy as np import openmdao.api as om from openmdao.test_suite.components.sellar import SellarDis1, SellarDis2 class SellarMDAConnect(om.Group): def setup(self): cycle = self.add_subsystem('cycle', om.Group(), promotes_inputs=['x', 'z']) cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['x', 'z']) cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z']) cycle.connect('d1.y1', 'd2.y1') ###################################### # This is a "forgotten" connection!! 
###################################### #cycle.connect('d2.y2', 'd1.y2') cycle.set_input_defaults('x', 1.0) cycle.set_input_defaults('z', np.array([5.0, 2.0])) # Nonlinear Block Gauss Seidel is a gradient free solver cycle.nonlinear_solver = om.NonlinearBlockGS() self.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)', z=np.array([0.0, 0.0]), x=0.0), promotes_inputs=['x', 'z']) self.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1')) self.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0')) self.connect('cycle.d1.y1', ['obj_cmp.y1', 'con_cmp1.y1']) self.connect('cycle.d2.y2', ['obj_cmp.y2', 'con_cmp2.y2']) prob = om.Problem() prob.model = SellarMDAConnect() prob.driver = om.ScipyOptimizeDriver() prob.driver.options['optimizer'] = 'SLSQP' # prob.driver.options['maxiter'] = 100 prob.driver.options['tol'] = 1e-8 prob.set_solver_print(level=0) prob.model.add_design_var('x', lower=0, upper=10) prob.model.add_design_var('z', lower=0, upper=10) prob.model.add_objective('obj_cmp.obj') prob.model.add_constraint('con_cmp1.con1', upper=0) prob.model.add_constraint('con_cmp2.con2', upper=0) prob.setup() prob.set_val('x', 2.0) prob.set_val('z', [-1., -1.]) prob.run_driver() print('minimum found at') print(prob.get_val('x')[0]) print(prob.get_val('z')) print('minumum objective') print(prob.get_val('obj_cmp.obj')[0]) # + [markdown] tags=["remove-input"] # If you are in colab, the shell command will not find the file because it is a single notebook without the included file. # - # !openmdao check -c all sellar.py # This output tells you several things: # # 1. You have an unconnected input: `cycle.d1.y2` # 2. There are no reported cycles in your model, but there should be because this is supposed to be a coupled model! # # Whenever you encounter a problem, before you look at anything else you should always run this check first and look over the output carefully.
openmdao/docs/openmdao_book/basic_user_guide/command_line/check_setup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib notebook import matplotlib import seaborn as sb from matplotlib import pyplot as plt import numpy as np # Jupyter Specifics # %matplotlib inline from IPython.display import display, HTML from ipywidgets.widgets import interact, interactive, IntSlider, FloatSlider, Layout, ToggleButton, ToggleButtons, fixed display(HTML("<style>.container { width:100% !important; }</style>")) style = {'description_width': '100px'} slider_layout = Layout(width='99%') # + import umap import umap.plot from sklearn.decomposition import PCA from sklearn import cluster from sklearn.cluster import AgglomerativeClustering import hdbscan # - # # Introduction # # This is a purely empirical study on dependence of the validity measure of clustering quality, as a function of dimension. # # The approach is to create two clusters whose centers are a fixed distance, embedded in spaces of varying dimensions, calculating validity for clusterings in each dimension, to infer dependence of validity on dimension. 
#
# Hypothesis is that if the clusters are separated by the same Euclidean distance, the clustering quality should be equivalent, so if validity changes systematically with dimension it must be corrected to be a good clustering quality measure

# # 2d for intuition

from numpy.random import multivariate_normal as mnorm

# Call as:
# ```
# mnorm(mean, covariance, Npoints)
# ```

# +
# Two isotropic Gaussian clusters of 100 points each, centered at (-1,-1) and
# (1,1); `wid` is the per-axis variance on the diagonal of the covariance.
wid = 0.2
foo1 = mnorm(np.array([-1,-1]),np.array([[wid,0],[0,wid]]),100)
foo2 = mnorm(np.array([1,1]),np.array([[wid,0],[0,wid]]),100)
foo = np.concatenate((foo1,foo2))
colors = [1]*100+[0]*100
plt.scatter(foo[:,0],foo[:,1],c=colors);
# -

# Sanity check: 200 points stacked, 2 coordinates each
np.concatenate((foo1,foo2)).shape

# +
# Cluster one such 2-d sample with HDBSCAN and score it with the
# density-based validity index.
wid = 0.2
foo1 = mnorm(np.array([-1,-1]),np.array([[wid,0],[0,wid]]),100)
foo2 = mnorm(np.array([1,1]),np.array([[wid,0],[0,wid]]),100)
foo = np.concatenate((foo1,foo2))

min_cluster_size = minc = 3
min_samples = 3
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples,gen_min_span_tree=True)
fool = clusterer.fit_predict(foo)
hdbscan.validity.validity_index(foo, fool)

# +
# Sweep the cluster width and average the validity over `nave` random draws
# per width, to see how validity degrades as the clusters overlap.
val = []
xx = np.arange(0.02,0.5,.02)
min_cluster_size = minc = 3
min_samples = 3
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples,gen_min_span_tree=True)
nave=10
for wid in xx:
    valtmp = []
    for _ in range(nave):
        foo1 = mnorm(np.array([-1,-1]),np.array([[wid,0],[0,wid]]),100)
        foo2 = mnorm(np.array([1,1]),np.array([[wid,0],[0,wid]]),100)
        foo = np.concatenate((foo1,foo2))
        fool = clusterer.fit_predict(foo)
        valtmp.append(hdbscan.validity.validity_index(foo, fool))
    val.append(np.mean(valtmp))
# -

plt.plot(xx,val)
plt.ylabel("validity")
plt.xlabel('cov');

# # n dim

# Setup:
#
# * Create two clusters of 100 random points, centered at (-1,...,-1) and at (1,...,1) in d-dim space
# * run hdbscan to detect the clusters
# * compute validity
# * repeat 40 times, to get average validity.
# * repeat for d ranging from 2 to 15

# Scratch: a d-dimensional mean vector of -1's
dim = 10
np.full(dim,-1)

# Scratch: a diagonal covariance matrix (equivalent to 0.2 * np.eye(dim))
dim=4
foo = np.zeros((dim,dim))
for i in range(len(foo)):
    foo[i][i] = 0.2
foo

# +
# One d=4 example: two clusters at (-1,...,-1) and (1,...,1) with isotropic
# covariance wid * I, clustered with HDBSCAN and scored with validity_index.
wid = 0.2
dim = 4
mn1 = np.full(dim,-1)
mn2 = np.full(dim,1)
covar = wid * np.eye(dim)  # diagonal covariance, variance `wid` on every axis
foo1 = mnorm(mn1,covar,100)
foo2 = mnorm(mn2,covar,100)
foo = np.concatenate((foo1,foo2))

min_cluster_size = minc = 3
min_samples = 3
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples,gen_min_span_tree=True)
fool = clusterer.fit_predict(foo)
print(hdbscan.validity.validity_index(foo, fool))
# -

plt.scatter(foo[:,1],foo[:,2])

# +
# Sweep dimension d = 2..15 at fixed width, averaging the validity over
# `nave` independent draws per dimension.
min_cluster_size = minc = 3
min_samples = 3
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples,gen_min_span_tree=True)

wid = 0.2
nave = 40
val = []
for dim in range(2,16):
    valtmp=[]
    for _ in range(nave):
        mn1 = np.full(dim,-1)
        mn2 = np.full(dim,1)
        covar = wid * np.eye(dim)
        foo1 = mnorm(mn1,covar,100)
        foo2 = mnorm(mn2,covar,100)
        foo = np.concatenate((foo1,foo2))
        fool = clusterer.fit_predict(foo)
        valtmp.append(hdbscan.validity.validity_index(foo, fool))
    val.append(np.mean(valtmp))
# -

xx = list(range(2,16))
plt.plot(xx,val)
plt.xlabel('dimension')
plt.ylabel('validity')
plt.title('Validity: ave of 40 clusterings');

# **Check to see how Euclidean distance between (-1,...,-1) and (1,...,1) changes**:

# +
from scipy.spatial.distance import euclidean as dist

dd = []
ddd = []
for dim in range (2,16):
    mn1 = np.full(dim,-1)
    mn2 = np.full(dim,1)
    dst = dist(mn1,mn2)
    dd.append(dst)               # raw distance grows like 2*sqrt(d)
    mnn1 = [x/dst for x in mn1]
    mnn2 = [x/dst for x in mn2]
    ddd.append(dist(mnn1,mnn2))  # normalized centers: distance is always 1
plt.plot(list(range(2,16)),ddd)
plt.plot(list(range(2,16)),dd);
# -

# **Put clusters unit distance apart along line between (-1,...,-1) and (1,...,1)**
#
# Normalize so that Euclidean distance between cluster centers is always the same (1). Then check validity dependence on dimension.
# +
# Repeat the dimension sweep for several cluster widths, with the cluster
# centers normalized to unit separation; one validity-vs-dimension curve
# per width.
min_cluster_size = minc = 3
min_samples = 3
clusterer = hdbscan.HDBSCAN(min_cluster_size=minc,min_samples=min_samples,gen_min_span_tree=True)

nave = 40
xx = list(range(2,16))
widrange = np.arange(0.01,0.05,0.01)
for i in range(len(widrange)):
    val = []
    wid = widrange[i]
    for dim in xx:
        valtmp=[]
        for _ in range(nave):
            mn1 = np.full(dim,-1)
            mn2 = np.full(dim,1)
            dst = dist(mn1,mn2)   #  dist() = scipy.spatial.distance.euclidean()
            mn1 = [x/dst for x in mn1]   # rescale so the centers are unit distance apart
            mn2 = [x/dst for x in mn2]
            # was a manual diagonal-fill loop whose index variable shadowed
            # the outer loop's `i`; np.eye builds the same matrix
            covar = wid * np.eye(dim)
            foo1 = mnorm(mn1,covar,100)
            foo2 = mnorm(mn2,covar,100)
            foo = np.concatenate((foo1,foo2))
            fool = clusterer.fit_predict(foo)
            valtmp.append(hdbscan.validity.validity_index(foo, fool))
        val.append(np.mean(valtmp))
    plt.plot(xx,val)
plt.xlabel('dimension')
plt.ylabel('validity')
plt.title('Validity: ave of 40 clusterings, varying width of clusters');
strwid = ['wid = {}'.format(w) for w in widrange]
plt.legend(strwid);

# +
# functional form guess fitting data 1 (John)

def rescale1(v0,d):
    """Correction factor for validity at dimension d, assuming the known
    validity at dim=2 is v0; boosts higher-dimensional validities so they
    are comparable with the 2-d measurement."""
    v1 = 1/(1-v0)
    df = float(d)
    return v1/(v1+df/2-1)

xl = range(2,16)
yl = np.transpose(np.array([[v0*rescale1(v0,dim) for dim in range(2,16)] for v0 in np.linspace(0.8,0.3,4)]))
plt.plot(xl,yl)
plt.ylim((0,0.8))

# +
# functional form guess fitting data 2 (John)
# weaker form to deliberately underestimate correction dependence on dimension

def rescale2(v0,d):
    """Like rescale1 but with a logarithmic dependence on dimension, so the
    correction grows more slowly with d."""
    v1 = 1/(1-v0)
    df = float(d)
    return v1/(v1+np.log(df/2))

xl = range(2,16)
yl = np.transpose(np.array([[v0*rescale2(v0,dim) for dim in range(2,16)] for v0 in np.linspace(0.8,0.3,4)]))
plt.plot(xl,yl)
plt.ylim((0,0.8))
# -

# **Conclusions:**
#
# * There is an effect of dimension on validity.
# * For clusters with centers having same Euclidean distance, the measured validity goes down
# * For an intermediate cluster size, validity goes from ~0.6 to ~0.15 as dim goes from 2 to 15.
# * But actual curves are nonlinear...
# * Bottom line: higher dimension validities should be boosted for equivalence to low dimensional validities.
#     * using intermediate wid=0.2...
#     * v=0.2 for d=10 should be boosted by factor of 3x to get to equivalent validity measured for d=2.
#     * v=0.3 for d=6 should be boosted by factor of 1.5-2x.
#     * Maybe something like:  v_corrected = v*(3/8)*(d-2)
#     * Or perhaps (John) best to use the fitted form rescale1 above, which gives v(d) = v(2)/(1+(d/2-1)*(1-v(2)))
#     * Or more conservatively the logarithmic correction of rescale2
Notebooks/covid-19-caution/Calibration of validity.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np # %matplotlib inline import matplotlib.pyplot as plt from utils import load_preprocessed_file,get_monthly_averages,\ compute_anomalies, plot_anomaly_graph BUOYNO=42040 df=load_preprocessed_file(BUOYNO) df = df[df.index.map(lambda x:x.year)!=1995] # drop 1995, which only has december df.head(6) monthly=get_monthly_averages(df) monthly.head() all_months_air = compute_anomalies(monthly, 'ATMP') all_months_air.head() air_slope = plot_anomaly_graph(BUOYNO, 'air', all_months_air) all_months_water = compute_anomalies(monthly, 'WTMP') all_months_water.head() water_slope = plot_anomaly_graph(BUOYNO, 'water', all_months_water) from dataworkspaces.kits.jupyter import NotebookLineageBuilder with NotebookLineageBuilder('../results', step_name='anomaly-analysis-buoy-42040', run_description="compute air and water anomaly for buoy %s" % BUOYNO)\ .with_parameters({'buoy':BUOYNO})\ .with_input_path('../intermediate-data/processed_%s.csv.gz'%BUOYNO)\ .eval() as lineage: lineage.write_results({'air_slope':round(air_slope,3), 'water_slope':round(water_slope, 3), 'units':'degrees C per decade'}) print("Results:") print(" Air slope: %.3f degrees C per decade" % air_slope) print(" Water slope: %.3f degrees C per decade" % water_slope)
code/anomaly-analysis-buoy-42040.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] deletable=true editable=true
# # Grid
#
# The functions here are used often when dealing with 2D grids (like in TicTacToe).
#
# ### Distance
#
# The function returns the Euclidean Distance between two points in the 2D space.

# + deletable=true editable=true
import math


def distance(a, b):
    """The distance between two (x, y) points."""
    xa, ya = a
    xb, yb = b
    return math.hypot(xa - xb, ya - yb)


# + [markdown] deletable=true editable=true
# For example:

# + deletable=true editable=true
print(distance((1, 2), (5, 5)))

# + [markdown] deletable=true editable=true
# ### Distance Squared
#
# This function returns the square of the distance between two points.

# + deletable=true editable=true
def distance_squared(a, b):
    """The square of the distance between two (x, y) points."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return dx**2 + dy**2


# + [markdown] deletable=true editable=true
# For example:

# + deletable=true editable=true
print(distance_squared((1, 2), (5, 5)))

# + [markdown] deletable=true editable=true
# ### Vector Clip
#
# With this function we can make sure the values of a vector are within a given range. It takes as arguments three vectors: the vector to clip (`vector`), a vector containing the lowest values allowed (`lowest`) and a vector for the highest values (`highest`). All these vectors are of the same length. If a value `v1` in `vector` is lower than the corresponding value `v2` in `lowest`, then we set `v1` to `v2`. Similarly we "clip" the values exceeding the `highest` values.
# +
from utils import clip

def vector_clip(vector, lowest, highest):
    """Return vector, except if any element is less than the corresponding
    value of lowest or more than the corresponding value of highest, clip to
    those values."""
    clipped = (clip(v, low, high) for v, low, high in zip(vector, lowest, highest))
    return type(vector)(clipped)
# -

# For example:

print(vector_clip((-1, 10), (0, 0), (9, 9)))

# The vector we wanted to clip was the tuple (-1, 10). The lowest allowed values were (0, 0) and the highest (9, 9). So, the result is the tuple (0,9).
grid.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.4.2 # language: julia # name: julia-1.4 # --- # In this example we will show how to model a conditional dosing using the `DiscreteCallbacks`. The problem is as follows. The patient has a drug `A(t)` in their system. The concentration of the drug is given as `C(t)=A(t)/V` for some volume constant `V`. At `t=4`, the patient goes to the clinic and is checked. If the concentration of the drug in their body is below `4`, then they will receive a new dose. # # For our model, we will use the simple decay equation. We will write this in the in-place form to make it easy to extend to more complicated examples: using DifferentialEquations function f(du,u,p,t) du[1] = -u[1] end u0 = [10.0] const V = 1 prob = ODEProblem(f,u0,(0.0,10.0)) # Let's see what the solution looks like without any events. sol = solve(prob,Tsit5()) using Plots; gr() plot(sol) # We see that at time `t=4`, the patient should receive a dose. Let's code up that event. We need to check at `t=4` if the concentration `u[1]/4` is `<4`, and if so, add `10` to `u[1]`. We do this with the following: condition(u,t,integrator) = t==4 && u[1]/V<4 affect!(integrator) = integrator.u[1] += 10 cb = DiscreteCallback(condition,affect!) # Now we will give this callback to the solver, and tell it to stop at `t=4` so that way the condition can be checked: sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb) using Plots; gr() plot(sol) # Let's show that it actually added 10 instead of setting the value to 10. 
# We could have set the value using `affect!(integrator) = integrator.u[1] = 10`
# (an absolute dose) instead of adding to the current amount.

println(sol(4.00000))
println(sol(4.000000000001))

# Now let's model a patient whose decay rate for the drug is lower:

# Slower elimination: time constant 6 instead of 1
function f(du,u,p,t)
    du[1] = -u[1]/6
end
u0 = [10.0]
const V = 1  # re-declaring `const V` with the same value is accepted by Julia
prob = ODEProblem(f,u0,(0.0,10.0))

sol = solve(prob,Tsit5())

using Plots; gr()
plot(sol)

# Under the same criteria, with the same event, this patient will not receive a second dose:

sol = solve(prob,Tsit5(),tstops=[4.0],callback=cb)

using Plots; gr()
plot(sol)
notebook/models/02-conditional_dosing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # GVA Data Access Lab # # This lab is to help build a familiarity with accessing data on the GVA platform. # # A Python library is maintained to assist with working with GVA, it is not available on PyPI but can pip installed using the following command: # !pip install --upgrade git+https://github.com/gva-jjoyce/gva_data # We are primarily going to use two components in the GVA library; the `Reader` and `dictset`. # # The `Reader` component helps with reading data, taking care of activities like uncompressing compressed data, joining data across partitions and searching across multiple days. `dictset` helps processing data. # # Once read, data can be loaded into `Pandas` to perform analysis. Whilst this is valid approach for most datasets, Pandas is memory intensive so some data may never be able to be loaded into Pandas or may need some treatment before loading, this is the purpose of the `dictset` component. # # Additional information on these libraries is available in the GitHub repo: # # https://github.com/gva-jjoyce/gva_data/blob/main/docs/gva.data.readers.md # # https://github.com/gva-jjoyce/gva_data/blob/main/docs/gva.data.formats.dictset.md from gva.data.readers import Reader from gva.data.formats import dictset from IPython.display import HTML, display # The data in part one of this lab is from [FiveThirtyEight](https://fivethirtyeight.com/), the source data is available on the [FiveThirtyEight's GitHub account](https://github.com/fivethirtyeight/data/tree/master/star-wars-survey) and was the data used in Walt Hickey's [America’s Favorite ‘Star Wars’ Movies](https://fivethirtyeight.com/features/americas-favorite-star-wars-movies-and-least-favorite-characters/) article. 
# # To read the data, we use the `Reader` class and give it the path of data, the assigned variable is a _generator_, which behaves a lot like a _list_, but doesn't load the entire dataset into memory all at once. If we want to cycle over the set more than once we need to create a new `Reader` instance each time or convert the _generator_ to a _list_. Because the dataset is quite small, we will just covert the `Reader` to a _list_. # the location of the files is not in this version of the notebook star_wars_survey_results = list(Reader( project='', from_path='')) # Most data on GVA has a README file which includes details of the schema of the file, once we have the data loaded we can display it similar to _Pandas_ using the `dictset.to_html_table` method (there is also a `dictset.to_ascii_table` method). This method with exhaust _generators_ (a new generator will need to be created to use the data) and is intended for exploration only. We've converted our dataset to a _list_ to avoid this issue. display(HTML(dictset.to_html_table(star_wars_survey_results))) # We can see the responses to the first question are 'Yes'/'No', we're going to use `dictset.set_column` to convert these strings to a boolean. `dictset.set_column` takes three paramters: # # - The dictset to act on # - The column to update or create # - A setter, either a fixed value or a Callable to calculate the column # # Although a _lambda_ could be used as the _setter_, we're going to define a function to use as the setter as this is generally more reusable and easier to read and therefore debug. # # This function takes a column name and returns a function, this allows us to provide the name of the field rather than hard-coding the column name and could be used to convert any of ther other 'Yes'/'No' columns. # # We're also going to convert the resultant dataset to a list so we can iterate over it a number of times. 
# +
def yes_no_to_boolean(column):
    """Build a setter that converts a 'Yes'/'No' value in `column` to a boolean.

    The comparison is case-insensitive; any value other than 'yes' (including
    a missing value) maps to False.
    """
    # return a function to be called for each row
    def process_row(row):
        return str(row.get(column)).lower() == 'yes'
    return process_row

yes_no_converted = list(dictset.set_column(
        star_wars_survey_results,
        'Have you seen any of the 6 films in the Star Wars franchise?',
        setter=yes_no_to_boolean('Have you seen any of the 6 films in the Star Wars franchise?')))

# Fixed: `to_html_table` was called unqualified, but only the `dictset` module
# is imported above, so this cell raised a NameError.
display(HTML(dictset.to_html_table(yes_no_converted)))
# -

# ## CHALLENGE ONE
#
# Your first challenge is to create a new field 'Greedo Shot First' based on the 'Which character shot first?' column. This new column should be set to `True` for every row where the respondent answered 'Greedo' for this column.
#
# You should call the resulting dataset _greedo_shot_first_, if you have the right answer the _TEST_ statement a few cells down will show success.

# +
# CHALLENGE ONE TEST
if len(list(dictset.select_from(greedo_shot_first, where=lambda row: row['Greedo Shot First']))) == 197:
    display(HTML("<img src='https://media.giphy.com/media/111ebonMs90YLu/source.gif' width='480' align='center'>"))
else:
    display(HTML("<img src='https://media.giphy.com/media/3ohuPwtVfPsxaMp0QM/giphy.gif' width='480' align='center'>"))
# -

# Selecting and filtering data is a common activity, simple actions can be done using `dictset`, either before or instead of loading into _Pandas_.
#
# We're going to count the people who responded that they liked Anakin but that they didn't like Vader. To do this we'll use `dictset.select_from`. This method takes three parameters:
#
# - The dictset to act on
# - columns - the list of columns to select (optional, default is all columns)
# - where - a function to filter rows (optional, default is include all records)
#
# The _where_ parameter can be a _lambda_, but again we'll define a function.
# +
def likes_anakin_but_not_vader(row):
    """Return True for respondents who rated Anakin favorably but Vader unfavorably."""
    # These are what we're classing as a positive or a negative response
    positive_responses = ['Somewhat favorably', 'Very favorably']
    negative_responses = ['Somewhat unfavorably', 'Very unfavorably']
    # Return True where Anakin is positive and Vader is negative
    # rows that evaluate to True are kept, False are removed
    # NOTE(review): the original compared the same placeholder column
    # ('<NAME>') twice, which can never be both positive and negative;
    # restored the two distinct character favorability columns -- confirm
    # the exact column names against the dataset README.
    return (row['Anakin Skywalker'] in positive_responses
            and row['Darth Vader'] in negative_responses)

# Execute the selection against the star_wars data, using the function we defined above
who_likes_anakin_but_not_vader = list(dictset.select_from(
    star_wars_survey_results,
    where=likes_anakin_but_not_vader))

# Count the number
f"{len(who_likes_anakin_but_not_vader)} people responded favorably about Anakin but unfavorably about Vader"
# -

# ## CHALLENGE TWO
#
# The numbers associated with each movie in the dataset is the order the respondent liked each movie, with 1 being their most favorite and 6 their least favorite.
#
# Your next challenge, and last with the Star Wars dataset, is working out how many people liked 'The Phantom Menace' more than 'The Empire Strikes Back'. You should call your dataset _likes_jarjar_over_yoda_, if you have the right answer the _TEST_ statement a few cells down will show success.
#
# If you are having trouble, be aware of the spaces in the movie titles.

# +
# CHALLENGE TWO TEST
if len(list(likes_jarjar_over_yoda)) == 214:
    display(HTML("<img src='https://media.giphy.com/media/oGO1MPNUVbbk4/giphy.gif' width='480' align='center'>"))
else:
    display(HTML("<img src='https://media.giphy.com/media/3ohzdMibqeBjRPX53W/giphy.gif' width='480' align='center'>"))
# -

# # STILL TO WRITE
#
# - Filtering on read
# - Partitions
#    - using to filter data
#    - using to read data from a specific day
#
labs/01 Accessing Data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## NumPy, Pandas & Visualization - examples # ### BIOINF 575 - Fall 2020 # # # _____ # # <img src = "https://blog.thedataincubator.com/wp-content/uploads/2018/02/Numpypandas.png" width = 300/> # # ____ # #### Array product - vectorized operations import numpy as np import pandas as pd mat1 = np.arange(1,7).reshape(2,3) mat2 = np.array([[10, 11], [20, 21], [30, 31]]) mat1 mat1.T mat2 mat1.T * mat2 # _____ # #### Matrix multiplication # # <img src = "https://miro.medium.com/max/1400/1*YGcMQSr0ge_DGn96WnEkZw.png" width = 370/> # + # matrix multiplication mat1.dot(mat2) # + # matrix multiplication - more recently mat1@mat2 # - # ___ # # #### Combining arrays into a larger array - vstack, hstack, vsplit, hsplit # + ########## mat1 # - mat2 mat2.T # stacking arrays together - vertically vmatrix = np.vstack((mat1, mat2.T)) vmatrix mat1 mat1.T mat2 # stacking arrays together - horizontally hmatrix = np.hstack((mat1.T,mat2)) hmatrix # #### <b>More matrix computation</b> - basic aggregate functions are available - min, max, sum, mean, std # Let's look at our matrix again mat1 # #### Use the axis argument to compute mean for each column or row # - axis = 0 - columns # - axis = 1 - rows # + # compute max for each column # using the array max method (np.ndarray.max) mat1.max(axis = 0) # + # compute sum of rows using the np.sum function np.sum(mat1, axis = 1) # - # #### RESOURCES # # http://scipy-lectures.org/intro/numpy/array_object.html#what-are-numpy-and-numpy-arrays # https://www.python-course.eu/numpy.php # https://numpy.org/devdocs/user/quickstart.html#universal-functions # https://www.geeksforgeeks.org/python-numpy/ # ____ # # #### [Pandas](https://pandas.pydata.org/) is a high-performance library that makes familiar data structures, like `data.frame` 
from R, and appropriate data analysis tools available to Python users. # #### How does pandas work? # Pandas is built off of [Numpy](http://www.numpy.org/), and therefore leverages Numpy's C-level speed for its data analysis. # * Numpy can only make data structures of a single type. # * Pandas can use many types. # * Think of a table, where each column can be whatever type you want it to be, so long as every item in the column is that same type. # #### Why use pandas? # 1. Data munging/wrangling: the cleaning and preprocessing of data # 2. Loading data into memory from disparate data formats (SQL, CSV, TSV, JSON) # ___ # #### 1. `pd.Series` - One-dimensional** labeled array (or vector) # ```python # # Initialization Syntax # series = pd.Series(data, index, dtype) # ``` # Attributes # # ['T', # 'array', # 'at', # 'axes', # 'base', # 'data', # 'dtype', # 'dtypes', # 'empty', # 'flags', # 'ftype', # 'ftypes', # 'hasnans', # 'iat', # 'iloc', # 'imag', # 'index', # 'is_monotonic', # 'is_monotonic_decreasing', # 'is_monotonic_increasing', # 'is_unique', # 'itemsize', # 'ix', # 'loc', # 'name', # 'nbytes', # 'ndim', # 'plot', # 'real', # 'shape', # 'size', # 'strides', # 'timetuple', # 'values'] # # # Methods # # ['abs', # 'add', # 'add_prefix', # 'add_suffix', # 'agg', # 'aggregate', # 'align', # 'all', # 'any', # 'append', # 'apply', # 'argmax', # 'argmin', # 'argsort', # 'asfreq', # 'asof', # 'astype', # 'at_time', # 'autocorr', # 'between', # 'between_time', # 'bfill', # 'bool', # 'clip', # 'combine', # 'combine_first', # 'convert_dtypes', # 'copy', # 'corr', # 'count', # 'cov', # 'cummax', # 'cummin', # 'cumprod', # 'cumsum', # 'describe', # 'diff', # 'div', # 'divide', # 'divmod', # 'dot', # 'drop', # 'drop_duplicates', # 'droplevel', # 'dropna', # 'duplicated', # 'eq', # 'equals', # 'ewm', # 'expanding', # 'explode', # 'factorize', # 'ffill', # 'fillna', # 'filter', # 'first', # 'first_valid_index', # 'floordiv', # 'ge', # 'get', # 'groupby', # 'gt', # 'head', # 
'hist', # 'idxmax', # 'idxmin', # 'infer_objects', # 'interpolate', # 'isin', # 'isna', # 'isnull', # 'item', # 'items', # 'iteritems', # 'keys', # 'kurt', # 'kurtosis', # 'last', # 'last_valid_index', # 'le', # 'lt', # 'mad', # 'map', # 'mask', # 'max', # 'mean', # 'median', # 'memory_usage', # 'min', # 'mod', # 'mode', # 'mul', # 'multiply', # 'ne', # 'nlargest', # 'notna', # 'notnull', # 'nsmallest', # 'nunique', # 'pct_change', # 'pipe', # 'pop', # 'pow', # 'prod', # 'product', # 'quantile', # 'radd', # 'rank', # 'ravel', # 'rdiv', # 'rdivmod', # 'reindex', # 'reindex_like', # 'rename', # 'rename_axis', # 'reorder_levels', # 'repeat', # 'replace', # 'resample', # 'reset_index', # 'rfloordiv', # 'rmod', # 'rmul', # 'rolling', # 'round', # 'rpow', # 'rsub', # 'rtruediv', # 'sample', # 'searchsorted', # 'sem', # 'set_axis', # 'shift', # 'skew', # 'slice_shift', # 'sort_index', # 'sort_values', # 'squeeze', # 'std', # 'sub', # 'subtract', # 'sum', # 'swapaxes', # 'swaplevel', # 'tail', # 'take', # 'to_clipboard', # 'to_csv', # 'to_dict', # 'to_excel', # 'to_frame', # 'to_hdf', # 'to_json', # 'to_latex', # 'to_list', # 'to_markdown', # 'to_numpy', # 'to_period', # 'to_pickle', # 'to_sql', # 'to_string', # 'to_timestamp', # 'to_xarray', # 'transform', # 'transpose', # 'truediv', # 'truncate', # 'tshift', # 'tz_convert', # 'tz_localize', # 'unique', # 'unstack', # 'update', # 'value_counts', # 'var', # 'view', # 'where', # 'xs'] import numpy as np import pandas as pd # Create series from dictionary labels = ["EGFR","IL6","BRAF","ABL"] values = [4,2,3,2] dict_var = dict(zip(labels, values)) new_series = pd.Series(dict_var) new_series new_series.index # check the first few elements - head, the last few - tail new_series.head(2) new_series.tail(2) # #### describe() - Generate descriptive statistics # # # generate descriptive statistics new_series.describe() # _____ # # #### 2. 
`pd.DataFrame` - Multi-dimensional** labeled data structure with columns of *potentially* different types # ```python # # Initialization Syntax # df = pd.DataFrame(data, index, columns, dtype) # ``` # Attributes # # ['T', # 'at', # 'axes', # 'columns', # 'dtypes', # 'empty', # 'ftypes', # 'iat', # 'iloc', # 'index', # 'ix', # 'loc', # 'ndim', # 'plot', # 'shape', # 'size', # 'style', # 'timetuple', # 'values'] # ___ # #### <font color = "green">Example</font> # # The file "GSE22955_small_gene_table.txt" contains tab-separated data for the normalized gene expresion of about 1200 genes measured for every 3h for 45h to measure the effect of a HER2 inhibitor. # This is a filtered and processed subset of the data available in the Gene Expression Omnibus: # https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE22955 # file_name = "GSE22955_small_gene_table.txt" expression_data = pd.read_csv(file_name, sep = "\t", comment = "#", index_col= 0) expression_data type(expression_data) expression_data.shape # index by column name to get a column expression_data["6"] # subset by a range to get specific rows expression_data[3:7] type(expression_data.columns) expression_data.columns # change column names expression_data.columns = "Hour" + expression_data.columns expression_data.columns print(expression_data) expression_data expression_data.Hour36[4:8] # + # . 
notation to get columns and vectorized operations to substract data from two columns expr_diff = expression_data.Hour45 - expression_data.Hour0 expr_diff # + # where method to find the position of values that satisfy a certain condition pos = np.where(abs(expr_diff) == max(abs(expr_diff))) # - pos expr_diff[pos[0]] # row standard deviation gene_sd = expression_data.std(axis = 1) gene_sd expression_data[gene_sd > 1.25] # + # add a new column using join - the column has to have a name gene_sd.name = "Gene_sd" expr_sd_data = expression_data.join(gene_sd) expr_sd_data # - expr_sd_data[expr_sd_data.Gene_sd > 1.25] # #### There are 2 pandas-specific methods for indexing: # #### 1. ```.loc``` - primarily label/name-based # #### 2. `.iloc` - primarily integer/position-based # subset dataframe using conditional subsetting and column names gene_var = expr_sd_data.loc[expr_sd_data.Gene_sd > 1.25,"Hour30":] gene_var expr_sd_data.loc["ABCC11":"ABLIM1","Hour30":] gene_var.to_csv("gene_var.txt", sep = "\t", header = True, index = True) expr_data_small = expression_data.iloc[40:45,1:] expr_data_small expression_data.iloc[[4,7,20,44],1:] expr_data_small.loc["ALPL"].plot() expr_data_small.T.plot.box() expr_data_small.T # #### RESOURCES # # https://www.python-course.eu/pandas.phphttps://www.python-course.eu/numpy.php # https://scipy-lectures.org/packages/statistics/index.html?highlight=pandas # https://www.geeksforgeeks.org/pandas-tutorial/ # https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf # <img src="https://pandas.pydata.org/Pandas_Cheat_Sheet.pdf" width=1000/> # ____ # _____ # ## Data Visualization # ____ # # #### `matplotlib` - powerful basic plotting library - pandas plots are matplotlib plots # https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html # # `matplotlib.pyplot` is a collection of command style functions that make matplotlib work like MATLAB. 
<br> # Each pyplot function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc. # # In `matplotlib.pyplot` various states are preserved across function calls, so that it keeps track of things like the current figure and plotting area, and the plotting functions are directed to the current axes.<br> # "axes" in most places in the documentation refers to the axes part of a figure and not the strict mathematical term for more than one axis). # # # https://github.com/pandas-dev/pandas/blob/v0.25.0/pandas/plotting/_core.py#L504-L1533 # https://matplotlib.org # https://matplotlib.org/tutorials/ # https://github.com/rougier/matplotlib-tutorial # https://www.tutorialspoint.com/matplotlib/matplotlib_pyplot_api.htm # https://realpython.com/python-matplotlib-guide/ # https://github.com/matplotlib/AnatomyOfMatplotlib # https://www.w3schools.com/python/matplotlib_pyplot.asp # http://scipy-lectures.org/intro/matplotlib/index.html # %matplotlib inline import matplotlib.pyplot as plt # Call signatures:: # ``` # plot([x], y, [fmt], data=None, **kwargs) # plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs) # ``` expr_data_small.T # The main usage of `plt` is the `plot()` and `show()` functions # + # Plot the two lists, add axes labels plt.plot(expr_data_small.T, marker = "s") plt.xlabel("Time") plt.ylabel("Expression") plt.legend(expr_data_small.index) plt.xticks(rotation = 90) plt.show() # - # `matplotlib` can use *format strings* to quickly declare the type of plots you want. 
Here are *some* of those formats: # # |**Character**|**Description**| # |:-----------:|:--------------| # |'--'|Dashed line| # |':'|Dotted line| # |'o'|Circle marker| # |'^'|Upwards triangle marker| # |'b'|Blue| # |'c'|Cyan| # |'g'|Green| # + plt.plot(expr_data_small.loc["AMPH"], '^b--', linewidth=3, markersize=12) plt.xticks(rotation = 90) plt.show() # + plt.plot(expr_data_small.loc["AMPH"], color='blue', marker='^', linestyle='dashed', linewidth=0.5, markersize=5) plt.xticks(rotation = 270) plt.show() # + plt.plot(expr_data_small.loc["AMPH"], '^m--', expr_data_small.loc["ALPL"], 'sg-') plt.xticks(rotation = 90) plt.show() # + # Making a figure - grid layout plt.figure(figsize=(16, 12)) plt.subplot(221) plt.bar(expr_data_small.index, expr_data_small.mean(axis = 1)) plt.xticks(rotation = 90) plt.subplot(222) plt.scatter(expr_data_small.columns, expr_data_small.loc["AMPH"]) plt.scatter(expr_data_small.columns, expr_data_small.loc["ALPL"]) plt.legend(["AMPH","ALPL"]) plt.xticks(rotation = 90) plt.subplot(223) plt.hist(expr_data_small.loc["AMPH"]) plt.hist(expr_data_small.loc["ALPL"]) plt.legend(["AMPH","ALPL"]) axs = plt.subplot(224) axs.violinplot(expr_data_small) axs.set_xticks(range(1,6)) axs.set_xticklabels(expr_data_small.index) plt.xticks(rotation = 90) plt.suptitle('Cool data summary') plt.show() # - # + # help(plt.bar) # - # #### Multiple Plots expr_data_small.T.AMPH.plot(kind='density') expr_data_small.T.ALPL.plot(kind='density') plt.legend() plt.show() # ____________ # # ### `seaborn` - dataset-oriented plotting # Seaborn is a library that specializes in making *prettier* `matplotlib` plots of statistical data. <br> # It is built on top of matplotlib and closely integrated with pandas data structures. 
# # https://seaborn.pydata.org/introduction.html<br> # https://python-graph-gallery.com/seaborn/ # https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html # https://seaborn.pydata.org/tutorial/distributions.html import seaborn as sns # `seaborn` lets users *style* their plotting environment. sns.set(style='whitegrid') # + #dir(sns) # - colors = ["<= Hour15","<= Hour15","<= Hour15","<= Hour15","<= Hour15", "<= Hour30","<= Hour30","<= Hour30","<= Hour30","<= Hour30", "<= Hour45","<= Hour45","<= Hour45","<= Hour45","<= Hour45"] # + # hue argument allows you to color dots by category sns.scatterplot(x='AMPH',y='ALPL', hue = colors, data=expr_data_small.T) plt.show() # - sns.relplot(x="AMPH", y="ALPL", data=expr_data_small.T, hue = colors) plt.show() df_iris = pd.read_csv('https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv') df_iris.head() sns.relplot(x="petal_length", y="petal_width", col="species", hue="species", style="species", size="species", data=df_iris) plt.show() sns.heatmap(expression_data[gene_sd>1.1], center = 12, cmap = "Oranges") plt.show() # ____ # # ### `plotnine` - grammar of graphics - R ggplot2 in python # plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot. # # Plotting with a grammar is powerful, it makes custom (and otherwise complex) plots are easy to think about and then create, while the simple plots remain simple. 
# # # https://plotnine.readthedocs.io/en/stable/ # http://cmdlinetips.com/2018/05/plotnine-a-python-library-to-use-ggplot2-in-python/ # https://plotnine.readthedocs.io/en/stable/tutorials/miscellaneous-altering-colors.html # https://datascienceworkshops.com/blog/plotnine-grammar-of-graphics-for-python/ # https://realpython.com/ggplot-python/ # + # # !pip install plotnine # - from plotnine import * pd.melt(expr_data_small.T) ggplot(data=pd.melt(expr_data_small.T)) + geom_boxplot(aes(x = "Symbol", y = "value")) # + # add transparency - to avoid over plotting - alpha argument and change point size # more parameters - scale_x_log10 - transform x axis values to log scale, xlab - add label to x axis ggplot(data=df_iris) +aes(x='petal_length',y='petal_width',color="species") + \ geom_point(size=0.7,alpha=0.7) + facet_wrap('~species',nrow=3) + \ theme(figure_size=(7,7)) + ggtitle("Plot of iris dataset") + \ scale_x_log10() + xlab("Petal Length") + ylab("Petal Width") # + # Set width of bar for histogram and color for the bar line and bar fill color p = ggplot(data=df_iris) + aes(x='petal_length') + geom_histogram(binwidth=1,color='black',fill='grey') p # + # Create a linear regression line that uses the petal length to predict the petal width of the flower # These are broken down in 3 categories by species # The grey area is the 95% confidence level interval for predictions from a linear model ("lm") p = ggplot(df_iris, aes('petal_length', 'petal_width', color='species')) \ + geom_point() \ + stat_smooth(method='lm') # # + facet_wrap('~species')) p # + # Save the plot to a file ggsave(plot=p, filename='iris_linear_model.png') # - # # https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf # <img src = "https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf" width = "1000"/>
completed_notebooks/numpy_pandas_visualization_examples_completed_MoWe.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.2 64-bit # metadata: # interpreter: # hash: ac2eaa0ea0ebeafcc7822e65e46aa9d4f966f30b695406963e145ea4a91cd4fc # name: python3 # --- # # Ficha de trabalho 3 # # 1 - Considera o dataset cachexia. Lê os dados usando a função read_csv do pandas (dados e metadados). # + from pandas import read_csv data = read_csv('data_cachexia.csv', sep = ',', index_col = 0) print(data) # - meta = read_csv('meta_cachexia.csv', sep = ',', index_col = 0) print(meta) # 2 - Aplica a transformação necessária para que os dados seja escalonados para ter média 0 e desvio padrão 1. Verifica que as médias de todas as colunas é aproximadamente zero. # + from sklearn import preprocessing input_data = data.values output_data = meta.values[:,0] input_scaled = preprocessing.scale(input_data) print("Global Mean:", input_scaled.mean()) print("Global Standard Deviation:", input_scaled.std()) meanValidation = ((input_scaled.mean(axis = 0) < 0.000001) & (input_scaled.mean(axis = 0) > -0.000001)).all() print('\nThe averages for all columns are approximately zero?', meanValidation) # - # 3 - Cria conjuntos de dados treino e teste para serem usados na criação e validação de modelos de aprendizagem máquina. Considera 30% das amostras para formar o conjunto de teste. # + from sklearn.model_selection import train_test_split train_in, test_in, train_out, test_out = train_test_split(input_scaled, output_data, test_size = 0.3) print('Samples dimensions:\n') print('Train (input):', train_in.shape) print('Test (input):', test_in.shape) print('Train (output):', train_out.shape) print('Test (output):', test_out.shape) # - # 4 - Constroi diversos tipos de modelos de classificação treinando e testando com os conjuntos criados anteriormente. 
Calcula, para cada um dos modelos no test set, as métricas de erro PECC e F1-score, e calcula a matriz de confusão. Considere como classe positiva a classe "cachexic" # + from sklearn.metrics import accuracy_score, f1_score, confusion_matrix import matplotlib.pyplot as plt labels = ['cachexic', 'control'] def showConfusionMatrix(cm): tn, fp, fn, tp = cm.ravel() matrix = [[tp, fp], [fn, tn]] fig, ax = plt.subplots(1,1) fig.suptitle('Confusion Matrix', fontsize = 20) plt.axis('off') ax.table(cellText = matrix, rowLabels = labels, colLabels = labels, loc = 'center', cellLoc = 'center') # + # K-Nearest Neighbors from sklearn.neighbors import KNeighborsClassifier knn_model = KNeighborsClassifier() knn_model.fit(train_in, train_out) prediction = knn_model.predict(test_in) print('PECC:', accuracy_score(prediction, test_out)) print('F1-score:', f1_score(test_out, prediction, pos_label = 'cachexic')) cm = confusion_matrix(test_out, prediction) showConfusionMatrix(cm) # + # Logistic Regression from sklearn import linear_model lr_model = linear_model.LogisticRegression(solver = 'liblinear', multi_class = 'auto') lr_model.fit(train_in, train_out) prediction = lr_model.predict(test_in) print('PECC:', accuracy_score(prediction, test_out)) print('F1-score:', f1_score(test_out, prediction, pos_label = 'cachexic')) cm = confusion_matrix(test_out, prediction) showConfusionMatrix(cm) # + # Support Vector Machines from sklearn import svm svm_model = svm.SVC(gamma = 'scale') svm_model.fit(train_in, train_out) prediction = svm_model.predict(test_in) print('PECC:', accuracy_score(prediction, test_out)) print('F1-score:', f1_score(test_out, prediction, pos_label = 'cachexic')) cm = confusion_matrix(test_out, prediction) showConfusionMatrix(cm) # - # 5 ~ Usando os 2 modelos anteriores que lhe parecerem mais promissores avalie-os usando validação cruzada. O que conclui do desempenho dos modelos ? 
# + from sklearn.model_selection import cross_val_score # KNN knn_cv = cross_val_score(knn_model, input_scaled, output_data, cv = 5) print('Cross Validation Score for KNN:', knn_cv.mean()) # SVM svm_cv = cross_val_score(svm_model, input_scaled, output_data, cv = 5) print('Cross Validation Score for SVM:', svm_cv.mean()) # - # 6 - Usando apenas o melhor modelo do exercício anterior, compare o seu desempenho considerando: i) os dados originais; ii) os dados standardizados criados no ex. 2; iii) dados com transformação logaritmica e posteriomente standardizados # + import numpy as np # Original data svm_cv_original = cross_val_score(knn_model, input_data, output_data, cv = 5) print('Original data\n') print('> Cross validation score:', svm_cv_original.mean()) print('\n---\n') # Standardized data svm_cv_stadardized = cross_val_score(knn_model, input_scaled, output_data, cv = 5) print('Standardized Data\n') print('> Cross validation score:', svm_cv_stadardized.mean()) print('\n---\n') # Data with logarithmic transformation input_log = np.log2(input_data) svm_cv_log = cross_val_score(knn_model, input_log, output_data, cv = 5) print('Data with logarithmic transformation\n') print('> Cross validation score:', svm_cv_log.mean()) print('\n---\n') # Data with standardized logarithmic transformation input_log_scaled = preprocessing.scale(input_log) svm_cv_log_scaled = cross_val_score(knn_model, input_log_scaled, output_data, cv = 5) print('Data with standardized logarithmic transformation\n') print('> Cross validation score:', svm_cv_log_scaled.mean()) # - # 7 - Procure o melhor valor para os parâmetros C e gamma no modelo SVM, usando o dataset que tenha funcionado melhor em 6. 
# + from sklearn.model_selection import GridSearchCV # Dictionary of possible parameters parameters = {'C': [0.001, 0.01, 0.1, 1, 10, 100], 'gamma': [0.0001, 0.001, 0.01, 0.1], 'kernel': ['linear','rbf']} grid = GridSearchCV(svm_model, parameters) grid.fit(input_log_scaled, output_data) print('The best parameters are %s with a score of %s' % (grid.best_estimator_, grid.best_score_))
Classes/Class 2/notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/soma2000-lang/Tic-tac-toe-game/blob/master/tictactoe_game.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + id="ybnkJ08Ldu_u" import numpy as np import pandas as pd import pickle BOARD_ROWS = 4 BOARD_COLS = 4 BOARD_SIZE = BOARD_ROWS * BOARD_COLS class State: def __init__(self): # the board is represented by an n * n array, # 1 represents a chessman of the player who moves first, # -1 represents a chessman of another player # 0 represents an empty position self.data = np.zeros((BOARD_ROWS, BOARD_COLS)) self.winner = None self.hash_val = None self.end = None # compute the hash value for one state, it's unique def hash(self): if self.hash_val is None: self.hash_val = 0 for i in np.nditer(self.data): self.hash_val = self.hash_val * 3 + i + 1 return self.hash_val # check whether a player has won the game, or it's a tie def is_end(self): if self.end is not None: return self.end results = [] # check row for i in range(BOARD_ROWS): results.append(np.sum(self.data[i, :])) # check columns for i in range(BOARD_COLS): results.append(np.sum(self.data[:, i])) # check diagonals trace = 0 reverse_trace = 0 for i in range(BOARD_ROWS): trace += self.data[i, i] reverse_trace += self.data[i, BOARD_ROWS - 1 - i] results.append(trace) results.append(reverse_trace) for result in results: if result == 3: self.winner = 1 self.end = True return self.end if result == -3: self.winner = -1 self.end = True return self.end # whether it's a tie sum_values = np.sum(np.abs(self.data)) if sum_values == BOARD_SIZE: self.winner = 0 self.end = True return self.end # game is still going on self.end = False return self.end # 
@symbol: 1 or -1 # put chessman symbol in position (i, j) def next_state(self, i, j, symbol): new_state = State() new_state.data = np.copy(self.data) new_state.data[i, j] = symbol return new_state # print the board def print_state(self): for i in range(BOARD_ROWS): print('-------------') out = '| ' for j in range(BOARD_COLS): if self.data[i, j] == 1: token = '*' elif self.data[i, j] == -1: token = 'x' else: token = '0' out += token + ' | ' print(out) print('-------------') def get_all_states_impl(current_state, current_symbol, all_states): for i in range(BOARD_ROWS): for j in range(BOARD_COLS): if current_state.data[i][j] == 0: new_state = current_state.next_state(i, j, current_symbol) new_hash = new_state.hash() if new_hash not in all_states: is_end = new_state.is_end() all_states[new_hash] = (new_state, is_end) if not is_end: get_all_states_impl(new_state, -current_symbol, all_states) def get_all_states(): current_symbol = 1 current_state = State() all_states = dict() all_states[current_state.hash()] = (current_state, current_state.is_end()) get_all_states_impl(current_state, current_symbol, all_states) return all_states # all possible board configurations all_states = get_all_states() class Judger: # @player1: the player who will move first, its chessman will be 1 # @player2: another player with a chessman -1 def __init__(self, player1, player2): self.p1 = player1 self.p2 = player2 self.current_player = None self.p1_symbol = 1 self.p2_symbol = -1 self.p1.set_symbol(self.p1_symbol) self.p2.set_symbol(self.p2_symbol) self.current_state = State() def reset(self): self.p1.reset() self.p2.reset() def alternate(self): while True: yield self.p1 yield self.p2 # @print_state: if True, print each board during the game def play(self, print_state=False): alternator = self.alternate() self.reset() current_state = State() self.p1.set_state(current_state) self.p2.set_state(current_state) if print_state: current_state.print_state() while True: player = next(alternator) i, j, 
symbol = player.act() next_state_hash = current_state.next_state(i, j, symbol).hash() current_state, is_end = all_states[next_state_hash] self.p1.set_state(current_state) self.p2.set_state(current_state) if print_state: current_state.print_state() if is_end: return current_state.winner # AI player class Player: # @step_size: the step size to update estimations # @epsilon: the probability to explore def __init__(self, step_size=0.1, epsilon=0.1): self.estimations = dict() self.step_size = step_size self.epsilon = epsilon self.states = [] self.greedy = [] self.symbol = 0 def reset(self): self.states = [] self.greedy = [] def set_state(self, state): self.states.append(state) self.greedy.append(True) def set_symbol(self, symbol): self.symbol = symbol for hash_val in all_states: state, is_end = all_states[hash_val] if is_end: if state.winner == self.symbol: self.estimations[hash_val] = 1.0 elif state.winner == 0: # we need to distinguish between a tie and a lose self.estimations[hash_val] = 0.5 else: self.estimations[hash_val] = 0 else: self.estimations[hash_val] = 0.5 # update value estimation def backup(self): states = [state.hash() for state in self.states] for i in reversed(range(len(states) - 1)): state = states[i] td_error = self.greedy[i] * ( self.estimations[states[i + 1]] - self.estimations[state] ) self.estimations[state] += self.step_size * td_error # choose an action based on the state def act(self): state = self.states[-1] next_states = [] next_positions = [] for i in range(BOARD_ROWS): for j in range(BOARD_COLS): if state.data[i, j] == 0: next_positions.append([i, j]) next_states.append(state.next_state( i, j, self.symbol).hash()) if np.random.rand() < self.epsilon: action = next_positions[np.random.randint(len(next_positions))] action.append(self.symbol) self.greedy[-1] = False return action values = [] for hash_val, pos in zip(next_states, next_positions): values.append((self.estimations[hash_val], pos)) # to select one of the actions of equal value at 
random due to Python's sort is stable np.random.shuffle(values) values.sort(key=lambda x: x[0], reverse=True) action = values[0][1] action.append(self.symbol) return action def save_policy(self): with open('policy_%s.bin' % ('first' if self.symbol == 1 else 'second'), 'wb') as f: pickle.dump(self.estimations, f) def load_policy(self): with open('policy_%s.bin' % ('first' if self.symbol == 1 else 'second'), 'rb') as f: self.estimations = pickle.load(f) # human interface # input a number to put a chessman # | q | w | e | # | a | s | d | # | z | x | c | class HumanPlayer: def __init__(self, **kwargs): self.symbol = None self.keys = ['q', 'w', 'e', 'a', 's', 'd', 'z', 'x', 'c'] self.state = None def reset(self): pass def set_state(self, state): self.state = state def set_symbol(self, symbol): self.symbol = symbol def act(self): self.state.print_state() key = input("Input your position:") data = self.keys.index(key) i = data // BOARD_COLS j = data % BOARD_COLS return i, j, self.symbol def train(epochs, print_every_n=500): player1 = Player(epsilon=0.01) player2 = Player(epsilon=0.01) judger = Judger(player1, player2) player1_win = 0.0 player2_win = 0.0 for i in range(1, epochs + 1): winner = judger.play(print_state=False) if winner == 1: player1_win += 1 if winner == -1: player2_win += 1 if i % print_every_n == 0: print('Epoch %d, player 1 winrate: %.02f, player 2 winrate: %.02f' % (i, player1_win / i, player2_win / i)) player1.backup() player2.backup() judger.reset() player1.save_policy() player2.save_policy() def compete(turns): player1 = Player(epsilon=0) player2 = Player(epsilon=0) judger = Judger(player1, player2) player1.load_policy() player2.load_policy() player1_win = 0.0 player2_win = 0.0 for _ in range(turns): winner = judger.play() if winner == 1: player1_win += 1 if winner == -1: player2_win += 1 judger.reset() print('%d turns, player 1 win %.02f, player 2 win %.02f' % (turns, player1_win / turns, player2_win / turns)) # The game is a zero sum game. 
If both players are playing with an optimal strategy, every game will end in a tie. # So we test whether the AI can guarantee at least a tie if it goes second. def play(): while True: player1 = HumanPlayer() player2 = Player(epsilon=0) judger = Judger(player1, player2) player2.load_policy() winner = judger.play() if winner == player2.symbol: print("You lose!") elif winner == player1.symbol: print("You win!") else: print("It is a tie!") if __name__ == '__main__': train(int(1e5)) compete(int(1e3)) play()
tictactoe_game.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Understanding the data and the data layout # + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" import os from os import listdir # - base_path = "../input/plantvillage/PlantVillage/" # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" os.listdir(base_path) # - len(os.listdir(base_path)) from glob import glob imagePatches = glob("../input/plantvillage/PlantVillage/*/*.*", recursive=True) len(imagePatches) imagePatches[0:10] image_path = "../input/plantvillage/PlantVillage/Tomato_Bacterial_spot/3a5a5fef-8a3a-4f70-ab85-eaf5e3ecf6f2___GCREC_Bact.Sp 3449.JPG'" dir_name = os.path.dirname(image_path) len(dir_name.split("/")) dir_name.split("/")[4] # # Image Processing using fastai # + from fastai import * from fastai.vision import * import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics import auc,roc_curve from math import floor # - path = Path(base_path) path directory_root = '../input/plantvillage/' image_list, label_list = [], [] try: print("[INFO] Loading images ...") root_dir = listdir(directory_root) for directory in root_dir : # remove .DS_Store from list if directory == ".DS_Store" : root_dir.remove(directory) for plant_folder in root_dir : plant_disease_folder_list = listdir(f"{directory_root}/{plant_folder}") for disease_folder in plant_disease_folder_list : # remove .DS_Store from list if disease_folder == ".DS_Store" : plant_disease_folder_list.remove(disease_folder) for plant_disease_folder in plant_disease_folder_list: print(f"[INFO] Processing {plant_disease_folder} ...") plant_disease_image_list = listdir(f"{directory_root}/{plant_folder}/{plant_disease_folder}/") for 
single_plant_disease_image in plant_disease_image_list : if single_plant_disease_image == ".DS_Store" : plant_disease_image_list.remove(single_plant_disease_image) for image in plant_disease_image_list[:200]: image_directory = f"{directory_root}/{plant_folder}/{plant_disease_folder}/{image}" if image_directory.endswith(".jpg") == True or image_directory.endswith(".JPG") == True: image_list.append(image_directory) label_list.append(plant_disease_folder) print("[INFO] Image loading completed") except Exception as e: print(f"Error : {e}") image_list[0:10] tfms=get_transforms(flip_vert=True, max_warp=0., max_zoom=0., max_rotate=0.) def get_labels(file_path): dir_name = os.path.dirname(file_path) split_dir_name = dir_name.split("/") dir_levels = len(split_dir_name) label = split_dir_name[dir_levels - 1] return(label) data = ImageDataBunch.from_name_func(path, image_list, label_func=get_labels, size=96, bs=64,num_workers=2,ds_tfms=tfms ).normalize() data.show_batch(rows=3, figsize=(8,8)) learner= cnn_learner(data, models.densenet121, metrics=[accuracy], model_dir='/tmp/models/') learner.lr_find() learner.recorder.plot() lr=1e-1 learner.fit_one_cycle(1, lr) learner.save('model-1') learner.unfreeze() learner.lr_find() learner.recorder.plot() learner.fit_one_cycle(5,slice(1e-5,1e-3)) learner.save('model-2') learner.load('model-2') learner.fit_one_cycle(80,slice(3e-5,3e-3)) learner.recorder.plot_losses() conf= ClassificationInterpretation.from_learner(learner) conf.plot_confusion_matrix(figsize=(10,8)) # + predictions,labels = learner.get_preds(ds_type=DatasetType.Valid) predictions = predictions.numpy() labels = labels.numpy() predicted_labels = np.argmax(predictions, axis = 1) print((predicted_labels == labels ).sum().item()/ len(predicted_labels))
plantvillage-prediction-using-fastai.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns df=pd.read_csv(r'file:///F:\data\adult.csv') df df[df=='?']=np.nan df.isnull().sum() sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='viridis') df.workclass.value_counts() df.workclass = df.workclass.fillna('Private') df.isnull().sum() df.occupation.value_counts() df.occupation= df.occupation.fillna(method='ffill') df.isnull().sum() df["native.country"].value_counts() for col in['occupation','native.country']: df[col].fillna(df[col].mode()[0],inplace=True) x=df.drop(['income'],axis=1) y=df['income'] df.isnull().sum() df from sklearn.model_selection import train_test_split x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) # + from sklearn import preprocessing categorical=['workclass','education','marital.status','occupation','relationship','race','sex','native.country'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) x_test[feature]=le.fit_transform(x_test[feature]) # - from sklearn.preprocessing import StandardScaler scaler=StandardScaler() x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) from sklearn.linear_model import LogisticRegression regr = LogisticRegression() regr.fit(x_train, y_train) y_pred = regr.predict(x_test) from sklearn import metrics from sklearn.metrics import accuracy_score print("Accuracy:" , metrics.accuracy_score(y_test,y_pred)) from sklearn.decomposition import PCA pca=PCA() x_train=pca.fit_transform(x_train) pca.explained_variance_ratio_ # We can see that approximately 97.25% of variance is explained by the first 13 variables. 
# # Only 2.75% of variance is explained by the last variable. So, we can assume that it carries little information. # # So, I will drop it, train the model again and calculate the accuracy. x=df.drop(['income','native.country'],axis=1) y=df['income'] x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) # + from sklearn import preprocessing categorical=['workclass','education','marital.status','occupation','relationship','race','sex'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) x_test[feature]=le.fit_transform(x_test[feature]) # - x_train x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) logreg=LogisticRegression() logreg.fit(x_train,y_train) y_pred=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_pred)) # + x=df.drop(['income','native.country','hours.per.week'],axis=1) y=df['income'] x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) categorical=['workclass','education','marital.status','occupation','relationship','race','sex'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) x_test[feature]=le.fit_transform(x_test[feature]) # - x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) logreg=LogisticRegression() logreg.fit(x_train,y_train) y_pred=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_pred)) # + x=df.drop(['income','native.country','hours.per.week','capital.loss'],axis=1) y=df['income'] x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) categorical=['workclass','education','marital.status','occupation','relationship','race','sex'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) 
x_test[feature]=le.fit_transform(x_test[feature]) # + x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) logreg=LogisticRegression() logreg.fit(x_train,y_train) y_pred=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_pred)) # + x=df.drop(['income','native.country','hours.per.week','capital.loss','capital.gain','sex'],axis=1) y=df['income'] x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) categorical=['workclass','education','marital.status','occupation','relationship','race'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) x_test[feature]=le.fit_transform(x_test[feature]) # + x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) logreg=LogisticRegression() logreg.fit(x_train,y_train) y_pred=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_pred)) # + x=df.drop(['income'],axis=1) y=df['income'] x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.3,random_state=0) categorical=['workclass','education','marital.status','occupation','relationship','race','sex','native.country'] for feature in categorical: le=preprocessing.LabelEncoder() x_train[feature]=le.fit_transform(x_train[feature]) x_test[feature]=le.fit_transform(x_test[feature]) # + x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) x_test=pd.DataFrame(scaler.fit_transform(x_test),columns=x.columns) logreg=LogisticRegression() logreg.fit(x_train,y_train) y_pred=logreg.predict(x_test) print(metrics.accuracy_score(y_test,y_pred)) # + x_train=pd.DataFrame(scaler.fit_transform(x_train),columns=x.columns) pcs=PCA() pca.fit(x_train) cumsum=np.cumsum(pca.explained_variance_ratio_) dim=np.argmax(cumsum>=0.90)+1 print('The number of dimension required to preserve 90% of variance is',dim) # - 
plt.figure(figsize=(8,6)) plt.plot(np.cumsum(pca.explained_variance_ratio_)) plt.xlim(0,14,1) plt.xlabel('Number of components') plt.ylabel('Cumulative explained variance') plt.show()
Adult.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Exercise 1: Make a function class # + class F: def __init__(self, a, w): self.a, self.w = a, w def __call__(self, x): from math import exp, sin return exp(-self.a * x) * sin(self.w * x) f = F(a=1, w=0.1) from math import pi f(pi) # - f.a = 2 f(pi) # # Exercise 2: Add a data attribute to a class # + class Account(object): def __init__(self, name, account_number, initial_amount): self.name = name self.no = account_number self.balance = initial_amount self.transactions = 0 def deposit(self, amount): self.balance += amount self.transactions += 1 def withdraw(self, amount): self.balance -= amount self.transactions += 1 def dump(self): print("%s, %s, balance: %s, transactions: %d" % (self.name, self.no, self.balance, self.transactions)) @staticmethod def test(): a = Account("H.P.L.", "121344312", 1000) a.withdraw(3000) a.deposit(20) assert a.transactions == 2 Account.test() # - # # Exercise 3: Add functionality to a class # + from datetime import datetime as dt import dateutil.parser as dup class Account(object): def __init__(self, name, account_number, initial_amount): self._name = name self._no = account_number self._transactions = {str(dt.utcnow()): initial_amount} def deposit(self, amount): self._transactions[str(dt.utcnow())] = amount def withdraw(self, amount): self._transactions[str(dt.utcnow())] = -amount def get_balance(self): return sum(self._transactions.values()) def print_transactions(self): for time, amount in self._transactions.items(): print(f"%s: %6.f" % (dup.parse(time).ctime(), amount)) def dump(self): print("%s, %s, balance: %s" % (self._name, self._no, self.get_balance())) # - import time a = Account("Onno", "112312535", 20) a.deposit(333) time.sleep(1) a.withdraw(34) time.sleep(1) a.deposit(90) time.sleep(1) a.withdraw(100) 
time.sleep(1) a.print_transactions() a.dump() # + # time stuff import time s = time.time() # seconds since epoch print(s) print(time.ctime()) # print current time print(time.ctime(2*s)) # print time t l = time.localtime(s) # convert seconds to time_struct (local), s is optional print(l) print(time.gmtime(s)) # convert seconds to time_struct (adjust to gmt), s is optional print(time.mktime(l)) # convert time_struct to seconde (local), s is optional p = time.strftime("time: %a, %d %b %Y %H:%M:%S", l) # formatted string from time_struct, l is optional print(p) print(time.strptime(p, "time: %a, %d %b %Y %H:%M:%S")) # make time_struct from formatted string print(time.strptime(time.ctime())) # ctime format is default format for strptime from datetime import datetime as dt print(dt.now()) # local datetime d = dt.utcnow() # utc datetime print(d) print(d.ctime()) # print datetime (strftime works as well) import dateutil.parser as dup d2 = dup.parse(str(d)) # parse datetime from iso string (datetime.strptime works as well) print(d2.ctime()) print(d.tzinfo) print(d2.tzinfo) # - # # Exercise 7.4: Make classes for a rectangle and a triangle # + class Rectangle: def __init__(self, width, height, corner): self.width, self.height, self.corner = width, height, corner def area(self): return self.width * self.height def perimeter(self): return 2*self.width + 2*self.height @staticmethod def test(): r = Rectangle(2, 3, (0, 0)) import numpy as np assert np.allclose((r.area(), r.perimeter()), (6, 10)) Rectangle.test() # + class Triangle: def __init__(self, vertices): self.x = []; self.y = [] for x, y in vertices: self.x.append(x) self.y.append(y) def area(self): x, y = self.x, self.y return 1/2 * sum(abs(x[i - 1]*y[i] - y[i - 1]*x[i]) for i in range(3)) def perimeter(self): from math import sqrt x, y = self.x, self.y return sum(sqrt((x[i] - x[i - 1])**2 + (y[i] - y[i - 1])**2) for i in range(3)) @staticmethod def test(): import numpy as np t = Triangle([(0, 0), (1, 0), (0, 1)]) assert 
np.allclose((t.area(), t.perimeter()), (0.5, 3.414213562373095))

Triangle.test()
# -

# # Exercise 7.5: Make a class for quadratic functions

# +
class Quadratic:
    """Quadratic polynomial f(x) = a*x**2 + b*x + c."""

    def __init__(self, a, b, c):
        self.a, self.b, self.c = a, b, c

    def __call__(self, x):
        # Evaluate the polynomial at x (scalar or numpy array).
        return (self.a * x**2) + self.b*x + self.c

    def table(self, l, r, n=10):
        """Print a two-column table of x and f(x) for n points in [l, r]."""
        from numpy import linspace
        print(" x | f(x) ")
        print("------------+------------")
        for x in linspace(l, r, n):
            print(f" {x:10.6f} | {self(x):10.6f} ")

    def roots(self):
        """Return the two real roots (-b +- sqrt(b^2 - 4ac)) / (2a).

        Raises ValueError (from math.sqrt) when the discriminant is
        negative, i.e. when there are no real roots.
        """
        from math import sqrt
        a, b, c = self.a, self.b, self.c
        # BUG FIX: the original returned `a + e, a - e`, mixing up the
        # leading coefficient `a` with the vertex -b/(2a) and forgetting
        # to divide the sqrt term by 2a.  For Quadratic(1, 2, -3) that
        # gave (5, -3) instead of the correct (1, -3).
        d = -b / 2 / a
        e = sqrt(b**2 - 4*a*c) / (2*a)
        return d + e, d - e


q = Quadratic(1, 2, -3)
q.table(0, 10)
print("\nroots:", q.roots())
# -

# # Exercise 7.6: Make a class for straight lines

# +
class Line:
    """Straight line through two points p1 and p2, callable as y(x)."""

    def __init__(self, p1, p2):
        x0, y0 = p1; x1, y1 = p2
        a = (y1 - y0) / (x1 - x0)
        # NOTE(review): this installs __call__ on the *class*, so all
        # instances share the most recently constructed line.
        self.__class__.__call__ = lambda self, x: y0 + a*(x - x0)

    @staticmethod
    def test():
        import numpy as np
        line = Line((0, -1), (2, 4))
        assert np.allclose((line(0.5), line(0), line(1)), (0.25, -1, 1.5))

Line.test()
# -

# # Exercise 7.7: Flexible handling of function arguments

# +
class Line:
    """Line from (point, point), (point, slope), (slope, point) or (y0, slope)."""

    def __init__(self, p1, p2):
        if all(isinstance(p, tuple) for p in (p1, p2)):
            # two points
            x0, y0 = p1; x1, y1 = p2
        elif isinstance(p1, tuple):
            # point and slope
            x0, y0 = p1
            x1, y1 = x0 + 1, y0 + p2
        elif isinstance(p2, tuple):
            # slope and point
            x0, y0 = p2
            x1, y1 = x0 + 1, y0 + p1
        else:
            # intercept at x=0 and slope
            x0, y0 = 0, p1
            x1, y1 = x0 + 1, y0 + p2
        a = (y1 - y0) / (x1 - x0)
        self.__class__.__call__ = lambda self, x: y0 + a*(x - x0)

    @staticmethod
    def test():
        import numpy as np
        line = Line((0, -1), (2, 4))
        assert np.allclose((line(0.5), line(0), line(1)), (0.25, -1, 1.5))
        line = Line((0, -1), 5/2)
        assert np.allclose((line(0.5), line(0), line(1)), (0.25, -1, 1.5))
        line = Line(-1, 5/2)
        assert np.allclose((line(0.5), line(0), line(1)), (0.25, -1, 1.5))

Line.test()
# -

# # Exercise 7.8: Wrap functions in a class
class LagrangeInterpolation:
    """Lagrange interpolating polynomial through the points (xp, yp)."""

    def __init__(self, xp, yp):
        self.xp, self.yp = xp, yp

    def __call__(self, x):
        x = np.asarray(x)
        k = arange(len(self.xp))
return sum(self.yp[k] * self.L_k(x, k), axis=1) def L_k(self, x, k): xp = self.xp i = empty([len(k) - 1, len(k)], dtype=int) for k_ in k: i[:, k_] = np.delete(k, k_) g = x.reshape(-1, 1, 1) - np.resize(xp[i], (x.size, xp[i].shape[0], xp[i].shape[1])) return prod(g / (xp[k] - xp[i]), axis=1) def plot(self): import matplotlib.pyplot as plt xp = self.xp plt.figure() plt.scatter(xp, self.yp) x = linspace(xp[0], xp[-1], 1001) plt.plot(x, self(x)) plt.show() import numpy as np from numpy import arange, empty, prod, sum, linspace xp = np.linspace(0, 3*np.pi, 15) yp = np.sin(xp) f = LagrangeInterpolation(xp, yp) x = 1.2 print("p_L(%g)=%g" % (x, f(x))) print("sin(%g)=%g" % (x, np.sin(x))) f.plot() # # Exercise 7.9: Flexible handling of function arguments class LagrangeInterpolation: def __init__(self, f, x, n=10): self.xp = np.linspace(x[0], x[1], n) self.yp = f(self.xp) def __call__(self, x): x = np.asarray(x) k = arange(len(self.xp)) return sum(self.yp[k] * self.L_k(x, k), axis=1) def L_k(self, x, k): xp = self.xp i = empty([len(k) - 1, len(k)], dtype=int) for k_ in k: i[:, k_] = np.delete(k, k_) g = x.reshape(-1, 1, 1) - np.resize(xp[i], (x.size, xp[i].shape[0], xp[i].shape[1])) return prod(g / (xp[k] - xp[i]), axis=1) def plot(self): import matplotlib.pyplot as plt xp = self.xp plt.figure() plt.scatter(xp, self.yp) x = linspace(xp[0], xp[-1], 1001) plt.plot(x, self(x)) plt.show() # + from numpy import exp, sin, pi def myfunction(x): return exp(-x/2.0)*sin(x) f = LagrangeInterpolation(myfunction, x=[0, 2*pi], n=11) f.plot() # - # # Exercise 7.10: Deduce a class implementation # + class Hello: def __call__(self, x): return f"Hello, {x}!" def __str__(self): return "Hello, World!" 
a = Hello() print(a("students")) print(a) # - # # Exercise 7.11: Implement special methods in a classm # + class F: def __init__(self, a, w): self.a, self.w = a, w def __call__(self, x): from math import exp, sin return exp(-self.a * x) * sin(self.w * x) def __str__(self): return "exp(-self.a * x) * sin(self.w * x)" f = F(a=1, w=0.1) from math import pi f(pi) # - f.a = 2 f(pi) print(f) # # Exercise 7.12: Make a class for summation of series def test_Sum(): term = lambda k, x: (-x)**k x = 0.5 S = Sum(term, M=0, N=3) assert abs(S(x) - 5/8) < 1e-12 assert abs(S.term(k=4, x=x) - term(k=4, x=x)) < 1e-12 # + class Sum(): def __init__(self, term, M, N): self.term, self.M, self.N = term, M, N def __call__(self, x): return sum(self.term(k, x) for k in range(self.M, self.N + 1)) test_Sum() # - import matplotlib.pyplot as plt import numpy as np from scipy.special import factorial plt.figure() x = np.linspace(-10, 10, 100) y = Sum(lambda k, x: (-1)**k / factorial(2*k + 1) * x**(2*k + 1), 0, 10)(x) plt.plot(x, y) plt.show() # # Exercise 7.13: Apply a numerical differentitation class pwd # + from package.Y import Y from package.Derivative import Derivative import matplotlib.pyplot as plt import numpy as np y = Y(20) dy = Derivative(y.value) t = np.linspace(0, y.v0/y.g) plt.figure() plt.plot(t, y.value(t)) plt.plot(t, dy(t)) plt.show() # - # # Exercise 7.14: Implement an addition operator class Rope: def __init__(self, knots): self.knots = knots def __add__(self, other): return Rope(self.knots + other.knots + 1) def __str__(self): return str(self.knots) r1 = Rope(2) r2 = Rope(2) r3 = r1 + r2 print(r3) def test_Rope(): r1 = Rope(2) r2 = Rope(2) r3 = r1 + r2 assert r3.knots == 5 # # Exercise 7.15: Implement in-place += and -= operators # + from datetime import datetime as dt import dateutil.parser as dup class Account(object): def __init__(self, name, account_number, initial_amount): self._name = name self._no = account_number self._transactions = {str(dt.utcnow()): initial_amount} 
def __iadd__(self, amount): self._transactions[str(dt.utcnow())] = amount return self def __isub__(self, amount): self._transactions[str(dt.utcnow())] = -amount return self def get_balance(self): return sum(v for v in self._transactions.values()) def print_transactions(self): for time, amount in self._transactions.items(): print(f"%s: %6.f" % (dup.parse(time).ctime(), amount)) def __str__(self): return "%s, %s, balance: %s" % (self._name, self._no, self.get_balance()) def __repr__(self): return f"Account('{self._name}', {self._no}, {self.get_balance()})" # - import time a = Account("Onno", "112312535", 20) a += 333 time.sleep(1) a -= 34 time.sleep(1) a += 90 time.sleep(1) a -= 100 time.sleep(1) a.print_transactions() print(a) repr(a) b = eval(repr(a)) print(a) print(b) b == a # # Exercise 7.16: Implement a class for numerical differentiation def test_Central(): def f(x): return 1/2 * x**2 df = Central(f) x = 2 print(df(x)) print(x) assert abs(df(x) - x) < 1e-6 class Central: def __init__(self, f, h=1e-6): self.f, self.h = f, h def __call__(self, x): f, h = self.f, self.h return (f(x + h) - f(x - h)) / 2/h test_Central() # + import sympy as sp def table(f, x, h=1e-6): x_list = x x = sp.Symbol('x') df_exact = sp.lambdify(x, sp.diff(f)) df_numeric = Central(sp.lambdify(x, f), h=h) print(" x | Error ") print("-----------+---------------") for x in x_list: print(" {:9.6f} | {:13.6e} ".format(x, df_exact(x) - df_numeric(x))) # - x = sp.Symbol('x') table(2*x*sp.cos(2*x) + sp.sin(2*x), np.linspace(-4, 4, 10)) f = lambda x: 2*x*np.cos(2*x) + np.sin(2*x) x = np.linspace(-5, 5, 100) plt.figure() plt.plot(x, f(x)) plt.show() # # Exercise 7.17: Examine a program # + from math import * class Backward(object): def __init__(self, f, h=1e-9): self.f, self.h = f, h def __call__(self, x): h, f = self.h, self.f return (f(x) - f(x-h))/h # finite difference dsin = Backward(sin) e = dsin(0) - cos(0); print("error:", e) dexp = Backward(exp, h=1e-7) e = dexp(0) - exp(0); print("error:", e) 
# - # # Exercise 7.18: Modify a class for numerical differentiation # + class Derivative(object): def __init__(self, f, h=1E-5): self._f = f self._h = float(h) def __call__(self, x): f, h = self._f, self._h return (f(x+h) - f(x))/h def set_precision(self, h): self._h = h def get_precision(self): return self._h def test_Derivative(): d = Derivative(lambda x: x) d.set_precision(1e-2) assert d.get_precision() == 1e-2 test_Derivative() # - # # Exercise 7.19: Make a class for the Heaviside function class Heaviside(): import numpy as np def __init__(self, eps=None): self.eps = eps def __call__(self, x): eps = self.eps if eps: from numpy import sin, pi y = 0 y = np.where((-eps <= x) & (x < eps), (1 + x/eps + sin(pi * x / eps)/pi) / 2, y) y = np.where(x >= eps, 1, y) else: y = 1 * (x >= 0) return y def plot(self, xmin, xmax): import matplotlib.pyplot as plt plt.figure() x = np.linspace(xmin, xmax, 200) plt.plot(x, self(x)) plt.show() H = Heaviside() H(0.1) H = Heaviside(eps=0.8) H(0.1) H = Heaviside() x = np.linspace(-1, 1, 11) H(x) H = Heaviside(eps=0.8) H(x) H = Heaviside(eps=1) H.plot(xmin=-4, xmax=4) # # Exercise 7.20: Make a class for the indicator function class Indicator(): import numpy as np def __init__(self, a, b, eps=None): self.eps, self.a, self.b = eps, a, b def __call__(self, x): H = Heaviside(self.eps) y = H(x - self.a) * H(-(x - self.b)) return y def plot(self, xmin, xmax): import matplotlib.pyplot as plt plt.figure() x = np.linspace(xmin, xmax, 200) plt.plot(x, self(x)) plt.show() I = Indicator(-2, 3, eps=1) I.plot(-5, 6) # # Exercise 7.21: Make a class for piecewise constant functions class PiecewiseConstant(): import numpy as np def __init__(self, points, xmax): self.points, self.xmax = points, xmax def __call__(self, x): y = np.nan for v, xi in self.points: y = np.where((xi <= x) & (x < self.xmax), v, y) return y def plot(self): import matplotlib.pyplot as plt plt.figure() x = np.linspace(self.points[0][1], self.xmax, 1000) plt.plot(x, self(x)) 
plt.show() f = PiecewiseConstant([(0.4, 1), (0.2, 1.5), (0.1, 3)], xmax=4) print(f(1.5), f(1.75), f(4)) x = np.linspace(0, 4, 21) print(f(x)) f.plot() # # Exercise 7.22: Speed up repeated integral calculations # + class Integral(object): def __init__(self, f, a, n=100): self.f, self.a, self.n = f, a, n def __call__(self, x): if isinstance(x, np.ndarray): nk = int(self.n / len(x)) I = [trapezoidal(self.f, self.a, x[0], nk)] for i, xi in enumerate(x[1:]): I.append(I[i] + trapezoidal(self.f, x[i], xi, nk)) return I else: return trapezoidal(self.f, self.a, x, self.n) def trapezoidal(f, a, x, n): dx = (x - a) / n I = dx / 2 * sum(f(a + (k - 1)*dx) + f(a + k*dx) for k in range(n)) return I # - from numpy import sin, pi I = Integral(sin, 0, n=1000) I(np.array([pi/2, pi, 3/2*pi, 2*pi])) # # Exercise 7.23: Apply a class for polynomials class Polynomial(object): def __init__(self, coefficients): self.coeff = coefficients def __call__(self, x): """Evaluate the polynomial.""" s = 0 for i, c in enumerate(self.coeff): s += c*x**i return s def __add__(self, other): """Return self + other as Polynomial object.""" # Start with the longest list and add in the other if len(self.coeff) > len(other.coeff): result_coeff = self.coeff[:] # copy! for i in range(len(other.coeff)): result_coeff[i] += other.coeff[i] else: result_coeff = other.coeff[:] # copy! 
for i in range(len(self.coeff)): result_coeff[i] += self.coeff[i] return Polynomial(result_coeff) def evaluate(x, N): from math import factorial coef = [1/factorial(k) for k in range(N + 1)] p = Polynomial(coef) print(f"N: {N:2d}, p({x:05.2f}) = {p(x):f}") from math import exp for x in [0.5, 3, 10]: print("exact:", exp(x)) for N in [2, 5, 10, 15, 20, 25]: evaluate(x, N) # # Exercise 7.24: Find a bug in a class for polynomials class Polynomial(object): def __init__(self, coefficients): self.coeff = coefficients def __call__(self, x): return sum([c*x**i for i, c in enumerate(self.coeff)]) def __add__(self, other): maxlength = max(len(self.coeff), len(other.coeff)) # Extend both lists with zeros to this maxlength self.coeff += [0]*(maxlength - len(self.coeff)) other.coeff += [0]*(maxlength - len(other.coeff)) result_coeff = self.coeff for i in range(maxlength): result_coeff[i] += other.coeff[i] return Polynomial(result_coeff) p1 = Polynomial([1, 2, 3]) p2 = Polynomial([0, 4, 0]) (p1 + p2)(3) # # Exercise 7.25: Implement subtraction of polynomials class Polynomial(object): def __init__(self, coefficients): self.coeff = coefficients def __call__(self, x): return sum([c*x**i for i, c in enumerate(self.coeff)]) def __add__(self, other): maxlength = max(len(self.coeff), len(other.coeff)) # Extend both lists with zeros to this maxlength self.coeff += [0]*(maxlength - len(self.coeff)) other.coeff += [0]*(maxlength - len(other.coeff)) result_coeff = self.coeff for i in range(maxlength): result_coeff[i] += other.coeff[i] return Polynomial(result_coeff) def __sub__(self, other): from copy import copy other_ = copy(other) other_.coeff = [-c for c in other_.coeff] return self.__add__(other_) def __str__(self): s = "" for i in range(0, len(self.coeff)): if self.coeff[i] != 0: s += " + %g*x^%d" % (self.coeff[i], i) # Fix layout s = s.replace("+ -", "- ") s = s.replace("x^0", "1") s = s.replace(" 1*", " ") s = s.replace("x^1 ", "x ") if s[0:3] == " + ": # remove initial + s = s[3:] 
if s[0:3] == " - ": # fix spaces for initial - s = "-" + s[3:] return s p1 = Polynomial([1, 2, 3]) p2 = Polynomial([0, 4, 0]) (p1 - p2).coeff # # Exercise 7.26: Test the functionality of pretty print of polynomials print(p1 - p2) # looks fine. # # Exercise 7.27: Vectorize a class for polynomials class Polynomial(object): import numpy as np def __init__(self, coefficients): self.coeff = np.asarray(coefficients) def __call__(self, x): return self.coeff @ x**np.arange(len(self.coeff)) def __add__(self, other): coeffs = sorted([self.coeff, other.coeff], key=len) r = coeffs[0] + coeffs[1][:len(coeffs[0])] r = np.append(r, coeffs[1][len(r):]) return Polynomial(r) def __sub__(self, other): from copy import copy other_ = copy(other) other_.coeff = [-c for c in other_.coeff] return self.__add__(other_) def __str__(self): s = "" for i in range(0, len(self.coeff)): if self.coeff[i] != 0: s += " + %g*x^%d" % (self.coeff[i], i) # Fix layout s = s.replace("+ -", "- ") s = s.replace("x^0", "1") s = s.replace(" 1*", " ") s = s.replace("x^1 ", "x ") if s[0:3] == " + ": # remove initial + s = s[3:] if s[0:3] == " - ": # fix spaces for initial - s = "-" + s[3:] return s p1 = Polynomial([1, 2, 3, 5, 7]) p2 = Polynomial([0, 4, 0, 1]) (p1 + p2).coeff (p1 + p2)(2) # # Exercise 7.28: Use a dict to hold polynomial coefficients class Polynomial: import numpy as np def __init__(self, coefficients): self.coeff = coefficients def __call__(self, x): return sum(c * x**i for i, c in self.coeff.items()) def __add__(self, other): cs = self.coeff.copy() for c in other.coeff: if c in cs: cs[c] += other.coeff[c] else: cs[c] = other.coeff[c] return Polynomial(cs) def __sub__(self, other): from copy import copy other_ = copy(other) other_.coeff = {k: -c for k, c in other_.coeff.items()} return self.__add__(other_) def __mul__(self, other): from collections import defaultdict cs = defaultdict(float) for i, c in self.coeff.items(): for ii, cc in other.coeff.items(): cs[i + ii] += c * cc return 
Polynomial(dict(cs)) @staticmethod def test(): # test __call__ p1 = Polynomial({4: 1, 2: -2, 0: 3}) assert abs(p(2) - 11) < 1e-12 # test __add__ p2 = Polynomial({0: 1, 3: 1}) p3 = p1 + p2 assert set(p3.coeff.keys()) == {0, 2, 3, 4} assert np.allclose([p3.coeff[k] for k in sorted(p3.coeff)], [4, -2, 1, 1]) # test __mul__ p4 = Polynomial({1: -2, 2: 3}) p5 = p2 * p4 assert set(p5.coeff.keys()) == {1, 2, 4, 5} assert np.allclose([p5.coeff[k] for k in sorted(p5.coeff)], [-2, 3, -2, 3]) p = Polynomial({4: 1, 2: -2, 0: 3}) p(2) p1 = Polynomial({0: 1, 3: 1}) p2 = Polynomial({1: -2, 2: 3}) (p1 * p2).coeff Polynomial.test() # # Exercise 7.29: Extend class Vec2D to work with lists/tuples class Vec2D: def __init__(self, x, y): self.x = x self.y = y def __add__(self, other): if isinstance(other, Vec2D): return Vec2D(self.x + other.x, self.y + other.y) else: return Vec2D(self.x + other[0], self.y + other[1]) def __radd__(self, other): return self.__add__(other) def __sub__(self, other): if isinstance(other, Vec2D): return Vec2D(self.x - other.x, self.y - other.y) else: return Vec2D(self.x - other[0], self.y - other[1]) def __rsub__(self, other): if isinstance(other, Vec2D): return Vec2D(other.x - self.x, other.y - self.y) else: return Vec2D(other[0] - self.x, other[1] - self.y) def __mul__(self, other): return self.x*other.x + self.y*other.y def __abs__(self): return math.sqrt(self.x**2 + self.y**2) def __eq__(self, other): return self.x == other.x and self.y == other.y def __str__(self): return "(%g, %g)" % (self.x, self.y) u = Vec2D(-2, 4) v = u + (1, 1.5) w = [-3, 2] - v # # Exercise 7.30: Extend class Vec2D to 3D vectors class Vec3D: def __init__(self, x, y, z): self.x = x self.y = y self.z = z def __add__(self, other): return Vec3D(self.x + other.x, self.y + other.y, self.z + other.z) def __sub__(self, other): return Vec3D(self.x - other.x, self.y - other.y, self.z - other.z) def __mul__(self, other): return self.x*other.x + self.y*other.y + self.z*other.z def 
__abs__(self): return math.sqrt(self.x**2 + self.y**2 + self.z**2) def __eq__(self, other): return self.x == other.x and self.y == other.y and self.z == other.z def __str__(self): return "(%g, %g, %g)" % (self.x, self.y, self.z) def cross(self, other): return Vec3D(self.y*other.z - self.z*other.y, self.z*other.x - self.x*other.z, self.x*other.y - self.y*other.x) print(Vec3D(0, 1, 0).cross(Vec3D(1, 0, 0))) # # Exercise 7.31: Use NumPy arrays in class Vec2D class Vec: import math def __init__(self, *vec): self.v = np.asarray(vec).flatten() def __add__(self, other): return Vec(self.v + other.v) def __sub__(self, other): return Vec(self.v - other.v) def __mul__(self, other): return self.v @ other.v def __abs__(self): return math.sqrt(sum(c**2 for c in self.v)) def __eq__(self, other): return (self.v == other.v).all() v1 = v2 = Vec([1, 2, 3]) v1 * v2 # # Exercise 7.32: Impreciseness of interval arithmetics class Interval(object): def __init__(self, lower, upper): self.lo = float(lower) self.up = float(upper) def __add__(self, other): if not isinstance(other, Interval): other = Interval(other, other) a, b, c, d = self.lo, self.up, other.lo, other.up return Interval(a + c, b + d) def __radd__(self, other): return self.__add__(other) def __sub__(self, other): a, b, c, d = self.lo, self.up, other.lo, other.up return Interval(a - d, b - c) def __mul__(self, other): a, b, c, d = self.lo, self.up, other.lo, other.up return Interval(min(a*c, a*d, b*c, b*d), max(a*c, a*d, b*c, b*d)) def __truediv__(self, other): a, b, c, d = self.lo, self.up, other.lo, other.up if c * d <= 0: raise ValueError("Interval %s cannot be denominator because it contains zero" % other) return Interval(min(a/c, a/d, b/c, b/d), max(a/c, a/d, b/c, b/d)) def __str__(self): return "[%g, %g]" % (self.lo, self.up) x = Interval(1, 2) print(x / (1 + x)) # # Exercise 7.33: Make classes for students and courses # + class Student: def __init__(self, name, courses): self.__dict__.update({k: v for k, v in 
locals().items() if k != 'self'}) def __str__(self): s = f"Name: {self.name}\n" for c in self.courses: s += str(c) + "\n" return s class Course: def __init__(self, title, semester, credits, grade): self.__dict__.update({k: v for k, v in locals().items() if k != 'self'}) def __str__(self): return f"{self.title:30} {self.semester:11} {self.credits:2} {self.grade}" # - print(Student("<NAME>", [Course("Astronomy", "2003 fall", 10, "A"), Course("Quantum Mechanics II", "2005 spring", 5, "C")])) # # Exercise 7.34: Find local and global extrema of a function class MinMax: import numpy as np def __init__(self, f, a, b, n): self.__dict__.update({k: v for k, v in locals().items() if k != 'self'}) self._find_extrema() def _find_extrema(self): f, a, b = self.f, self.a, self.b self.Pmin, self.Pmax, self.Fmin, self.Fmax = Pmin, Pmax, Fmin, Fmax = [], [], [], [] x = np.linspace(a, b, self.n) for i, xi in enumerate(x[1:-1]): if f(x[i]) < f(xi) > f(x[i + 2]): Pmax.append(xi) Fmax.append(f(xi)) elif f(x[i]) > f(xi) < f(x[i + 2]): Pmin.append(xi) Fmin.append(f(xi)) if f(a) > f(x[1]): Pmax.insert(1, a) Fmax.insert(1, f(a)) elif f(a) < f(x[1]): Pmin.insert(1, a) Fmin.insert(1, f(a)) if f(b) > f(x[-2]): Pmax.append(b) Fmax.append(f(b)) elif f(b) < f(x[-2]): Pmin.append(b) Fmin.append(f(b)) def _refine_extrema(self): f, a, b, n = self.f, self.a, self.b, self.n Pmin, Pmax, Fmin, Fmax = self.Pmin, self.Pmax, self.Fmin, self.Fmax for i, p in enumerate(Pmin): if p != a and p != b: df = Derivative(f, h= (b - a) / n**2 / 1000) xmin, dfmin = p, df(p) for x in np.linspace(p - (b - a)/n, p + (b - a)/n, n): # just reuse `n` as precision basis if df(x) < dfmin: xmin, dfmin = x, df(x) Pmin[i] = xmin Fmin[i] = f(xmin) for i, p in enumerate(Pmax): if p != a and p != b: df = Derivative(f, h= (b - a) / n**2 / 1000) xmax, dfmin = p, df(p) for x in np.linspace(p - (b - a)/n, p + (b - a)/n, n): # just reuse `n` as precision basis if df(x) < dfmin: xmax, dfmin = x, df(x) Pmax[i] = xmax Fmax[i] = f(xmax) def 
get_global_minimum(self): return min(self.get_all_minima(), key=lambda t: t[1]) def get_global_maximum(self): return max(self.get_all_maxima(), key=lambda t: t[1]) def get_all_minima(self): return [(x, y) for x, y in zip(self.Pmin, self.Fmin)] def get_all_maxima(self): return [(x, y) for x, y in zip(self.Pmax, self.Fmax)] def __str__(self): from textwrap import dedent as dd return dd(f""" All minima: {', '.join(f'{p:.4f}' for p, v in self.get_all_minima())} All maxima: {', '.join(f'{p:.4f}' for p, v in self.get_all_maxima())} Global minimum: {self.get_global_minimum()[0]} Global maximum: {self.get_global_maximum()[0]} """) from math import exp, sin, pi mm = MinMax(f=lambda x: x**2 * exp(-0.2 * x) * sin(2 * pi * x), a=0, b=4, n=5001) print(mm) mm._refine_extrema() print(mm) # # Exercise 7.35: Find the optimal production for a company f = lambda x, y: 45*x + 14*y fa = lambda x, alpha: alpha/14 - 45*x/14 import matplotlib.pyplot as plt from numpy import linspace x = linspace(0, 20, 1000) plt.figure() plt.fill_between(x, 100 - 2*x, alpha=0.3) plt.fill_between(x, 80/3 - 5/3*x, alpha=0.3) plt.fill_between(x, 150/4, alpha=0.3) for alpha in [1, 100, 1000, 500, 700]: plt.plot(x, fa(x, alpha), label=f"alpha = {alpha}") plt.ylabel("y") plt.xlabel("x") plt.legend() plt.show() f(x=16, y=0) # optimal solution. 
# + import sympy as sp x, y = sp.symbols('x y') conditions = [ "2*x + y <= 100", "5*x + 3*y <= 80", " 4*y <= 150", " x >= 0", " y >= 0" ] # Compute the intersections intersections = [] eqs = [ sp.Eq(eval(a), eval(b)) for a, b in [e.split("=") for e in [c.replace("<=", "=").replace(">=", "=") for c in conditions] ]] for e1 in eqs: for e2 in eqs: i = sp.solve([e1, e2]) if len(i) > 1 and i not in intersections: intersections.append(i) # Check conditions on intersections corners = [] for i in intersections: corners.append(tuple(i.values())) x, y = sp.symbols('x y') x, y = i[x], i[y] for c in conditions: if not eval(c): del corners[-1] break # - corners f = lambda c: 45*c[0] + 14*c[1] x, y = max(corners, key=f) print(x, y) # yay!
Exercises 7.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import numpy as np import matplotlib.pyplot as plt import json import os from sklearn.model_selection import train_test_split from sklearn.model_selection import StratifiedKFold # ## Make split vid_path = "../../data/pocus_videos/convex" class_short = ["cov", "pne", "reg"] vid_files = [v for v in os.listdir(vid_path) if v[:3].lower() in class_short] labels = [vid[:3].lower() for vid in vid_files] train_files,test_files, train_labels, test_labels = train_test_split(vid_files, labels, stratify=labels) np.unique(train_labels, return_counts=True) np.unique(test_labels, return_counts=True) # + MY_FR = 5 DATA_SIZE = 5 data_3d = [] labels_3d = [] files_3d = [] for train_vid, train_lab in zip(test_files, test_labels): cap = cv2.VideoCapture(os.path.join(vid_path, train_vid)) fr = cap.get(5) show_every = round(fr/MY_FR) print(train_vid, fr, cap.get(7), "available frames:", cap.get(7)/show_every) frames_available = cap.get(7)/show_every end_is_close = frames_available % DATA_SIZE >= 4 number_selected = int(end_is_close) + frames_available//DATA_SIZE print(number_selected, cap.get(7), "show every", show_every) current_data = [] # for frame_id in range(int(cap.get(7))): while cap.isOpened(): frame_id = cap.get(1) ret, frame = cap.read() if (ret != True): break # plt.imshow(image) # plt.show() image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) image = cv2.resize(image, (224, 224)) if frame_id%show_every==0 or (end_is_close and frame_id==int(cap.get(7)-1)): current_data.append(image) if len(current_data)==DATA_SIZE: data_3d.append(current_data) labels_3d.append(train_lab) files_3d.append(train_vid) current_data = [] cap.release() # - np.asarray(data_3d).shape import pickle with open("../../data/vid_class_test.dat", "wb") as outfile: pickle.dump((data_3d, 
labels_3d, files_3d), outfile) # ## 10 fold vid_files = [v for v in os.listdir(vid_path) if v[:3].lower() in class_short] labels = [vid[:3].lower() for vid in vid_files] # + X = np.array(vid_files) y = np.array(labels) skf = StratifiedKFold(n_splits=5, shuffle=True) skf.get_n_splits(X, y) video_cross_val = {} for fold, (train_index, test_index) in enumerate(skf.split(X, y)): print("TEST:", test_index) X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] print(np.unique(y_train, return_counts=True)) train_test_dict = {} train_test_dict["train"] = (X_train.tolist(), y_train.tolist()) train_test_dict["test"] = (X_test.tolist(), y_test.tolist()) video_cross_val[fold] = train_test_dict # - with open("../../data/video_input_data/cross_val.json", "w") as outfile: json.dump(video_cross_val, outfile) a = [video_cross_val[i]["test"][0] for i in range(5)] a = [e for b in a for e in b ] assert len(a)==len(np.unique(a)) # ### Cross val from cross-validation folder # + check = "/Users/ninawiedemann/Desktop/Projects/covid19_pocus_ultrasound.nosync/data/cross_validation" videos_dir = "/Users/ninawiedemann/Desktop/Projects/covid19_pocus_ultrasound.nosync/data/pocus_videos/convex" file_list = [] video_cross_val = {} for split in range(5): train_test_dict = {"test":[[],[]], "train":[[],[]]} for folder in os.listdir(check): if folder[0]==".": continue for classe in os.listdir(os.path.join(check, folder)): if classe[0]=="." or classe[0]=="u": continue uni = [] for file in os.listdir(os.path.join(check, folder, classe)): if file[0]=="." 
or len(file.split("."))==2: continue parts = file.split(".") if not os.path.exists(os.path.join(videos_dir, parts[0]+"."+parts[1][:3])): butterfly_name = parts[0][:3]+"_Butterfly_"+parts[0][4:]+".avi" if not os.path.exists(os.path.join(videos_dir,butterfly_name)): print("green dots in video or aibronch", file) continue uni.append(butterfly_name) else: uni.append(parts[0]+"."+parts[1][:3]) uni_files_in_split = np.unique(uni) uni_labels = [vid[:3].lower() for vid in uni_files_in_split] if folder[-1]==str(split): train_test_dict["test"][0].extend(uni_files_in_split) train_test_dict["test"][1].extend(uni_labels) else: train_test_dict["train"][0].extend(uni_files_in_split) train_test_dict["train"][1].extend(uni_labels) video_cross_val[split] = train_test_dict # - for fold, (train_index, test_index) in enumerate(skf.split(X, y)): print("TEST:", test_index) X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] print(np.unique(y_train, return_counts=True)) train_test_dict = {} train_test_dict["train"] = (X_train.tolist(), y_train.tolist()) train_test_dict["test"] = (X_test.tolist(), y_test.tolist()) video_cross_val[fold] = train_test_dict # ### Script for butterfly data to videos from skvideo import io from pocovidnet.utils_butterfly_data import * butterfly_dir = "../../data/butterfly" out_dir = "../../data/butterfly_test" actual_names, labels = get_paths("../../data/pocovid_data.csv") # manually add the ones which I know are in the data files_to_process, labs_to_process = get_processing_info( butterfly_dir, actual_names, labels ) del_upper = 100 for i in range(1, len(files_to_process)): vid_arr = [] fp = files_to_process[i] fn = fp.split(os.sep)[-1] cap = cv2.VideoCapture(fp) # capturing the video from the given path # frame rate n_frames = cap.get(7) frameRate = cap.get(5) out_path = os.path.join(out_dir, label_to_dir(labs_to_process[i]).split(os.sep)[1][:3]) print(out_path) print( "PROCESS", fn, labs_to_process[i], "framerate", 
int(cap.get(5)), "width", cap.get(3), "height", cap.get(4), "number frames:", cap.get(7) ) if os.path.exists(out_path+"_"+fn.split(".")[0]+".mpeg"): print("already done, ", out_path+"_"+fn.split(".")[0]+".mpeg") continue nr_selected = 0 while cap.isOpened(): frameId = cap.get(1) # current frame number ret, frame = cap.read() if not ret: break frame = np.asarray(frame).astype(int) # width_box = np.min(frame.shape[:2]) # crop width_border = int(cap.get(3) * 0.15) width_box = int(cap.get(3)) - 2 * width_border if width_box + del_upper > cap.get(4): width_box = int(cap.get(4)-del_upper) width_border = int(cap.get(3)/2-width_box/2) # print(del_upper, width_box, width_border) frame = frame[del_upper:width_box + del_upper, width_border:width_box + width_border] # print(frame.shape) # frame = frame[width_border:width_box+width_border] # detect green point green_point = frame[:, :, 1] - frame[:, :, 0] # get first frame for green point deletion: if frameId == 0: frame_start = green_point # skip the green moving points if np.any((green_point - frame_start) > 100): plt.imshow(green_point) plt.show() print("VID WITH GREEN DOT") break # delete blue symbol blue_symbol = np.where(green_point < -50) frame[blue_symbol] = frame[0, 0] # delete green symbol if np.any(green_point > 220): green_symbol = np.where(green_point > 50) frame[green_symbol] = frame[0, 0] # resize # print(frame.shape) frame = np.asarray(frame).astype(np.uint8) frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) frame = cv2.resize(frame, (240, 240)) if frameId==0: plt.imshow(frame) plt.show() vid_arr.append(frame) # SAVE # if (frameId % every_x_image == 0): # # storing the frames in a new folder named test_1 # filename = out_path + fn + "_frame%d.jpg" % frameId # cv2.imwrite(filename, frame) # nr_selected += 1 cap.release() vid_arr = np.asarray(vid_arr) # print(out_path, fp, fn) if len(vid_arr)>5: io.vwrite(out_path+"_Butterfly_"+fn.split(".")[0]+".mpeg", vid_arr, outputdict={"-vcodec":"mpeg2video"}) print("DONE", 
vid_arr.shape) else: print("GREEN DOT:", fn) # ## Double check data data_path = "/Users/ninawiedemann/Desktop/Projects/covid19_pocus_ultrasound.nosync/data/video_input_data/conv3d_train_fold_1.dat" with open( data_path, "rb" ) as infile: X_train, train_labels_text, train_files = pickle.load(infile) X_train.shape np.unique(train_labels_text, return_counts=True) for i, vid in enumerate(X_train): print(train_files[i]) plt.imshow(vid[0, :, :, 0]) plt.show() # ### Find out framerate for vid in os.listdir("../../data/pocus_videos/convex"): if vid[0]==".": continue cap = cv2.VideoCapture("../../data/pocus_videos/convex/"+vid) print(vid, [cap.get(i) for i in range(7)]) print(cap.get(4)) cap.release() # ## Evaluation from pocovidnet.evaluate_video import VideoEvaluator from pocovidnet import VIDEO_MODEL_FACTORY from pocovidnet.videoto3d import Videoto3D from tensorflow.keras import Input, Model from tensorflow.keras.layers import ( Dense, GlobalAveragePooling3D ) NUM_FOLDS = 5 class GenesisEvaluator(): def __init__(self, weights_dir="video_genesis_lr1e4", ensemble=True, split=None, model_id="genesis"): """ Constructor of COVID model evaluator class. Arguments: ensemble {str} -- Whether the model ensemble is used. num_classes: must be 3 or 4, how many classes the model was trained on """ # self.root = os.path.join('/', *DIR_PATH.split('/')[:-1]) self.split = split self.ensemble = ensemble if model_id not in VIDEO_MODEL_FACTORY.keys(): raise ValueError( f'Wrong model {model_id}. 
Options are:{MODEL_FACTORY.keys()}' ) else: self.model_id = model_id if ensemble: # retores 5 weight paths self.weights_paths = [ os.path.join( weights_dir, 'fold_' + str(fold), "variables", "variables" ) for fold in range(NUM_FOLDS) ] else: if split is None or split < 0 or split > 4: raise ValueError(f'Provide split between 0 and 4, not {split}') self.weights_paths = [ os.path.join( # self.root weights_dir, 'fold_' + str(self.split), "variables", "variables" ) ] self.class_mappings = ['covid', 'pneunomia', 'regular'] # Get Genesis base model base_models = [ VIDEO_MODEL_FACTORY[self.model_id](( 1, 64, 64, 32), batch_normalization=True) for _ in range(len(self.weights_paths)) ] # Get model head self.models = [] for mod in base_models: x = mod.get_layer('depth_7_relu').output x = GlobalAveragePooling3D()(x) x = Dense(1024, activation='relu')(x) output = Dense(len(self.class_mappings), activation='softmax')(x) head_model = Model(inputs=mod.input, outputs=output) self.models.append(head_model) # restore weights try: for model, path in zip(self.models, self.weights_paths): model.load_weights(path) except Exception: raise Exception('Error in model restoring.') print(f'Model restored. 
Class mappings are {self.class_mappings}') def __call__(self, video_path, width=64, depth=5, fr=5): # read in video vid3d = Videoto3D("",width, width, depth, fr) vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20} X_test, _, fn = vid3d.video3d([video_path], ["cov"]) # cov as dummy label print(X_test.shape) assert len(np.unique(fn))==1 # prepare for genesis input_shape = 42 input_shape = 1, 64, 64, 32 X_test = np.transpose(X_test, [0, 4, 2, 3, 1]) X_test = np.repeat(X_test, [6, 7, 7, 6, 6], axis=-1) # res = self.models[0].predict(X_test[0]) res = [model.predict(X_test) for model in self.models] return np.array(res) gen = GenesisEvaluator(ensemble=False, split=0) gen("../../data/pocus_videos/convex/Pneu-Atlas-pneumonia.gif") # prep_vid_snippets("../../data/pocus_videos/convex/"+"Pneu-Atlas-pneumonia.gif") with open("../../data/video_input_data/cross_val.json", "r") as infile: cross_val_split = json.load(infile) WEIGHTS_DIR = "../video_genesis_lr1e4" VIDEO_DIR = "../../data/pocus_videos/convex" all_genesis_preds = [] all_frame_preds = [] for i in range(5): # gen_eval = GenesisEvaluator(weights_dir = WEIGHTS_DIR, ensemble=False, split=i) # normal_eval = VideoEvaluator(ensemble=False, split=i, model_id="vgg_cam", num_classes=4) files = cross_val_split[str(i)]["test"][0] # print(files) for f in files: print("evaluate", f) # run genesis model vid3d = Videoto3D("", 64, 64, 5, 5) vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20} X_test, _, fn = vid3d.video3d( [os.path.join(VIDEO_DIR, f)], ["cov"] ) # cov as dummy label print(X_test.shape) assert len(np.unique(fn)) == 1 # preds = gen_eval(os.path.join(VIDEO_DIR, f)) # vid_pred_genesis = np.argmax(np.mean(preds, axis=(0,1))) # all_genesis_preds.append(preds) # # run cam model # preds_framebased = normal_eval(os.path.join(VIDEO_DIR, f)) # frame_pred = np.argmax(np.mean(preds_framebased, axis=(0,1)),1) # all_frame_preds.append(preds_framebased) # print("genesis pred", vid_pred_genesis, "frame based pred", frame_pred) from 
tensorflow.keras.applications import VGG16 baseModel = VGG16( include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000 ) vid3d = Videoto3D("",64, 64, 5, 5) vid3d.max_vid = {"cov": 20, "pne": 20, "reg": 20} X_test, _, fn = vid3d.video3d(["../../data/pocus_videos/convex/Reg-NormalLungs.mp4"], ["cov"]) X_test.shape print(len(np.unique(fn))) # ## Evaluation of video classification i,j = cross_val_split[str(i)]["test"] # + lab_dict = {"cov":0, "pne":1, "reg":2} this_class = {"cov":"covid", "pne":"pneumonia", "reg":"regular"} saved_gt = [] for i in range(5): all_labels = [] files, labs = cross_val_split[str(i)]["test"] for j in range(len(files)): if not "Butterfly" in files[j]: assert os.path.exists(os.path.join("../../data/cross_validation/split"+str(i), this_class[labs[j]], files[j]+"_frame0.jpg")), files[j]+"_"+str(i) if files[j]!= "Reg-Youtube.mp4" and files[j]!="Reg-NormalLungs.mp4": all_labels.append(lab_dict[labs[j]]) saved_gt.append(all_labels) # - len(saved_gt[3]) import pickle eval_path = "evaluation_outputs.dat" with open(eval_path, "rb") as infile: vidbased, frame_based = pickle.load(infile) from sklearn.metrics import recall_score, precision_score, classification_report, matthews_corrcoef, balanced_accuracy_score import pandas as pd def mcc_multiclass(y_true, y_pred): y_true = np.asarray(y_true) y_pred = np.asarray(y_pred) mcc_out = [] for classe in np.unique(y_true): y_true_binary = (y_true==classe).astype(int) y_pred_binary = (y_pred==classe).astype(int) mcc_out.append(matthews_corrcoef(y_true_binary, y_pred_binary)) return mcc_out def specificity(y_true, y_pred): # true negatives / negatives y_true = np.asarray(y_true) y_pred = np.asarray(y_pred) spec_out = [] for classe in np.unique(y_true): negatives = np.sum((y_true!=classe).astype(int)) tn = np.sum((y_pred[y_true!=classe]!=classe).astype(int)) spec_out.append(tn/negatives) return spec_out # + classifier = frame_based # , frame_based]): saved_logits = [[] 
for _ in range(5)] split_counter = 0 frame_counter = len(saved_gt[0]) for vid_ind in range(len(vidbased)): # print(frame_based[vid_ind].shape) # print(vid_ind, split_counter) saved_logits[split_counter].append(np.argmax(np.mean(classifier[vid_ind], axis=0))) # saved_logits[split_counter].append(np.argmax(np.mean(classifier[vid_ind], axis=(0,1)))) if len(saved_logits[split_counter])==len(saved_gt[split_counter]): # next cross val split # print(vid_ind, len(saved_gt[split_counter]), split_counter) frame_counter += len(saved_gt[split_counter]) split_counter += 1 assert len(saved_logits[2])==len(saved_gt[2]) all_reports = [] accs = [] bal_accs = [] # vid_accs, _, vid_accs_bal, _ = video_accuracy(saved_logits, saved_gt, saved_files) for s in range(5): gt_s = saved_gt[s] print(len(gt_s), saved_logits[s]) pred_idx_s = saved_logits[s] # np.argmax(np.array(saved_logits[s]), axis=1) report = classification_report( gt_s, pred_idx_s, target_names=CLASSES, output_dict=True ) mcc_scores = mcc_multiclass(gt_s, pred_idx_s) spec_scores = specificity(gt_s, pred_idx_s) for i, cl in enumerate(CLASSES): report[cl]["mcc"] = mcc_scores[i] report[cl]["specificity"] = spec_scores[i] df = pd.DataFrame(report).transpose() df = df.drop(columns="support") df["accuracy"] = [report["accuracy"] for _ in range(len(df))] bal = balanced_accuracy_score(gt_s, pred_idx_s) df["balanced"] = [bal for _ in range(len(df))] # df["video"] = vid_accs[s] # df["video_balanced"] = vid_accs_bal[s] # print(df[:len(CLASSES)]) #print(report["accuracy"]) # print(np.array(df)[:3,:]) accs.append(report["accuracy"]) bal_accs.append(balanced_accuracy_score(gt_s, pred_idx_s)) # df = np.array(report) all_reports.append(np.array(df)[:len(CLASSES)]) df_arr = np.around(np.mean(all_reports, axis=0), 2) df_classes = pd.DataFrame(df_arr, columns=["Precision", "Recall", "F1-score", "MCC", "Specificity", "Accuracy", "Balanced"], index=CLASSES) print(df_classes) df_std = np.around(np.std(all_reports, axis=0), 2) df_std = 
pd.DataFrame(df_std, columns=["Precision", "Recall", "F1-score", "MCC", "Specificity", "Accuracy", "Balanced"], index=CLASSES) df_classes = df_classes[["Accuracy", "Balanced", "Precision", "Recall","Specificity", "F1-score", "MCC"]] df_std = df_std[["Accuracy", "Balanced", "Precision", "Recall","Specificity", "F1-score", "MCC"]] # df_classes.to_csv("model_comparison/vid_cam_3_mean.csv") # df_std.to_csv("model_comparison/vid_cam_3_std.csv") # -
pocovidnet/notebooks/video_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import json
from string import Template  # FIX: Template was used below but never imported

import kfp
from kfp import components
from kfp.components import func_to_container_op
import kfp.dsl as dsl

model_name = "neural_machine_translation"
user_namespace = "kubeflow-mailsforyashj"


def add_istio_annotation(op):
    """Pipeline op transformer: disable Istio sidecar injection on the op's pod."""
    op.add_pod_annotation(name='sidecar.istio.io/inject', value='false')
    return op


def convert_experiment_result(experiment_result) -> str:
    """Turn the Katib launcher's JSON output (list of best hyperparameters)
    into a single 'name=value name=value ...' CLI argument string.

    FIX: the pipeline below called `convert_op`, which was never defined
    (note `func_to_container_op` was imported but unused); this converter,
    wrapped with func_to_container_op, supplies it.
    """
    import json
    args = []
    for hp in json.loads(experiment_result):
        print(hp)
        args.append("%s=%s" % (hp["name"], hp["value"]))
    return " ".join(args)


convert_op = func_to_container_op(convert_experiment_result)

# Container image and training command shared by both Katib trial replicas.
TRIAL_IMAGE = "gcr.io/gsoc-kf-example/distributed_tf_2_neural_machine_translation:1.0"
TRIAL_COMMAND = (
    "python3 /app/distributed_nmt_with_attention.py"
    " {{- with .HyperParameters}} {{- range .}} {{.Name}}={{.Value}} {{- end}} {{- end}}"
)


def _trial_replica_spec():
    """One TFJob replica spec, used for both Chief and Worker in the Katib
    trial template (the Go-template placeholders in TRIAL_COMMAND are expanded
    by Katib for each trial)."""
    return {
        "replicas": 1,
        "restartPolicy": "OnFailure",
        "template": {
            "spec": {
                "containers": [
                    {
                        "command": [TRIAL_COMMAND],
                        "image": TRIAL_IMAGE,
                        "name": "tensorflow",
                    }
                ]
            }
        },
    }


@dsl.pipeline(
    name="End to end pipeline",
    description="An end to end example including hyperparameter tuning"
)
def text_classification_pipeline(name=model_name, namespace=user_namespace, step=4000):
    # step 1: create a Katib experiment to tune hyperparameters
    objectiveConfig = {
        "type": "minimize",
        "goal": 1.8,
        "objectiveMetricName": "loss",
    }
    algorithmConfig = {"algorithmName": "tpe"}
    parameters = [
        {"name": "--epochs", "parameterType": "int",
         "feasibleSpace": {"min": "1", "max": "3"}},
        {"name": "--learning_rate", "parameterType": "double",
         "feasibleSpace": {"min": "0.0001", "max": "0.01"}},
    ]
    rawTemplate = {
        "apiVersion": "kubeflow.org/v1",
        "kind": "TFJob",
        "metadata": {
            "name": "{{.Trial}}",
            "namespace": "{{.NameSpace}}"
        },
        "spec": {
            "tfReplicaSpecs": {
                # Chief and Worker were byte-identical copies; build once.
                "Chief": _trial_replica_spec(),
                "Worker": _trial_replica_spec(),
            }
        }
    }
    trialTemplate = {
        "goTemplate": {
            "rawTemplate": json.dumps(rawTemplate)
        }
    }
    metricsCollectorSpec = {
        "collector": {
            "kind": "StdOut"
        }
    }
    katib_experiment_launcher_op = components.load_component_from_url(
        'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/kubeflow/katib-launcher/component.yaml'
    )
    op1 = katib_experiment_launcher_op(
        experiment_name=name,
        experiment_namespace=namespace,
        parallel_trial_count=3,
        max_trial_count=12,
        objective=str(objectiveConfig),
        algorithm=str(algorithmConfig),
        trial_template=str(trialTemplate),
        parameters=str(parameters),
        metrics_collector=str(metricsCollectorSpec),
        # experiment_timeout_minutes=experimentTimeoutMinutes,
        delete_finished_experiment=False)

    # step2: create a TFJob to train your model with best hyperparameter tuned by Katib
    # FIX: the training command now takes $args (the best hyperparameters
    # produced by the Katib launcher and converted by convert_op below).
    # The previous version embedded Katib Go-template placeholders, which are
    # only expanded inside Katib trial templates and would have been passed
    # through verbatim here, while the 'args' value handed to substitute()
    # went unused.
    tfjobjson_template = Template("""
{
    "apiVersion": "kubeflow.org/v1",
    "kind": "TFJob",
    "metadata": {
        "name": "$name",
        "namespace": "$namespace",
        "annotations": {
            "sidecar.istio.io/inject": "false"
        }
    },
    "spec": {
        "tfReplicaSpecs": {
            "Chief": {
                "replicas": 1,
                "restartPolicy": "OnFailure",
                "template": {
                    "metadata": {
                        "annotations": {
                            "sidecar.istio.io/inject": "false"
                        }
                    },
                    "spec": {
                        "containers": [
                            {
                                "command": [
                                    "python3 /app/distributed_nmt_with_attention.py $args"
                                ],
                                "image": "gcr.io/gsoc-kf-example/distributed_tf_2_neural_machine_translation:1.0",
                                "name": "tensorflow"
                            }
                        ]
                    }
                }
            },
            "Worker": {
                "replicas": 1,
                "restartPolicy": "OnFailure",
                "template": {
                    "metadata": {
                        "annotations": {
                            "sidecar.istio.io/inject": "false"
                        }
                    },
                    "spec": {
                        "containers": [
                            {
                                "command": [
                                    "python3 /app/distributed_nmt_with_attention.py $args"
                                ],
                                "image": "gcr.io/gsoc-kf-example/distributed_tf_2_neural_machine_translation:1.0",
                                "name": "tensorflow"
                            }
                        ]
                    }
                }
            }
        }
    }
}
""")
    op2 = convert_op(op1.output)
    # Template.substitute requires every $placeholder to be present in the
    # mapping; extra keys (e.g. 'step', kept for interface compatibility)
    # are simply ignored.
    tfjobjson = tfjobjson_template.substitute(
        {'args': op2.output,
         'name': name,
         'namespace': namespace,
         'step': step,
         })
    tfjob = json.loads(tfjobjson)
    train = dsl.ResourceOp(
        name="train",
        k8s_resource=tfjob,
        success_condition='status.replicaStatuses.Worker.succeeded==1,status.replicaStatuses.Chief.succeeded==1'
    )
    dsl.get_pipeline_conf().add_op_transformer(add_istio_annotation)


# Assign permission to Kubeflow Pipeline Service Account
# !kubectl create clusterrolebinding $user_namespace-admin --clusterrole cluster-admin --serviceaccount=kubeflow:pipeline-run

# +
# Specify Kubeflow Pipeline Host
host = None

# Submit a pipeline run
from kfp_tekton import TektonClient
TektonClient(host=host).create_run_from_pipeline_func(text_classification_pipeline, arguments={})
# -

# Cleanup your created jobs
# !kubectl delete experiment -n $user_namespace $model_name
# !kubectl delete tfjob -n $user_namespace $model_name
tensorflow_cuj/neural_machine_translation/tekton-pipeline-with-python-sdk.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import pandas as pd
import researchpy as rp

# Load the survey responses (JnU distance-learning questionnaire export).
data = pd.read_csv('JnU_Distance_Learning.csv')
data.head(2)

# Normalize column names: lowercase, trim, spaces -> underscores, so each
# survey question can be addressed with a stable key below.
data.columns = data.columns.str.lower().str.strip().str.replace(' ', '_')
data.head()

# ## Demographic Summary

# Variable-level codebook for age (type, counts, missing).
rp.codebook(data['age'])

# Continuous summary of age, with 95% CI and standard error.
rp.summary_cont(data['age'])

# Bin age into three equal-width groups and attach labels.
# NOTE(review): pd.cut uses equal-width bins over the observed range, so the
# labels "18-20"/"21-23"/"23-25" are nominal -- confirm they match the actual
# bin edges of this sample.
groups = ["18-20", "21-23", "23-25"]
data['age_group'] = pd.cut(data['age'], 3, labels = groups)
data.head()

rp.summary_cat(data['age_group'])

rp.summary_cat(data['gender'])

rp.summary_cat(data['how_long_was_the_average_class_time?'])

# ## Distance Learning Information

rp.summary_cat(data['definition'])

data['which_type_of_platforms__did_you_use?']

# Percentage share of each platform combination.
data['which_type_of_platforms__did_you_use?'].value_counts(normalize=True) * 100

# Manual sum of the small residual categories from the table above.
1.369863 + 0.684932 + 0.684932 + 0.684932+ 0.684932

data['how_long_have_you_been_doing_online_classes?'].value_counts(normalize=True) * 100

data['how_long_was_the_average_class_time?'].value_counts(normalize=True) * 100

data['average_classes_per_week'].describe()

data['what_is_your_preferred_online_class_duration?'].value_counts(normalize=True) * 100

data['satisfactory_level_in_online_class:'].value_counts(normalize=True) * 100

rp.summary_cat(data['satisfactory_level_in_online_class:'])

# ## Mental Disturbance

rp.summary_cat(data['did_you_feel_any_mental_disturbance_during_online_class?'])

# Multi-answer question: split the comma-separated selections into one
# indicator column per answer, following the approach from:
# https://stackoverflow.com/questions/20162926/process-multiple-answer-questionnaire-from-google-forms-results-with-pandas

data['what_kind_of_mental_disturbance_did_you_feel_during_online_class?']

data['what_kind_of_mental_disturbance_did_you_feel_during_online_class?'].value_counts()

mental_problems = data['what_kind_of_mental_disturbance_did_you_feel_during_online_class?'].str.get_dummies(sep=', ')
mental_problems

# Recode 0/1 indicators to "No"/"Yes" for readable frequency tables.
mental_problems = mental_problems.replace({0: "No", 1: 'Yes'})

# +
# Trim whitespace left over from the comma splitting.
mental_problems.columns = mental_problems.columns.str.strip()
mental_problems.columns
# -

rp.summary_cat(mental_problems)

# ## Physical Problems

rp.summary_cat(data['did_you_face_any_physical__problem_during_online_class?'])

data['what_kind_of_physical_problem__did_you_face_during_online_class?']

# Same multi-answer treatment as for the mental-disturbance question.
physical_problems = data['what_kind_of_physical_problem__did_you_face_during_online_class?'].str.get_dummies(sep=', ')
physical_problems.head()

physical_problems = physical_problems.replace({0: "No", 1: 'Yes'})
physical_problems.head()

# +
physical_problems.columns = physical_problems.columns.str.strip()
physical_problems.columns
# -

rp.summary_cat(physical_problems)

# ## Cross Tabulation

mental_problems

mental_problems.columns

data.columns

# Chi-square tests of association between demographics and specific symptoms
# ('Felt bored' / 'Annoying' are indicator columns produced above).
cross, res = rp.crosstab(data['gender'], mental_problems['Felt bored'], test= "chi-square")
cross

res

cross, res = rp.crosstab(data['age_group'], mental_problems['Annoying'], test= "chi-square")
cross

res

cross, res = rp.crosstab(data['how_long_was_the_average_class_time?'], mental_problems['Annoying'], test= "chi-square")
cross

res
notebooks/JnUDistanceLearningAnalysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Installing packages in Colab
#
# You can install most packages using *pip* together with *!*:

# ! pip install tu-paquete-favorito

# The exclamation mark tells colab/jupyter to run the command via the system shell.

# ## Alternative via subprocess

# +
import subprocess

def run_cmd(cmd):
    # Run a shell command and print its stdout (decoded as UTF-8).
    # NOTE(review): shell=True with an interpolated string is fine for this
    # tutorial, but avoid it with untrusted input.
    print('Output of "{}":'.format(cmd))
    print(subprocess.run(cmd,stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))

run_cmd('pip install tu-paquete-favorito')
# -

# # Colab badge
#
# Code:
# ```
# <a href="https://colab.research.google.com/github/riiaa19_workshop_template/blob/master/notebooks/1_ML_dia_a_dia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# ```

# Creates this badge; mind the github repo url and notebook name:
# <a href="https://colab.research.google.com/github/riiaa19_workshop_template/blob/master/notebooks/1_ML_dia_a_dia.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# # Code conditional on Colab/Jupyter
#
# Want a code block that is conditional on whether it runs locally with Jupyter or via colab?

# +
import sys
# Colab injects the 'google.colab' module; its presence identifies the runtime.
IN_COLAB = 'google.colab' in sys.modules

if IN_COLAB:
    print('usando colab')
else:
    print('probablemente en jupyter')
# -

# # Cloning a github repo in colab

# +
import os
import subprocess
import sys

GIT_NAME='riiaa19_workshop_template'
GIT_URL='https://riiaa@github.com/riiaa/{}.git'.format(GIT_NAME)

IN_COLAB = 'google.colab' in sys.modules

def run_cmd(cmd):
    # Same helper as above; repeated so this cell is standalone/copy-pastable.
    print('Output of "{}":'.format(cmd))
    print(subprocess.run(cmd,stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8'))

if IN_COLAB:
    # On Colab: clone the repo (with git-lfs) and move its contents to the
    # working directory, then remove the now-empty clone directory.
    SRC_DIR='.'
    run_cmd('sudo apt-get install git-lfs')
    run_cmd('git lfs install')
    run_cmd('rm -rf sample_data')
    run_cmd('rm -rf {}'.format(GIT_NAME))
    run_cmd('git clone --verbose --progress {}'.format(GIT_URL))
    run_cmd('mv {}/* . '.format(GIT_NAME))
    run_cmd('rm -rf {}'.format(GIT_NAME))
else:
    # Locally the notebook lives in notebooks/, so the repo root is one level up.
    SRC_DIR='..'

print('Using colab? {}, using root directory "{}"'.format(IN_COLAB,SRC_DIR))
# -

# # Adding the repo's 'code' directory to the python path

# +
import sys, os

IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
    SRC_DIR='.'
else:
    SRC_DIR='..'

# Make modules in <repo>/code importable.
sys.path.append(os.path.join(SRC_DIR,'code'))
import utils
# -

for i in range(1,5):
    utils.header_html('Header',i)

# # For loop with tqdm

# +
from tqdm.autonotebook import tqdm
import time

for i in tqdm(range(10)):
    time.sleep(1)
notebooks/Trucos_Colab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
# iris data clustering
#
# FIX: numpy and matplotlib were originally imported only in a later cell,
# so this first cell raised NameError on a fresh "Restart & Run All".
# All imports are now hoisted to the top.
import numpy as np
import matplotlib.pyplot as plt

from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]

iris = datasets.load_iris()
X = iris.data
y = iris.target

# Fit k-means with 3 clusters (the true class count) and with 8 clusters,
# and show each clustering in a 3-D scatter of three iris features.
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8)}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum)
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
    plt.cla()
    est.fit(X)
    labels = est.labels_

    # FIX: np.float is deprecated/removed in modern numpy; the builtin float
    # is the documented replacement and behaves identically here.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(float), s=100)

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    plt.title(name)
    fignum = fignum + 1
plt.show()
# -

# qda: toy 2-class dataset for quadratic discriminant analysis
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
y = np.array([1, 1, 1, 2, 2, 2])
plt.scatter(X.T[0], X.T[1], c=y, s=100)
plt.title("data")
plt.show()

# +
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis

# Fit QDA and show the class-conditional probabilities at the query point (0, 0).
model = QuadraticDiscriminantAnalysis().fit(X, y)
x = [[0, 0]]
p = model.predict_proba(x)[0]

plt.subplot(211)
plt.scatter(X.T[0], X.T[1], c=y, s=100)
plt.scatter(x[0][0], x[0][1], c='r', s=100)
plt.title("data")
plt.subplot(212)
plt.bar(model.classes_, p, align="center")
plt.title("conditional probability")
plt.axis([0, 3, 0, 1])
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.tight_layout()
plt.show()
# -

# +
# Logistic regression -- (translated from Korean) often the weakest performer
# among these classification models: the threshold / decision criterion can
# shift depending on x_new, i.e. its performance degrades the most when
# outliers enter the data.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression

X0, y = make_classification(n_features=1, n_redundant=0, n_informative=1,
                            n_clusters_per_class=1, random_state=4)
model = LogisticRegression().fit(X0, y)

# Plot the fitted sigmoid, the decision boundary at 0.5, and the predicted
# class / probability for the first sample.
xx = np.linspace(-3, 3, 100)
sigm = 1.0/(1 + np.exp(-model.coef_[0][0]*xx - model.intercept_[0]))

plt.subplot(211)
plt.plot(xx, sigm)
plt.scatter(X0, y, marker='o', c=y, s=100)
plt.scatter(X0[0], model.predict(X0[:1]), marker='o', s=300, c='r', lw=5, alpha=0.5)
plt.plot(xx, model.predict(xx[:, np.newaxis]) > 0.5, lw=2)
plt.scatter(X0[0], model.predict_proba(X0[:1])[0][1], marker='x', s=300, c='r', lw=5, alpha=0.5)
plt.axvline(X0[0], c='r', lw=2, alpha=0.5)
plt.xlim(-3, 3)
plt.subplot(212)
plt.bar(model.classes_, model.predict_proba(X0[:1])[0], align="center")
plt.xlim(-1, 2)
plt.gca().xaxis.grid(False)
plt.xticks(model.classes_)
plt.title("conditional probability")
plt.tight_layout()
plt.show()
# -
Practice/05-01.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + id="Eq4raiEN-hu8"
# Import libraries:
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
# FIX: "from deepxde import * as dde" is invalid Python (SyntaxError);
# the intended (and conventional) form is a plain aliased import.
import deepxde as dde
import numpy as np


# + id="fDLgsURPSEg6"
def heat_eq_exact_solution(x, t):
    """Analytic solution u(x, t) of the 1-D heat equation with initial
    condition u(x, 0) = sin(n*pi*x/L) and zero Dirichlet boundaries:

        u(x, t) = exp(-n^2 pi^2 a t / L^2) * sin(n pi x / L)

    Relies on the module-level problem parameters `a` (thermal diffusivity),
    `L` (bar length) and `n` (mode number) defined in the __main__ block.

    Parameters
    ----------
    x : np.ndarray
    t : np.ndarray
    """
    return np.exp(-(n**2*np.pi**2*a*t)/(L**2))*np.sin(n*np.pi*x/L)


def gen_exact_solution():
    """Evaluate the exact solution on a regular (x, t) grid and save it
    to 'heat_eq_data.npz' (arrays: x, t, usol)."""
    # Number of points in each dimension:
    x_dim, t_dim = (256, 201)

    # Bounds of 'x' and 't':
    x_min, t_min = (0, 0.)
    x_max, t_max = (L, 1.)

    # Create tensors:
    t = np.linspace(t_min, t_max, num=t_dim).reshape(t_dim, 1)
    x = np.linspace(x_min, x_max, num=x_dim).reshape(x_dim, 1)
    usol = np.zeros((x_dim, t_dim)).reshape(x_dim, t_dim)

    # Obtain the value of the exact solution for each generated point:
    for i in range(x_dim):
        for j in range(t_dim):
            usol[i][j] = heat_eq_exact_solution(x[i], t[j])

    # Save solution:
    np.savez('heat_eq_data', x=x, t=t, usol=usol)


# FIX: removed the stray module-level `data = np.load('heat_eq_data.npz')`
# that executed before the file was guaranteed to exist (it crashed a fresh
# run) and whose result was never used; gen_testdata() loads the file itself.


def gen_testdata():
    """Load 'heat_eq_data.npz' and flatten it into test arrays.

    Returns
    -------
    X : np.ndarray of shape (x_dim * t_dim, 2) with columns (x, t)
    y : np.ndarray of shape (x_dim * t_dim, 1) with the exact solution values
    """
    # Load the data:
    data = np.load('heat_eq_data.npz')
    # Obtain the values for t, x, and the exact solution:
    t, x, exact = data["t"], data["x"], data["usol"].T
    # Process the data and flatten it out (like labels and features):
    xx, tt = np.meshgrid(x, t)
    X = np.vstack((np.ravel(xx), np.ravel(tt))).T
    y = exact.flatten()[:, None]
    return X, y


def main():
    def pde(x, y):
        """PDE residual of the heat equation: u_t - a * u_xx."""
        dy_t = dde.grad.jacobian(y, x, i=0, j=1)
        dy_xx = dde.grad.hessian(y, x, i=0, j=0)
        return dy_t - a*dy_xx

    # Computational geometry:
    geom = dde.geometry.Interval(0, L)
    timedomain = dde.geometry.TimeDomain(0, 1)
    geomtime = dde.geometry.GeometryXTime(geom, timedomain)

    # Initial and boundary conditions:
    bc = dde.DirichletBC(geomtime, lambda x: 0, lambda _, on_boundary: on_boundary)
    ic = dde.IC(
        geomtime, lambda x: np.sin(n*np.pi*x[:, 0:1]/L),
        lambda _, on_initial: on_initial
    )

    # Define the PDE problem and configurations of the network:
    data = dde.data.TimePDE(
        geomtime, pde, [bc, ic],
        num_domain=2540, num_boundary=80, num_initial=160, num_test=2540
    )
    net = dde.nn.FNN([2] + [20] * 3 + [1], "tanh", "Glorot normal")
    model = dde.Model(data, net)

    # Build and train the model: Adam first, then L-BFGS fine-tuning.
    model.compile("adam", lr=1e-3)
    model.train(epochs=20000)
    model.compile("L-BFGS")
    losshistory, train_state = model.train()

    # Plot/print the results
    dde.saveplot(losshistory, train_state, issave=True, isplot=True)
    X, y_true = gen_testdata()
    y_pred = model.predict(X)
    f = model.predict(X, operator=pde)
    print("Mean residual:", np.mean(np.absolute(f)))
    print("L2 relative error:", dde.metrics.l2_relative_error(y_true, y_pred))
    np.savetxt("test.dat", np.hstack((X, y_true, y_pred)))


# + [markdown] id="4ji-gX9wybd-"
#
# In this code we will solve the heat equation using PINN implemented with the DeepXDE library.
#
# The equation is as follows:
#
# $\frac{\partial u}{\partial t} = \alpha \nabla^2 u\;$ .
#
# Where $\nabla^2$ is the laplacian differential operator, $\alpha$ is the thermal diffusivity constant and $u$ is the function (temperature) we want to approximate.
#
# In a unidimensional case we have:
#
# $\frac{\partial u(x, t)}{\partial t}$ = $\alpha \frac{\partial^2u(x,t)}{{\partial x}^2}\;$, $\;\;\;\; x \in [0, 1]\;$, $\;\;\;\; t \in [0, 1]\;$.
#
# With Dirichlet boundary conditions
#
# $u(0, t) = u(1, t) = 0\;$ ,
#
# and periodic (sinusoidal) initial conditions:
#
# $u(x, 0) = sin(n\pi x/L)\;$, $\;\;\;\; 0 < x < L\;$, $\;\;\;\; n = 1, 2, ...\;.$
#
# This setup is a common problem in many differential equations textbooks and can be physically interpreted as the variation of temperature in a uniform and unidimensional bar over time. Here, the constant $\alpha$ is the thermal diffusivity (a property of the material that the bar is made) and $L$ is the length of the bar.
#

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="8QInMu-MxhIj" outputId="b294ad3a-282b-4fcc-f6df-6c6fc2c16be3"
if __name__ == "__main__":
    # Problem parameters:
    a = 0.4  # Thermal diffusivity
    L = 1  # Length of the bar
    n = 1  # Frequency of the sinusoidal initial conditions

    # Generate a dataset with the exact solution (if you dont have one):
    gen_exact_solution()

    # Solve the equation:
    main()
examples/heat_conduction_1d_uniform_bar.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import torch
import numpy as np
import pickle
#from torchsummary import summary
from collections import OrderedDict
from torch.utils.tensorboard import SummaryWriter
import datetime
import time
import copy

import torchvision.datasets as datasets

# Download MNIST once; images stay as raw uint8 tensors (transform=None),
# they are flattened/normalized manually inside DE_MLP.
mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)
mnist_testset = datasets.MNIST(root='./data', train=False, download=True, transform=None)


class DNN(torch.nn.Module):
    """Plain fully-connected network built from a layer-size vector.

    `layers` is a sequence [in_dim, h1, h2, ..., out_dim]; each hidden layer
    is Linear + ReLU, the last layer is Linear only (logits output).
    """
    def __init__(self, layers):
        super(DNN, self).__init__()

        # depth of the neural network and activation function
        self.depth = len(layers)-1
        self.activation = torch.nn.ReLU

        # layers: (depth-1)x 'linear' + 'activation' + last one 'linear'
        layer_list = list()
        for i in range(self.depth-1):
            layer_list.append(
                ('layer_%d' % i, torch.nn.Linear(layers[i], layers[i+1]))
            )
            layer_list.append(('activation_%d' % i, self.activation()))

        layer_list.append(
            ('layer_%d' % (self.depth - 1), torch.nn.Linear(layers[-2], layers[-1]))
        )
        layerDict = OrderedDict(layer_list)
        self.layers = torch.nn.Sequential(layerDict)

    def forward(self, x):
        out = self.layers(x)
        return out


# +
## Assume the dims of Training and Testing data are in shape [N,C,H,W]
class DE_MLP():
    """Differential-evolution search over MLP architectures.

    Each individual is an integer vector [in_dim, h1, ..., hk, out_dim];
    fitness is the best validation loss reached by training a DNN with that
    layout. Uses jDE-style self-adaptive mutation/crossover parameters.
    """
    def __init__(self, outdim=1,maxdepth=70,mindepth=5,minneuron=4,maxneuron=10,bsize=10,epoch=100,initSize=20,maxiter=10,stopcount=3,\
                 trainingset=None,validationset=None,trainingTarget=None,validateTarget=None,crossover=1):
        self.best=[]
        self.mean=[]
        self.outdim=outdim
        self.maxdepth=maxdepth          # max number of hidden layers
        self.mindepth=mindepth          # min number of hidden layers
        self.minneuron = minneuron      # min neurons per hidden layer
        self.maxneuron = maxneuron      # max neurons per hidden layer
        self.bsize = bsize              # mini-batch size for fitness training
        self.epoch = epoch              # max training epochs per individual
        self.stopcount = stopcount      # early-stop patience (non-improving epochs)
        self.pplSize = initSize         # population size
        self.maxiter = maxiter          # number of DE generations
        # Flatten inputs to [N, features] so they can feed a dense network.
        self.training = trainingset.reshape((trainingset.shape[0],-1))
        self.validationSet = validationset.reshape((validationset.shape[0],-1))
        self.target=trainingTarget
        self.validationTarget = validateTarget
        self.MLPlayerlist = []
        # One random depth per individual in the initial population.
        self.depthlist=np.random.choice(range(self.mindepth,self.maxdepth),self.pplSize,replace=True)
        self.crossover=crossover
        # jDE constants: (tau1, tau2, beta_l, beta_u) -- see jde_params().
        self.adap_conf = (0.1,0.1,0.1,0.9)
        self.tb = SummaryWriter('./Statistic/'+datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')

        # Generate initial population
        for i in range(self.pplSize):
            depth = self.depthlist[i]
            tmp = []
            # the number of neurons for the first layer is the dimension of the element in training data (in our case the size of the image)
            tmp.append(self.training.shape[1])
            for j in range(depth):
                # generate the number of neurons for each layer
                tmp.append(np.random.choice(range(self.minneuron,self.maxneuron),1,replace=False)[0])
            tmp.append(self.outdim) # last layer consist of 1 neuron by default
            tmp=np.array(tmp)
            self.MLPlayerlist.append(tmp)

    # define fit function - it calculates the fitness of one individual (one NN)
    def fit(self,config,id_,p=None):
        """Train a DNN with layout `config`; return (best_val_loss, config, id_).

        NOTE(review): the method only returns from inside the early-stop
        branch; if `stop` never reaches `stopcount` within `epoch` epochs it
        falls through and returns None, which callers unpack as 3 values --
        confirm epoch/stopcount always trigger the early stop.
        """
        dnn = DNN(config) # define DNN based on configurations (layers and neurons)
        dnn.layers.to(self.device)
        best = float('inf')
        stop=0
        opt = torch.optim.Adam(dnn.layers.parameters(), lr=0.001)
        # BCEWithLogitsLoss over one-hot targets (multi-label formulation).
        loss = torch.nn.BCEWithLogitsLoss()
        batch = self.training.shape[0]//self.bsize
        vbatch = self.validationSet.shape[0]//self.bsize
        idxs = [x for x in range(self.training.shape[0])]
        vidxs = [x for x in range(self.validationSet.shape[0])]
        for e in range(self.epoch):
            start=time.time()
            # training
            np.random.shuffle(idxs)
            dnn.layers.train()
            batchloss=0
            for i in range(batch):
                idx=idxs[i*self.bsize:i*self.bsize+self.bsize]
                opt.zero_grad()
                data = torch.tensor(self.training[idx]).float().to(self.device)
                y = torch.tensor(self.target[idx]).float().to(self.device)
                yhat = dnn(data)
                l = loss(yhat,y)
                batchloss+=l.item()
                l.backward()
                opt.step()
            # validating
            dnn.layers.eval()
            np.random.shuffle(vidxs)
            vloss=0
            for i in range(vbatch):
                vidx=vidxs[i*self.bsize:i*self.bsize+self.bsize]
                vdata = torch.tensor(self.validationSet[vidx]).float().to(self.device)
                vy = torch.tensor(self.validationTarget[vidx]).float().to(self.device)
                vyhat = dnn(vdata)
                vl = loss(vyhat,vy)
                vloss += vl.item()
            vloss = vloss/vbatch
            # updating best loss
            if(vloss<best):
                best=vloss
            # updating stopping condition
            else:
                stop+=1
            end=time.time()
            if(p is not None):
                print(f'Process: {p:3d}, ConfigID: {id_:3d}, Epoch: {e:3d}, Training Loss: {(batchloss/batch):10.8f}, Validation Loss: {(vloss):10.8f},\
                Best: {best:10.8f}, StopCount/Limit: {stop:3d}/{self.stopcount:3d}, Time:{(end-start):10.8f}')
            else:
                print(f'ConfigID: {id_:3d}, Epoch: {e:3d}, Training Loss: {(batchloss/batch):10.8f}, Validation Loss: {(vloss):10.8f},\
                Best: {best:10.8f}, StopCount/Limit: {stop:3d}/{self.stopcount:3d}, Time:{(end-start):10.8f}')
            # stopping condition and stopping
            if(stop>=self.stopcount):
                return best,config,id_

    def jde_params(self,beta,cr):
        """jDE self-adaptation: with prob tau1 resample beta in
        [beta_l, beta_l + beta_u); with prob tau2 resample cr in [0, 1)."""
        tau1,tau2,beta1,betau = self.adap_conf
        r1,r2,r3,r4 = np.random.uniform(0,1,4)
        if(r2 < tau1):
            beta = round(beta1 + r1 * betau,3)
        # else, keep the beta same
        if(r4 < tau2):
            cr = r3
        return beta,cr

    def mutation_rand_1_z(self,x1,xs,beta,debug=False):
        """DE/rand/1 mutation adapted to variable-length layer vectors.

        `x1` is the base vector, `xs` the two difference vectors; both the
        number of hidden layers and the per-layer neuron counts are mutated,
        then clamped to [mindepth, maxdepth] / [minneuron, maxneuron].
        Returns a full int vector including the fixed in/out dimensions.
        NOTE(review): mutates xs[0]/xs[1] in place (slicing reassigns list
        entries) -- callers pass fresh lists, so this is currently harmless.
        """
        indim = x1[0]
        x1 = x1[1:-1] # remove in/out dim
        xs[0] = xs[0][1:-1]
        xs[1] = xs[1][1:-1]
        if(debug):
            print(f'M1 : x1 len {x1.shape[0]} xs0 len {xs[0].shape[0]} xs1 len {xs[1].shape[0]}')
            print(f'M1 : x1 {x1} \nM1 : xs0 {xs[0]} \nM1 : xs1 len {xs[1]}')
        #
        # A. Mutating the # of layers
        minlen = np.min([x1.shape[0],xs[0].shape[0],xs[1].shape[0]])
        if(debug):
            print(f'M1 : minlen {minlen}')
        newminlen = minlen
        targetlen=int(np.floor((x1.shape[0]) + beta * (xs[0].shape[0] - xs[1].shape[0])))
        # check the sign of targetlen: if the new length == 0 , set it back to target len , if <0 , take abs
        if(targetlen==0):
            targetlen=x1.shape[0]
        elif(targetlen<0):
            targetlen=abs(targetlen)
        # check if new length is between mindepth and maxdepth
        if(targetlen < self.mindepth):
            targetlen = self.mindepth
        elif(targetlen > self.maxdepth):
            targetlen = self.maxdepth
        # new minimum length is min of minlen and targetlen
        if(targetlen < minlen):
            newminlen=targetlen
        if(debug):
            print(f'M1 : New Min Len :{newminlen}, Length Mutation :{targetlen}')
        #
        # B. Mutating the # of neurons
        # As lengths of x1, xs[0], xs[1] and new length can possibly be different,
        # 1) do the mutation for # of neurons for new minlen,
        # 2) apply the same rule to remaining if needed
        #
        xa = np.zeros((targetlen),dtype=int)
        # Mutating the number of neurons up to min len layers
        xa = x1[:newminlen] + beta * (xs[0][:newminlen] - xs[1][:newminlen]) # mutate on node with minlen
        # Mutating the number of neurons for the rest layers
        if(targetlen>minlen):
            # NOTE(review): xaa is sized targetlen-minlen but the loop runs
            # targetlen-newminlen times -- the sizes only match when
            # newminlen == minlen; confirm intended when targetlen < minlen
            # cannot occur inside this branch.
            xaa = np.zeros((targetlen-minlen))
            a,b,c=None,None,None
            for i in range(targetlen-newminlen):
                # if number of neurons missing in vector, generate random from range (min)
                if(x1.shape[0]<=newminlen+i):
                    a=np.random.choice(range(self.minneuron,self.maxneuron),1,replace=False)[0]
                elif(x1.shape[0]>newminlen+i):
                    a=x1[newminlen+i]
                if(xs[0].shape[0]<=newminlen+i):
                    b=np.random.choice(range(self.minneuron,self.maxneuron),1,replace=False)[0]
                elif(xs[0].shape[0]>newminlen+i):
                    b=xs[0][newminlen+i]
                if(xs[1].shape[0]<=newminlen+i):
                    c=np.random.choice(range(self.minneuron,self.maxneuron),1,replace=False)[0]
                elif(xs[1].shape[0]>newminlen+i):
                    c=xs[1][newminlen+i]
                xaa[i]=a + beta * (b - c)
            xa = np.concatenate((xa, xaa), axis=None)
        # check if numbers of neurons are in allowed range
        for i in range(xa.shape[0]):
            if(xa[i]>self.maxneuron):
                xa[i]=self.maxneuron
            elif(xa[i]<self.minneuron):
                xa[i]=self.minneuron
            xa[i] = np.floor(xa[i])
        xa = np.concatenate((np.array(indim,dtype=int),np.array(xa,dtype=int),np.array(self.outdim,dtype=int)), axis=None,dtype=int)
        return xa

    def crossoverMean(self,parent,u):
        """Crossover variant: child hidden layers are the element-wise mean
        of parent and mutant (shorter vector resized to the longer one)."""
        order = [parent[1:-1],u[1:-1]]
        if(parent.shape[0] > u.shape[0]):
            order = [u[1:-1],parent[1:-1]]
        order[0] = np.resize(order[0],order[1].shape[0])
        middle = np.mean(order,axis=0,dtype=int)
        # Re-attach the fixed input/output dimensions.
        child=np.insert(middle,0,parent[0])
        child=np.append(child,parent[-1])
        return child.copy()

    def crossoverRandomSwap(self,parent,u):
        """Crossover variant: uniform random swap per hidden layer between
        parent and mutant."""
        # the first one is with min len
        order = [parent[1:-1],u[1:-1]]
        child = [parent[0]]
        if(parent.shape[0] > u.shape[0]):
            order = [u[1:-1],parent[1:-1]]
        order[0] = np.resize(order[0],order[1].shape[0])
        swap = np.random.randint(0,2,order[0].shape[0])
        for i in range(len(swap)):
            if(swap[i]==0):
                child.append(order[0][i])
            else:
                child.append(order[1][i])
        child.append(parent[-1])
        return np.array(child).copy()

    def crossoverJDESwap(self,parent,u,cr):
        """jDE crossover: per-layer swap biased by crossover rate `cr`
        (takes the first vector when the coin flip is 0 or r <= cr)."""
        # the first one is with min len
        order = [parent[1:-1],u[1:-1]]
        child = [parent[0]]
        if(parent.shape[0] > u.shape[0]):
            order = [u[1:-1],parent[1:-1]]
        order[0] = np.resize(order[0],order[1].shape[0])
        swap = np.random.randint(0,2,order[0].shape[0])
        for i in range(len(swap)):
            r = np.random.uniform(0,1,1)[0]
            if(swap[i]==0 or r<=cr):
                child.append(order[0][i])
            else:
                child.append(order[1][i])
        child.append(parent[-1])
        return np.array(child).copy()

    def run(self,beta=0.5,cr=0.9):
        """Run the DE search: evaluate the initial population, then for
        `maxiter` generations mutate/crossover/select greedily, logging
        score and structure statistics to TensorBoard."""
        current_gen=self.MLPlayerlist
        scores = np.zeros((self.pplSize))
        #initial Run
        print('Initial Run Start')
        for i in range(len(self.MLPlayerlist)):
            b,_,_ = self.fit(self.MLPlayerlist[i],i)
            scores[i]=b
        print('Initial Run End')
        currentbest = np.min(scores)
        currentmean = np.mean(scores)
        currentbestidx = np.argmin(scores)
        print(f'Init Run Best: {currentbest}, Mean: {currentmean}, ID:{currentbestidx}, config: {current_gen[currentbestidx]}')
        #Generation Run
        for i in range(self.maxiter):
            # Per-individual stats: [#hidden layers, mean/median/q25/q75 neurons].
            structureStatistic=np.zeros((self.pplSize,5))
            updatecount=0
            start=time.time()
            print(f'Gen {i} Run Start')
            # Per-individual self-adaptive parameters, reset each generation.
            betas = np.ones(self.pplSize)*beta
            crs = np.ones(self.pplSize)*cr
            for j in range(self.pplSize):
                parent = current_gen[j]
                idx0,idx1,idxt = np.random.choice(range(0,self.pplSize),3,replace=False)
                target = current_gen[idxt]
                diff = [current_gen[idx0],current_gen[idx1]]
                betas[j],crs[j] = self.jde_params(betas[j],crs[j])
                unitvector = self.mutation_rand_1_z(target,diff,betas[j])
                nextGen = self.crossoverJDESwap(parent,unitvector,crs[j])
                print(f'Next Gen: {nextGen}')
                structureStatistic[j,0]= nextGen.shape[0]-2
                structureStatistic[j,1]= np.mean(nextGen[1:-1])
                structureStatistic[j,2]= np.median(nextGen[1:-1])
                structureStatistic[j,3]= np.quantile(nextGen[1:-1],0.25)
                structureStatistic[j,4]= np.quantile(nextGen[1:-1],0.75)
                s,_,_ = self.fit(nextGen,j)
                # Greedy selection: keep the child only if it improves.
                if(s<scores[j]):
                    updatecount+=1
                    scores[j]=s
                    current_gen[j]=nextGen
            print(f'Gen {i} Run End')
            end=time.time()
            currentbest = np.min(scores)
            currentmean = np.mean(scores)
            currentmedian = np.median(scores)
            currentq25 = np.quantile(scores,0.25)
            currentq75 = np.quantile(scores,0.75)
            currentbestidx = np.argmin(scores)
            genMeanLen = np.mean(structureStatistic[:,0])
            genMedianLen = np.median(structureStatistic[:,0])
            genq25Len = np.quantile(structureStatistic[:,0],0.25)
            genq75Len = np.quantile(structureStatistic[:,0],0.75)
            # NOTE(review): genMeanNode uses np.median over the per-individual
            # means (not np.mean) -- confirm intended.
            genMeanNode=np.median(structureStatistic[:,1])
            genMedianNode=np.median(structureStatistic[:,2])
            genq25Node = np.median(structureStatistic[:,3])
            genq75Node = np.median(structureStatistic[:,4])
            print(f'Run {i:3d} Best: {currentbest}, Mean: {currentmean}, ID:{currentbestidx}, config: {current_gen[currentbestidx]}, updatecount: {updatecount:3d}, Generation RunTime: {(end-start):10.8f}')
            self.tb.add_scalars("Scores Statistic (Generation)", {'best':currentbest,'mean':currentmean,'median':currentmedian,'q25':currentq25,'q75':currentq75}, i)
            self.tb.add_scalars("Structure Statistic (Generation) #HiddenLayer", {'mean':genMeanLen,'median':genMedianLen,'q25':genq25Len,'q75':genq75Len}, i)
            self.tb.add_scalars("Structure Statistic (Generation) #Node", {'mean':genMeanNode,'median':genMedianNode,'q25':genq25Node,'q75':genq75Node}, i)
            self.tb.add_scalar('Update Count',updatecount,i)
            self.tb.add_scalar('RunTime',(end-start),i)
        print(f'Run Completed : Best Score(loss): {np.min(scores)} , Config: {current_gen[np.argmin(scores)]}')
        return
# -

# One-hot encode the MNIST labels for the BCEWithLogitsLoss fitness training.
trainingt = torch.nn.functional.one_hot(mnist_trainset.targets,num_classes=10)
validationt = torch.nn.functional.one_hot(mnist_testset.targets,num_classes=10)

# (self, outdim=1,maxdepth=70,mindepth=5,minneuron=4,maxneuron=10,bsize=10,epoch=100,initSize=20,maxiter=10,stopcount=3,
#  trainingset=None,validationset=None,trainingTarget=None,validateTarget=None)
d = DE_MLP(outdim=10,maxdepth=10,initSize=3,trainingset=mnist_trainset.data, validationset=mnist_testset.data, trainingTarget=trainingt,validateTarget=validationt)

d.run()

validationset=mnist_testset.data
validationset.shape
.ipynb_checkpoints/JDE-v2-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.8 64-bit
#     language: python
#     name: python36864bit8e6945bbe1b44598a616c749bfba4850
# ---

# CheckList tutorial: the three test types (MFT, INV, DIR), expectation
# functions, and the two ways of running tests, using sentiment analysis
# as the example task.

import checklist
from checklist.editor import Editor
from checklist.perturb import Perturb
from checklist.test_types import MFT, INV, DIR

# For this tutorial, we will assume that our task is sentiment analysis.
# NOTE(review): the editor is created with language='chinese' although every
# template and example below is English -- confirm this is intentional.
editor = Editor(language='chinese')

# ## Minimum Functionality Test (MFT)

# A Minimum Functionality Test is like a unit test in Software Engineering.
# If you are testing a certain capability (e.g. 'can the model handle negation?'),
# an MFT is composed of simple examples that verify a specific behavior.

# Let's create a very simple MFT for negations:

pos = ['good', 'enjoyable', 'exciting', 'excellent', 'amazing', 'great', 'engaging']
neg = ['bad', 'terrible', 'awful', 'horrible']

# Now let's create some data with both positive and negative negations,
# assuming `1` means positive and `0` means negative:

# A negated positive adjective yields a negative sentence (label 0) and a
# negated negative adjective yields a positive one (label 1).
ret = editor.template('This is not {a:pos} {mask}.', pos=pos, labels=0, save=True, nsamples=100)
ret += editor.template('This is not {a:neg} {mask}.', neg=neg, labels=1, save=True, nsamples=100)

# We can easily turn this data into an MFT:

test = MFT(ret.data, labels=ret.labels, name='Simple negation',
           capability='Negation', description='Very simple negations.')

# Since `ret` is a dict where keys have the right names for test arguments,
# we can also use a simpler call:

test = MFT(**ret, name='Simple negation',
           capability='Negation', description='Very simple negations.')

# ### Running tests
# Let's use an off-the-shelf sentiment analysis model.

from pattern.en import sentiment
import numpy as np

def predict_proba(inputs):
    """Return a (n, 2) softmax-like array of [p(negative), p(positive)].

    pattern's sentiment() polarity is in [-1, 1]; it is rescaled to [0, 1]
    and used as the probability of the positive class.
    """
    p1 = np.array([(sentiment(x)[0] + 1) / 2. for x in inputs]).reshape(-1, 1)
    p0 = 1 - p1
    return np.hstack((p0, p1))

# Quick sanity check of the prediction function
predict_proba(['good', 'bad'])

# There are two ways of running tests.
# In the first (and simplest) way, you pass a function as argument to `test.run`,
# which gets called to make predictions.
# We assume that the function returns a tuple with `(predictions, confidences)`,
# so we have a wrapper to turn softmax (like our function above) into this:

from checklist.pred_wrapper import PredictorWrapper
wrapped_pp = PredictorWrapper.wrap_softmax(predict_proba)
wrapped_pp(['good'])

# Once you have this function, running the test is as simple as calling `test.run`.
# You can run the test on a subset of testcases (for speed's sake) by specifying
# `n` if needed. (We won't do that here since our test is small.)

test.run(wrapped_pp)

# Once you run a test, you can print a summary of the results with `test.summary()`

test.summary()

# It seems that this off-the-shelf system has trouble with negation.
# Note the failures: examples that should be negative are predicted as positive
# and vice versa (the number shown is the probability of positive)

# If you are using jupyter notebooks, you can use `test.visual_summary()` for a
# nice visualization version of these results:
# (I'll load a gif so you can see this in preview mode)

# from IPython.display import HTML, Image
# with open('visual_summary.gif','rb') as f:
#     display(Image(data=f.read(), format='png'))

test.visual_summary()

# The second way to run a test is from a prediction file.
# First, we export the test into a text file:

test.to_raw_file('/tmp/raw_file.txt')

# cat /tmp/raw_file.txt | head -n 5

# Then, you get predictions from the examples in the raw file (in order) however
# you want, and save them in a prediction file.
# Let's simulate this process here:

# +
docs = open('/tmp/raw_file.txt').read().splitlines()
preds = predict_proba(docs)
f = open('/tmp/softmax_preds.txt', 'w')
for p in preds:
    f.write('%f %f\n' % tuple(p))
f.close()
# -

# cat /tmp/softmax_preds.txt | head -n 2

# We can run the test from this file.
# We have to specify the file format (see the API for possible choices), or a
# function that takes a line in the file and outputs predictions and confidences.
# Since we had already run this test, we have to set `overwrite=True` to
# overwrite the previous results.

test.run_from_file('/tmp/softmax_preds.txt', file_format='softmax', overwrite=True)
test.summary()

# ## Invariance tests
# An Invariance test (INV) is when we apply label-preserving perturbations to
# inputs and expect the model prediction to remain the same.
# Let's start by creating a fictitious dataset to serve as an example, and
# process it with spacy

import spacy
nlp = spacy.load("en_core_web_sm")
dataset = ['This was a very nice movie directed by <NAME>.',
           '<NAME> was brilliant.',
           'I hated everything about this.',
           'This movie was very bad.',
           'I really liked this movie.',
           'just bad.',
           'amazing.',
           ]
pdataset = list(nlp.pipe(dataset))

# Now let's apply a simple perturbation: changing people's names and expecting
# predictions to remain the same:

t = Perturb.perturb(pdataset, Perturb.change_names)
print('\n'.join(t.data[0][:3]))
print('...')

test = INV(**t)
test.run(wrapped_pp)
test.summary()

# Let's try a different test: adding typos and expecting predictions to remain the same

t = Perturb.perturb(dataset, Perturb.add_typos)
print('\n'.join(t.data[0][:3]))
print('...')

test = INV(**t)
test.run(wrapped_pp)
test.summary()

# ## Directional Expectation tests
# A Directional Expectation test (DIR) is just like an INV, in the sense that we
# apply a perturbation to existing inputs. However, instead of expecting
# invariance, we expect the model to behave in some specified way.
# For example, let's start with a very simple perturbation: we'll add very
# negative phrases to the end of our small dataset:

def add_negative(x):
    # Returns one perturbed copy of `x` per negative closing phrase.
    phrases = ['Anyway, I thought it was bad.', 'Having said this, I hated it', 'The director should be fired.']
    return ['%s %s' % (x, p) for p in phrases]

dataset[0], add_negative(dataset[0])

# What would we expect after this perturbation? I think the least we should
# expect is that the prediction probability of positive should **not go up**
# (that is, it should be monotonically decreasing).
# Monotonicity is an expectation function that is built in, so we don't need to
# implement it.
# `tolerance=0.1` means we won't consider it a failure if the prediction
# probability goes up by less than 0.1, only if it goes up by more

from checklist.expect import Expect
monotonic_decreasing = Expect.monotonic(label=1, increasing=False, tolerance=0.1)

t = Perturb.perturb(dataset, add_negative)
test = DIR(**t, expect=monotonic_decreasing)

test.run(wrapped_pp)
test.summary()

# #### Writing custom expectation functions
# If you are writing a custom expectation function, it must return a float or
# bool for each example such that:
# - `> 0` (or True) means passed,
# - `<= 0` or False means fail, and (optionally) the magnitude of the failure,
#   indicated by distance from 0, e.g. -10 is worse than -1
# - `None` means the test does not apply, and this should not be counted
#
# Each test case can have multiple examples. In our MFTs, each test case only
# had a single example, but in our INVs and DIRs, they had multiple examples
# (e.g. we changed people's names to various other names).
#
# You can write custom expectation functions at multiple levels of granularity.
#
# #### Expectation on a single example
#
# If you want to write an expectation function that acts on each individual
# example, you write a function with the following signature:
#
# `def fn(x, pred, conf, label=None, meta=None):`
#
# For example, let's write a (useless) expectation function that checks that
# every prediction confidence is higher than 0.95:

# Function that expects prediction confidence to always be more than 0.95
def high_confidence(x, pred, conf, label=None, meta=None):
    return conf.max() > 0.95

# We then wrap this function with `Expect.single`, and apply it to our previous
# test to see the result:

expect_fn = Expect.single(high_confidence)
test.set_expect(expect_fn)
test.summary()

# Notice that every test case fails now: there is always some prediction in it
# that has confidence smaller than 0.95.

# By default, the way we aggregate all results in a test case is such that the
# testcase fails if **any** examples in it fail (for MFTs), or **any but the
# first** fail for INVs and DIRs (because the first is usually the original data
# point before perturbation). You can change these defaults with the `agg_fn` argument.

# #### Expectation on pairs
#
# Most of the time for DIRs, you want to write an expectation function that acts
# on pairs of `(original, new)` examples - that is, the original example and the
# perturbed examples.
# If this is the case, the signature is as follows:
#
# `def fn(orig_pred, pred, orig_conf, conf, labels=None, meta=None)`
#
# For example, let's write an expectation function that checks that the
# prediction **changed** after applying the perturbation, and wrap it with
# `Expect.pairwise`:

def changed_pred(orig_pred, pred, orig_conf, conf, labels=None, meta=None):
    # True (pass) when the perturbed prediction differs from the original one.
    return pred != orig_pred

expect_fn = Expect.pairwise(changed_pred)

# Let's actually create a new test where we add negation to our dataset:

t = Perturb.perturb(pdataset, Perturb.add_negation)
t.data[0:2]

test = DIR(**t, expect=expect_fn)
test.run(wrapped_pp)
test.summary()

# Note the failure: prediction did not change after adding negation.

# You can write much more complex expectation functions, but these are enough
# for this tutorial.
# You can check out `expect.py` or the notebooks for Sentiment Analysis, QQP and
# SQuAD for many additional examples.
notebooks/tutorials/3. Test types, expectation functions, running tests.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Sentence-level binary classification: detect the "increased amount of fat in
# the liver" finding from pre-computed sentence embeddings, evaluated with
# 5-fold stratified cross-validation using logistic regression.

# +
from numpy import genfromtxt
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud

# %matplotlib inline
# -

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score

# Load sentences plus their English translations, and the matching embedding
# matrix (row i of the embeddings corresponds to row i of the sentences file).
sentences = pd.read_csv('../data/processed/temple_radio_1_2_sentences_with_translation.csv')
sent_embeddings = genfromtxt('../data/processed/temple_radio_1_2_sentence_embeddings.csv', delimiter=',')

# Positive class: sentences whose translation is exactly the target finding.
pos_sent = sentences[sentences['Translation'] == 'There is increased amount of fat in the liver.']
pos_sent_embeddings = sent_embeddings[pos_sent.index, :]

# Negative class: every remaining sentence.
# BUG FIX: this assignment was duplicated in the original notebook.
neg_sent = sentences.drop(pos_sent.index)
neg_sent_embeddings = sent_embeddings[neg_sent.index, :]

pos_sent = pos_sent.reset_index(drop=True)
neg_sent = neg_sent.reset_index(drop=True)
new_sent_df = pd.concat([pos_sent, neg_sent]).reset_index(drop=True)

# +
# Build one labelled DataFrame: one row per sentence, embedding dimensions as
# columns plus a binary 'class' column (1 = positive finding, 0 = other).
y_pos = [1 for p in range(len(pos_sent_embeddings))]
y_neg = [0 for n in range(len(neg_sent_embeddings))]

pos_df = pd.DataFrame(pos_sent_embeddings)
pos_df['class'] = y_pos
neg_df = pd.DataFrame(neg_sent_embeddings)
neg_df['class'] = y_neg

pos_df = pos_df.reset_index(drop=True)
neg_df = neg_df.reset_index(drop=True)
new_df = pd.concat([pos_df, neg_df]).reset_index(drop=True)
# -

# Keep the raw text alongside the features so per-fold predictions can be
# exported in a human-readable form (these columns are dropped before fitting).
new_df['sentence'] = new_sent_df['Sentence']
new_df['translation'] = new_sent_df['Translation']

# Shuffle the rows once before cross-validation.
# FIX: seed the shuffle so the reported scores are reproducible on re-run.
new_df = new_df.sample(frac=1, random_state=0).reset_index(drop=True)

y = new_df[["class"]]
X = new_df.drop(["class"], axis=1)

# +
skf = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)

acc_scores, f1_scores = [], []
i = 0
conf_scores = []
for train, test in skf.split(X, y):
    # skf.split provides train/test indices; only the embedding dimensions are
    # used as features (the text columns are dropped before fitting).
    clf = LogisticRegression(random_state=0, max_iter=1000).fit(
        X.drop(["sentence", "translation"], axis=1).loc[train],
        y.loc[train].values.ravel())
    y_pred = clf.predict(X.drop(["sentence", "translation"], axis=1).loc[test])

    # Persist this fold's predictions next to the original text for review.
    df_skf = pd.DataFrame(X[['sentence', 'translation']].loc[test])
    df_skf['y_true'] = y.loc[test]
    df_skf['pred'] = y_pred
    df_skf.to_csv(f"../data/processed/classification_results/second_result_{i}.csv", index=False)

    acc = accuracy_score(y.loc[test], y_pred)
    f1 = f1_score(y.loc[test], y_pred)
    acc_scores.append(round(acc, 4))
    f1_scores.append(round(f1, 4))
    conf_scores.append(confusion_matrix(y.loc[test], y_pred))
    i += 1

# Element-wise sum of the per-fold confusion matrices.
print(f"confusion matrix score:\n{sum(conf_scores)}")
# -

print(f"Acc scores: {acc_scores}\nMean acc: {sum(acc_scores)/len(acc_scores):.4f}\n")
print(f"F1 scores: {f1_scores}\nMean f1: {sum(f1_scores)/len(f1_scores):.4f}\n")

# Inspect the exported predictions of the first fold.
df = pd.read_csv("../data/processed/classification_results/second_result_0.csv")
df
notebooks/3.1-classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from IPython.display import display, HTML
import pandas as pd
import time
import re
import os
import sys
import psycopg2
import ipywidgets as widgets
from bokeh.io import output_file, show
from bokeh.models import CheckboxGroup
from ipywidgets import interact, interactive, fixed, Layout, HBox, VBox

# Project functions and classes
if './modules/' not in sys.path:
    sys.path.append('./modules')
from widgets import *
from selection import *
from build_statements import *
from query import *
from plot import *
from outliers import *


def query_plot(b):
    """Button callback: read the dashboard selections, query the database,
    optionally remove outliers, export the joined data, and draw the map plot.

    Parameters
    ----------
    b : ipywidgets.Button
        The button that triggered the callback (unused).
    """
    start = time.time()
    # Sentinel values carried by dropdowns the user left untouched.
    default = ('ignore', ' ')
    dd_default = ('ignore',)
    with output:
        # Wrap the single-value dropdown selections in 1-tuples so they can be
        # compared against the tuple defaults above.
        prevalence_indications_dd_tuple = (prevalence_indications_dd.value,)
        deprivation_indications_dd_tuple = (deprivation_dd.value,)

        # One row per feature: [value, value_header, measure_header, month, year]
        features_values = {
            'prescribing': [bnf_code_dd.value, 'bnf_code', prescribing_measures_dd.value,
                            month_dd.value, year_dd.value],
            'prevalence': [prevalence_indications_dd_tuple, 'indication',
                           prevalence_measures_dd.value, '', year_dd.value],
            'gender': [gender_dd.value, 'sex', gender_age_measures_dd.value,
                       month_dd.value, year_dd.value],
            'age_groups': [age_groups_dd.value, 'age_group', gender_age_measures_dd.value,
                           month_dd.value, year_dd.value],
            'deprivation': [deprivation_indications_dd_tuple, 'deprivation_index',
                            deprivation_measures_dd.value, '', year_dd.value]}
        headers = ['value', 'value_header', 'measure_header', 'month', 'year']
        fvdf = pd.DataFrame.from_dict(features_values, orient='index', columns=headers)
        # Drop features the user did not select (still at their defaults).
        fvdf = fvdf[~fvdf['value'].isin([default, dd_default])]
        fvdf = fvdf.fillna('not_selected')
        features = list(fvdf.index)
        tables = get_tables(fvdf, features)
        fvdf['table'] = tables

        # Validate the selection so that all fields are provided correctly
        error, comment = validate(fvdf, features)
        if error:
            # BUG FIX: the closing tag was </h1> for an <h3> element.
            display(HTML('<h3 style="color:red">{comment}</h3>'.format(comment=comment)))
        else:
            # Use the query to build a title for the plot
            features_titles = get_title(fvdf)

            # Build query statements for the features selected
            statements = {}
            # If both age and gender were selected, query them together.
            if ({'age_groups', 'gender'}) <= set(features):
                built_statement = genderANDage_statement(fvdf).build()
                statements['gender_age_groups'] = built_statement
                features = [feature for feature in features if feature not in ['age_groups', 'gender']]
            for feature in features:
                st = statement(feature, fvdf)
                built_statement = '{sel}\n{whe}\n{grp}'.format(sel=st.select(feature),
                                                               whe=st.where(feature),
                                                               grp=st.groupby(feature))
                statements[feature] = built_statement

            # Locations data
            location_statement = ("SELECT practice,practice_code,ccg,region,sub_region,"
                                  "longitudemerc,latitudemerc "
                                  "FROM practices_locations")
            statements['location'] = location_statement

            # Use the statements to query the relevant tables
            queries_df = query(statements)

            # Check outliers in value columns
            outliers_choice_selection = outliers_choice.value
            if outliers_choice_selection == 'Remove':
                geo_columns = ['practice_code', 'practice', 'ccg', 'region', 'sub_region',
                               'longitudemerc', 'latitudemerc']
                value_headers = [h for h in queries_df.columns if h not in geo_columns]
                outliers_indices_collect = []
                for header in value_headers:
                    outliers_indices = detect_indices(queries_df, header)
                    outliers_indices_collect += outliers_indices
                queries_df = queries_df[~queries_df.index.isin(outliers_indices_collect)]

            # Download the joined data for the user.
            # BUG FIX: the widget object itself was compared to the strings
            # (`download_choice == 'CSV'`), which is always False, so no file
            # was ever written; compare the selected value instead.
            download_choice_selection = download_choice.value
            if download_choice_selection == 'CSV':
                queries_df.to_csv('queries_df.csv', index=False)
            elif download_choice_selection == 'Excel':
                # BUG FIX: the Excel branch wrote CSV content into an .xlsx
                # file via to_csv; write a real Excel file instead.
                queries_df.to_excel('queries_df.xlsx', index=False)

            # Execution time
            print('query execution time = {t} seconds'.format(t=round(time.time() - start)))

            # Generate plot
            output_choice_selection = output_choice.value
            map_plot(queries_df, features_titles, output_choice_selection)


# Dashboard
output = widgets.Output()
button.on_click(query_plot)
display(HTML('<h1 style="color:magenta">Geo Health</h1>\
<p style="color:blue">Select one or two of the following features. You can select multiple values for each:</p>\
<ul>\
<li style="color:magenta">Prescribing: Monthly, 2018-19</li>\
<li style="color:magenta">Prevalence: Annual, 2014-19</li>\
<li style="color:magenta">Age and Gender: Monthly, 2018-19</li>\
<li style="color:magenta">Deprivation: 2015, 2019</li>\
</ul>'))
VBox(children=[tabs, button_date_box, output])
# -

# Hide the notebook's code cells by default; a toggle link is shown instead.
HTML('''<script>
code_show=true;
function code_toggle() {
 if (code_show){
 $('div.input').hide();
 } else {
 $('div.input').show();
 }
 code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Per-attack-type traffic classification: for every attack CSV in ./attacks,
# train several classifiers on the four most relevant flow features (plus the
# Label column), repeat the evaluation, log the metrics to a results CSV and
# save box plots of the F1-score distributions.

# +
from sklearn import metrics
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import csv
import time
import warnings
import math

warnings.filterwarnings("ignore")

result = "./results/traffic_categorizer_for_attack_types.csv"
csv_files = os.listdir("attacks")
path = "./attacks/"
repetition = 10  # evaluations per (file, algorithm) pair


def folder(f_name):
    """Create the folder if it does not exist yet; warn (don't raise) on failure."""
    try:
        if not os.path.exists(f_name):
            os.makedirs(f_name)
    except OSError:
        print("The folder could not be created!")


folder_name = "./results/"
folder(folder_name)
folder_name = "./results/graph_traffic_categorizer_for_attack_types/"
folder(folder_name)

# Algorithms under comparison.
ml_list = {
    "Naive Bayes": GaussianNB(),
    "QDA": QDA(),
    "Random Forest": RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    "Nearest Neighbors": KNeighborsClassifier(3)}

# Per-attack feature subsets: the four most relevant flow features plus Label.
features = {"Bot": ["Bwd Packet Length Mean", "Flow IAT Max", "Flow Duration", "Flow IAT Min", "Label"],
            "DDoS": ["Bwd Packet Length Std", "Total Backward Packets", "Fwd IAT Total", "Flow Duration", "Label"],
            "DoS GoldenEye": ["Flow IAT Max", "Bwd Packet Length Std", "Flow IAT Min", "Total Backward Packets", "Label"],
            "DoS Hulk": ["Bwd Packet Length Std", "Fwd Packet Length Std", "Fwd Packet Length Max", "Flow IAT Min", "Label"],
            "DoS Slowhttptest": ["Flow IAT Mean", "Fwd Packet Length Min", "Bwd Packet Length Mean", "Total Length of Bwd Packets", "Label"],
            "DoS slowloris": ["Flow IAT Mean", "Total Length of Bwd Packets", "Bwd Packet Length Mean", "Total Fwd Packets", "Label"],
            "FTP-Patator": ["Fwd Packet Length Max", "Fwd Packet Length Std", "Fwd Packet Length Mean", "Bwd Packet Length Std", "Label"],
            "Heartbleed": ["Total Backward Packets", "Fwd Packet Length Max", "Flow IAT Min", "Bwd Packet Length Max", "Label"],
            "Infiltration": ["Fwd Packet Length Max", "Fwd Packet Length Mean", "Flow Duration", "Total Length of Fwd Packets", "Label"],
            "PortScan": ["Flow Bytes/s", "Total Length of Fwd Packets", "Fwd IAT Total", "Flow Duration", "Label"],
            "SSH-Patator": ["Fwd Packet Length Max", "Flow Duration", "Flow IAT Max", "Total Length of Fwd Packets", "Label"],
            "Web Attack": ["Bwd Packet Length Std", "Total Length of Fwd Packets", "Flow Bytes/s", "Flow IAT Max", "Label"]}

seconds = time.time()

# Write the results-file header.
with open(result, "w", newline="", encoding="utf-8") as f:
    wrt = csv.writer(f)
    wrt.writerow(["File", "ML algorithm", "accuracy", "Precision", "Recall", "F1-score", "Time"])

for j in csv_files:
    # print output header
    print('%-17s %-17s %-15s %-15s %-15s %-15s %-15s' % ("File", "ML algorithm", "accuracy", "Precision", "Recall", "F1-score", "Time"))
    a = []  # per-algorithm F1 distributions, used for the box plots below
    feature_list = list(features[j[0:-4]])
    df = pd.read_csv(path + j, usecols=feature_list)
    df = df.fillna(0)

    # Binarise the label: 1 = benign traffic, 0 = attack.
    attack_or_not = []
    for i in df["Label"]:
        if i == "BENIGN":
            attack_or_not.append(1)
        else:
            attack_or_not.append(0)
    df["Label"] = attack_or_not

    y = df["Label"]
    del df["Label"]
    feature_list.remove('Label')
    X = df[feature_list]

    for ii in ml_list:
        precision = []
        recall = []
        f1 = []
        accuracy = []
        t_time = []
        for i in range(repetition):
            second = time.time()
            # BUG FIX: random_state was the constant `repetition`, so every
            # "repetition" used exactly the same train/test split and the
            # averaged metrics/box plots were computed over identical runs.
            # Seed with the repetition index so each run sees a different split.
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=i)
            clf = ml_list[ii]
            clf.fit(X_train, y_train)
            predict = clf.predict(X_test)
            f_1 = f1_score(y_test, predict, average='macro')
            pr = precision_score(y_test, predict, average='macro')
            rc = recall_score(y_test, predict, average='macro')
            precision.append(float(pr))
            recall.append(float(rc))
            f1.append(float(f_1))
            accuracy.append(clf.score(X_test, y_test))
            t_time.append(float((time.time() - second)))

        print('%-17s %-17s %-15s %-15s %-15s %-15s %-15s' % (j[0:-4], ii,
                                                             str(round(np.mean(accuracy), 2)),
                                                             str(round(np.mean(precision), 2)),
                                                             str(round(np.mean(recall), 2)),
                                                             str(round(np.mean(f1), 2)),
                                                             str(round(np.mean(t_time), 4))))
        # Append the per-repetition metrics to the results file.
        with open(result, "a", newline="", encoding="utf-8") as f:
            wrt = csv.writer(f)
            for i in range(0, len(t_time)):
                wrt.writerow([j[0:-4], ii, accuracy[i], precision[i], recall[i], f1[i], t_time[i]])
        a.append(f1)

    # Box plot of the F1 distributions, one panel per algorithm.
    ml = ["Naive Bayes", "QDA", "Random Forest", "Nearest Neighbors"]
    temp = 0
    fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(12, 6), sharey=True)
    for c in range(2):
        for b in range(4):
            axes[c, b].boxplot(a[temp])
            axes[c, b].set_title(str(j[0:-4]) + " - " + str(ml[temp]), fontsize=7)
            axes[c, b].set_ylabel("F measure")
            temp += 1
            # BUG FIX: the loop previously stopped at temp == 7 although only
            # len(a) == 4 algorithms are evaluated, so a[4] raised IndexError.
            if temp == len(a):
                break
        if temp == len(a):
            break
    # NOTE: the deprecated savefig kwargs papertype/orientation were dropped
    # (removed in matplotlib >= 3.9; they only affected the postscript backend).
    plt.savefig(folder_name + j[0:-4] + ".pdf", bbox_inches='tight', format='pdf')
    plt.show()
    print("\n------------------------------------------------------------------------------------------------------\n\n")
print("ok!")
print("Total operation time: = ", time.time() - seconds, "seconds")
traffic_categorizer_for_attack_types.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ### Test react # + import os import unittest from rmgpy import settings from rmgpy.species import Species from rmgpy.data.rmg import RMGDatabase import afm.react import afm.fragment # + # load kinetics database db_path = settings['database.directory'] database = RMGDatabase() # forbidden structure loading database.loadForbiddenStructures(os.path.join(db_path, 'forbiddenStructures.py')) # kinetics family loading database.loadKinetics(os.path.join(db_path, 'kinetics'), kineticsFamilies='default', reactionLibraries=[] ) # def test_react_fragments1(self): frag1 = afm.fragment.Fragment(label='frag1').from_SMILES_like_string('c1ccccc1CCCR') fragment_tuple = (frag1, ) reactions = afm.react.react_fragments(database.kinetics, fragment_tuple, only_families=['R_Recombination'], prod_resonance=False) # - len(reactions)==28 # + # def test_react_fragments2(self): frag1 = afm.fragment.Fragment(label='frag1').from_SMILES_like_string('c1ccccc1CCCR') frag2 = afm.fragment.Fragment(label='frag2').from_SMILES_like_string('[CH2]CR') fragment_tuple = (frag1, frag2) reactions = afm.react.react_fragments(database.kinetics, fragment_tuple, only_families=['H_Abstraction'], prod_resonance=False) # - len(reactions)==11 print reactions[0] frag1 frag2 reactions[0] reactions[1] print reactions[1] reactions[2] rxn_index = range(0,len(reactions)) rxn_index for index in rxn_index: display(reactions[index]) # + ## Test 3 # + # def test_generate_reactions_from_families1(self): frag1 = afm.fragment.Fragment(label='frag1').from_SMILES_like_string('CC') spec1 = Species(molecule=[frag1]) spec_tuple = (spec1,) reactions = database.kinetics.generate_reactions_from_families(spec_tuple) # - len(reactions)==3 rxn_index = range(0,len(reactions)) for index in rxn_index: 
display(reactions[index]) # + ## Test 4 # + #def test_generate_reactions_from_families2(self): frag1 = afm.fragment.Fragment(label='frag1').from_SMILES_like_string('CCR') spec1 = Species(molecule=[frag1]) spec_tuple = (spec1,) reactions = database.kinetics.generate_reactions_from_families(spec_tuple) # - len(reactions)==4 rxn_index = range(0,len(reactions)) for index in rxn_index: display(reactions[index])
.ipynb_checkpoints/Test_react-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/qtuter1997/100-Days-Of-ML-Code/blob/master/d2l/ReducingLearningRate.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="T2l32UJqC1TE" # **Learning rate schedule and Adaptive learning rate methods for Deep Learning** # # --- # The most of methods to optimal model when train deep neural network is use reduce learning rate. # # 1. Learning Rate schedules # 2. Adaptive learning rate methods # # Reference tutorial at [here](https://towardsdatascience.com/learning-rate-schedules-and-adaptive-learning-rate-methods-for-deep-learning-2c8f433990d1). # # In this tutorial, i train convolutional neural network on [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) using differing learning rate schedules and adaptive learing rate methods to compare their model performance. # # --- # # # # # # + [markdown] id="kjnvmY7zG-Td" # 1. Learning rate schedules # # Reducing the learning rate according to a pre-defined schedule. # # Common learning rate schdules include **time-base decay, step decay** and **exponential decay**. 
# # * Constant learning rate # * Time-base dacay # * Step decay # * Exponential decay # # In the first half of this tutorial # # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="PIEapBJUCxMi" outputId="8ae3173b-7d5e-4e4c-c352-e5ba27fb80a9" # Add library import numpy as np import math import matplotlib.pyplot as plt import tensorflow as tf from tensorflow.keras.datasets import cifar10 from tensorflow.keras.utils import to_categorical from tensorflow.keras import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, MaxPooling2D from tensorflow.keras.optimizers import SGD, Adam, Adagrad, Adadelta, RMSprop from tensorflow.keras.models import model_from_json from tensorflow.keras.callbacks import LearningRateScheduler tf.__version__ # + [markdown] id="HH6KoaGJQpYB" # Load CIFAR10 data # + colab={"base_uri": "https://localhost:8080/"} id="PGoUvFFRQnR7" outputId="ae22094a-4aec-4b6e-9511-7311b17e0a81" batch_size = 64 # 2^x to match GPU num_classes = 10 epochs = 100 # input image demensions img_row, img_col = 32, 32 # The data, shuffled and split between train and test sets (X_train, Y_train), (X_test, Y_test) = cifar10.load_data() # + colab={"base_uri": "https://localhost:8080/"} id="F1art_RGSs9E" outputId="2c1de397-f384-43de-88b7-d7368e2fe63e" # In example, we only use cat [==3] and dog [==5] train_picks = np.ravel(np.logical_or(Y_train == 3, Y_train == 5)) test_picks = np.ravel(np.logical_or(Y_test == 3, Y_test == 5)) Y_train = np.array(Y_train[train_picks] == 5, dtype= int) Y_test = np.array(Y_test[test_picks] == 5, dtype= int) X_train = X_train[train_picks] X_test = X_test[test_picks] # + [markdown] id="OMxl4Ox3LlHN" # 2. Adaptive learning rate methods
d2l/ReducingLearningRate.ipynb
// ---
// jupyter:
//   jupytext:
//     text_representation:
//       extension: .cs
//       format_name: light
//       format_version: '1.5'
//     jupytext_version: 1.14.4
//   kernelspec:
//     display_name: .NET (C#)
//     language: C#
//     name: .net-csharp
// ---

// # Linear Solver Performance: Constant Coefficient Poisson, Single Core
//
// ### Part 2, Evaluation

// + dotnet_interactive={"language": "csharp"}
#r "BoSSSpad.dll"
using System;
using System.Collections.Generic;
using System.Linq;
using ilPSP;
using ilPSP.Utils;
using BoSSS.Platform;
using BoSSS.Platform.LinAlg;
using BoSSS.Foundation;
using BoSSS.Foundation.XDG;
using BoSSS.Foundation.Grid;
using BoSSS.Foundation.Grid.Classic;
using BoSSS.Foundation.Grid.RefElements;
using BoSSS.Foundation.IO;
using BoSSS.Solution;
using BoSSS.Solution.Control;
using BoSSS.Solution.GridImport;
using BoSSS.Solution.Statistic;
using BoSSS.Solution.Utils;
using BoSSS.Solution.AdvancedSolvers;
using BoSSS.Solution.Gnuplot;
using BoSSS.Application.BoSSSpad;
using BoSSS.Application.XNSE_Solver;
using static BoSSS.Application.BoSSSpad.BoSSSshell;
Init();

// + dotnet_interactive={"language": "csharp"}
// Bind this worksheet to the named workflow-management project
wmg.Init("LinslvPerf_ConstPoissonMpi1");
wmg.SetNameBasedSessionJobControlCorrelation();

// + dotnet_interactive={"language": "csharp"}
// Manually add database (e.g. when accessing the database of user `JenkinsCI` as some other, regular guy)
//var tempDb = OpenOrCreateDatabase(@"\\fdygitrunner\ValidationTests\LinslvPerf_ConstPoissonMpi1");

// + dotnet_interactive={"language": "csharp"}
databases

// + dotnet_interactive={"language": "csharp"}
wmg.Sessions

// + dotnet_interactive={"language": "csharp"}
using System.IO;
using BoSSS.Application.SipPoisson;
// -

// ## Create Table for Post-Processing

// + dotnet_interactive={"language": "csharp"}
// evaluators to add additional columns to the session table;
// each evaluator extracts a timing from the session's profiling tree and
// returns the average wall-clock seconds per call of the named code block
static class AddCols {
    // Average time per call spent in the "MatrixAssembly" profiling block
    static public object XdgMatrixAssembly_time(ISessionInfo SI) {
        var mcr = SI.GetProfiling()[0];
        var ndS = mcr.FindChildren("MatrixAssembly");
        var nd  = ndS.ElementAt(0);
        return nd.TimeSpentInMethod.TotalSeconds / nd.CallCount;
    }
    // Average time per call spent in "Aggregation_basis_init"
    static public object Aggregation_basis_init_time(ISessionInfo SI) {
        var mcr = SI.GetProfiling()[0];
        var ndS = mcr.FindChildren("Aggregation_basis_init");
        var nd  = ndS.ElementAt(0);
        return nd.TimeSpentInMethod.TotalSeconds / nd.CallCount;
    }
    // Average time per call spent in "Solver_Init"
    static public object Solver_Init_time(ISessionInfo SI) {
        var mcr = SI.GetProfiling()[0];
        var ndS = mcr.FindChildren("Solver_Init");
        var nd  = ndS.ElementAt(0);
        //Console.WriteLine("Number of nodes: " + ndS.Count() + " cc " + nd.CallCount );
        return nd.TimeSpentInMethod.TotalSeconds / nd.CallCount;
    }
    // Average time per call spent in "Solver_Run"
    static public object Solver_Run_time(ISessionInfo SI) {
        var mcr = SI.GetProfiling()[0];
        var ndS = mcr.FindChildren("Solver_Run");
        var nd  = ndS.ElementAt(0);
        return nd.TimeSpentInMethod.TotalSeconds / nd.CallCount;
    }
    // Number of profiling records (presumably one per MPI rank -- verify)
    static public object NoOfCores(ISessionInfo SI){
        return SI.GetProfiling().Length;
    }
}

// + dotnet_interactive={"language": "csharp"}
// Register the evaluators above as extra columns of the session table
wmg.AdditionalSessionTableColums.Clear();
wmg.AdditionalSessionTableColums.Add("MatrixAssembly", AddCols.XdgMatrixAssembly_time);
wmg.AdditionalSessionTableColums.Add("Aggregation_basis_init_time", AddCols.Aggregation_basis_init_time);
wmg.AdditionalSessionTableColums.Add("Solver_Init_time", AddCols.Solver_Init_time);
wmg.AdditionalSessionTableColums.Add("Solver_Run_time", AddCols.Solver_Run_time);
wmg.AdditionalSessionTableColums.Add("NoOfCores", AddCols.NoOfCores);

// + dotnet_interactive={"language": "csharp"}
var SessTab = wmg.SessionTable;

//// The Session column can't be serialized,
//// we have to remove it
//List<string> AllCols = FullSessTab.GetColumnNames().ToList();
//AllCols.Remove("Session");
//FullSessTab = FullSessTab.ExtractColumns(AllCols.ToArray());
// -

// Select those columns which are of interest:

// + dotnet_interactive={"language": "csharp"}
var SubTab = SessTab.ExtractColumns(
    "SessionName","DGdegree:T", "Grid:NoOfCells",
    "LinearSolver.SolverCode", "DOFs",
    "MatrixAssembly", "Aggregation_basis_init_time", "Solver_Init_time", "Solver_Run_time",
    "NoIter");

// + dotnet_interactive={"language": "csharp"}
// Filename: project name plus the current date
var now = DateTime.Now;
string docName = wmg.CurrentProject + "_" + now.Year + "-" + now.Month + "-" + now.Day;
SubTab.SaveToFile(docName + ".json");
SubTab.ToCSVFile(docName + ".csv");

// + dotnet_interactive={"language": "csharp"}
//SessTab.Print();

// + [markdown] dotnet_interactive={"language": "csharp"}
// ## Visualization of Results
// -

// The following data is available:

// + dotnet_interactive={"language": "csharp"}
SubTab
// -

// Available DG degrees:

// + dotnet_interactive={"language": "csharp"}
var DGdegrees = SubTab.GetColumn<int>("DGdegree:T").ToSet().OrderBy(s => s).ToArray();
DGdegrees
// -

// All used solvers:

// + dotnet_interactive={"language": "csharp"}
SubTab.GetColumn<int>("LinearSolver.SolverCode").ToSet().Select(i => (LinearSolverCode)i).ToArray()

// + dotnet_interactive={"language": "csharp"}
//RuntimePlot.PlotNow();

// + dotnet_interactive={"language": "csharp"}
//using SolverCodes = BoSSS.Solution.Control.LinearSolverConfig.Code;

// + dotnet_interactive={"language": "csharp"}
// One plot row per DG degree, runtime-over-DOFs per solver in each plot
int rows    = DGdegrees.Length;
int columns = 1;

//LinearSolverCode[] ignore_solvers = {LinearSolverCode.classic_pardiso, LinearSolverCode.classic_cg, LinearSolverCode.exp_softpcg_schwarz, LinearSolverConfig.Code.exp_direct_lapack};
LinearSolverCode[] ignore_solvers = {};

Plot2Ddata[,] multiplots = new Plot2Ddata[rows,columns];
int pDegree = 0;

for(int iRow = 0; iRow < rows; iRow++) {
for(int iCol = 0; iCol < columns; iCol++) {

    if(pDegree > rows*columns-1)
        continue;
    int tmpDG = -1;
    if(pDegree < DGdegrees.Length)
        tmpDG = DGdegrees[pDegree];

    //Create Graphs
    multiplots[iRow,iCol] = SubTab.ToPlot("DOFs", "Solver_Run_time", // columns for x- and y-axis
        delegate (int iTabRow,
                  IDictionary<string, object> Row,
                  out string Nmn,
                  out PlotFormat Fmt) {
            // - - - - - - - - - - - - - - - - - - - - - - - -
            // PlotRowSelector:
            // selects, which table row goes to which graph,
            // and the respective color
            // - - - - - - - - - - - - - - - - - - - - - - - -
            int k = Convert.ToInt32(Row["DGdegree:T"]);
            if(k != tmpDG) {
                // degree does not match -> not in this plot
                Nmn = null;
                Fmt = null;
                return;
            }

            LinearSolverCode solver_name = (LinearSolverCode)Convert.ToInt32(Row["LinearSolver.SolverCode"]);

            //ignore the solvers specified in ignore_solvers
            foreach(LinearSolverCode sc in ignore_solvers){
                if(solver_name==sc){
                    System.Console.WriteLine("skipped");
                    Nmn = null;
                    Fmt = null;
                    return;
                }
            }

            // ===!!!=== CAUTION ===!!!===
            // Solverframework (SolverEnum numbers) have changed! Please check LinearSolver and NonLinearSolver for details

            //process the other solvers: assign legend name and point style
            Fmt = new PlotFormat();
            switch(solver_name) {
            case LinearSolverCode.classic_pardiso:
                Nmn = "Pardiso";
                Fmt.PointType = PointTypes.OpenCircle;
                Fmt.DashType  = DashTypes.Dotted;
                break;
            case LinearSolverCode.exp_gmres_levelpmg:
                Nmn = "GMRES w. pTG";
                Fmt.PointType = PointTypes.Box;
                break;
            case LinearSolverCode.exp_Kcycle_schwarz:
                Nmn = "Kcycle w. add.-Schwarz";
                Fmt.PointType = PointTypes.LowerTriangle;
                break;
            default:
                Console.WriteLine("unknown: " + solver_name);
                Nmn = "unknown";
                break;
            }
            //Console.WriteLine("name is: " + solver_name);
            Fmt.PointSize = 0.5;
            Fmt.Style     = Styles.LinesPoints;
            Fmt.LineColor = LineColors.Black;
            Fmt.LineWidth = 3;
        });

    // Reference line with slope 1 (linear runtime scaling in the DOFs)
    double[] dof  = new[]{1e3,1e6};
    double[] linT = dof.Select(x => x*0.001).ToArray();
    var linP = new Plot2Ddata.XYvalues("linear", dof, linT);
    linP.Format.FromString("- black");
    ArrayTools.AddToArray(linP, ref multiplots[iRow,iCol].dataGroups);

    //all about axis
    string Title = string.Format("$k = {0}$", tmpDG);
    multiplots[iRow,iCol].Ylabel = Title;
    multiplots[iRow,iCol].LogX   = true;
    multiplots[iRow,iCol].LogY   = true;

    //specify range of axis
    multiplots[iRow,iCol].YrangeMin = 1e-2;
    multiplots[iRow,iCol].YrangeMax = 1e+4;
    multiplots[iRow,iCol].XrangeMin = 1e2;
    multiplots[iRow,iCol].XrangeMax = 1e7;
    //multiplots[iRow,iCol].Y2rangeMin = 1e-3;
    //multiplots[iRow,iCol].Y2rangeMax = 1e+4;
    //multiplots[iRow,iCol].X2rangeMin = 1e2;
    //multiplots[iRow,iCol].X2rangeMax = 1e7;

    //spacing around plots
    multiplots[iRow,iCol].ShowLegend = false;
    multiplots[iRow,iCol].tmargin = 0;
    multiplots[iRow,iCol].bmargin = 2;
    multiplots[iRow,iCol].lmargin = 20;
    //multiplots[iRow,iCol].rmargin = 5;
    multiplots[iRow,iCol].ShowXtics = false;

    //I am legend ... (legend only on the top-most plot)
    if(iRow == 0) {
        multiplots[iRow,iCol].ShowLegend = true;
        multiplots[iRow,iCol].LegendAlignment = new string[]{"i","t","l"};
        //multiplots[iRow,iCol].LegendSwap = true;
    }

    //and i am special ... (x tics only on the bottom-most plot)
    if(iRow == rows - 1)
        multiplots[iRow,iCol].ShowXtics = true;
    pDegree++;
}
}
//multiplots.PlotCairolatex().WriteMinimalCompileableExample("latex/solvers.tex");
//multiplots.AddDummyPlotsForLegend(3,0);
multiplots.PlotNow()

// + dotnet_interactive={"language": "csharp"}

// + dotnet_interactive={"language": "csharp"}
doc/handbook/apdx-NodeSolverPerformance/PoissonConstCoeff/LinslvPerf_ConstPoissonMpi1-Pt2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Stats: Real and Erroneous Sources of Variability # # *Purpose*: Variability is an unavoidable reality of engineering: Physical systems do not behave in identical ways, even under similar circumstances. It is important to draw a distinction between sources of variability that corrupt measurements, and other sources of variability that can genuinely change physical outcomes: real vs erroneous variability. Therefore, we should analyze real and erroneous sources of variability differently---this can lead to more accurate data analysis and more efficient engineering designs. # # ## Setup # import grama as gr import numpy as np DF = gr.Intention() # %matplotlib inline # # Concepts # # Let's go over some fundamental concepts. # # ## Variability # # > *Variability* is a form of uncertainty that occurs when repeated measurements from a single chosen scenario exhibit different outcomes. # # There are several parts to this definition: # # *Different outcomes* # # We must have different outcomes for there to be variability. If repeated measurement of the same quantity produce the same value, then there is no variability. # # *Repeated measurements* # # Variability requires repeated measurements, implying that there must be more than one measurement. Unique events that will never occur again cannot be said to exhibit variability. # # *A single chosen scenario* # # Variability arises when we focus on a single scenario and take repeated measurements. Different scenarios would logically produce different measurements. We might describe a scenario in terms of detailed qualitative factors (e.g. a particular manufacturing process, a particular machinist) or we may describe a family of scenarios with continuous variables (e.g. 
a continuum of feed speeds when machining). # # Variability is perhaps best understood by way of example; let's take a look at one. # # Dataset of die cast aluminum specimens from grama.data import df_shewhart df_shewhart.head() # This is a dataset of measured physical properties for individual specimens of a die cast aluminum. You'll investigate this dataset below. # # ### __q1__ Make a histogram # # Make a histogram of the `tensile_strength` in `df_shewhart`. Answer the questions under *observations* below. # TASK: Make a histogram # solution-begin # NOTE: I additionally compute statistics about # the observed variability print( df_shewhart >> gr.tf_summarize( mu=gr.mean(DF.tensile_strength), sd=gr.sd(DF.tensile_strength), ) >> gr.tf_mutate(cov=DF.sd/DF.mu) ) # solution-end ( df_shewhart # solution-begin >> gr.ggplot(gr.aes("tensile_strength")) + gr.geom_histogram(bins=20) # solution-end ) # *Observations* # # <!-- task-begin --> # - Are all of the measured `tensile_strength` values identical? Do the measurements exhibit *variability*? # - (Your response here?) # - How much variability do the `tensile_strength` measurements exhibit? (You can use the histogram, or compute a quantitative summary.) # - (Your response here?) # <!-- task-end --> # <!-- solution-begin --> # - Are all of the measured `tensile_strength` values identical? Do the measurements exhibit *variability*? # - The measured values are **not** identical; the measurements do exhibit variability. # - How much variability do the `tensile_strength` measurements exhibit? (You can use the histogram, or compute a quantitative summary.) # - The measurements are *highly* variable; the coefficient of variation is around `0.13`, which is huge for a material property. # <!-- solution-end --> # # ## Real vs Erroneous Variability # # Variability in engineering applications falls into one of two broad categories: # # > *Real variability* is any source of variability that can affect a physical outcome. 
# # > *Error* (or *erroneous variability*) is any source of variability that corrupts a measurement. # # Note that errors cannot *directly* affect physical outcomes. A corrupted measurement does not reflect actual changes in the physical quantity one is trying to measure. *However*, making an engineering decision based on corrupted information will lead to a corrupted decision. Errors can only affect the physical world through actions taken based on those errors. # # Real and erroneous sources of variability can only be named in a specific context; to that end, let's look at a specific example. # # ### Example: Manufacturing of Metals # # The following image schematically depicts the manufacture and characterization of metallic materials. # # ![Manufacturing example](./images/manufacturing-examples-numbered.png) # Image credit: <NAME> # # Variability can enter metals manufacturing at different stages and in different ways: # # 1. (Real variability) Mistakes during the formulation stage of metals manufacturing can lead to an off-target composition. This will lead to explainable differences in the realized material properties, and will cause the material to not behave as desired. # # 2. (Real variability) The presence of nicks and cracks in manufactured parts is an unavoidable reality. It is practically impossible to completely eliminate these imperfections. The presence and size of these imperfections will affect the fatigue life of a component. Thus, manufactured parts will tend to exhibit random variability in their fatigue properties. # # 3. (Error) Material properties are characterized by physical tests, such as tensile tests. Mistakes during testing can occur, such as slippage of the part within the mechanical grips. Such mistakes will lead to explainable differences in the *measured* properties, but cannot directly lead to differences in physical outcomes. # # 4. 
(Error) Property measurements typically involve a conversion from mechanical phenomena to electrical signals. However, we tend to take measurements in an electromagnetically dirty environment: Ambient EM interference can induce electrical noise that corrupts a measurement. Since an EM environment is too complex to explain deterministically, this will tend to lead to random differences in measured properties, but this variability cannot directly lead to differences in physical outcomes. # # Variability will have different consequences if it is real or erroneous. You will get some practice reasoning through these consequences in the next task. # # ### __q2__ Consequences of variability # # The following depicts the `tensile_strength` measurements, but with an additional vertical line depicting the applied stress from a loading scenario. Imagine that multiple parts will be constructed using the same manufacturing process characterized by the dataset. Answer the questions under *observations* below. # # *Note*: Remember that a part fails if its tensile strength is less than an applied stress. # # NOTE: No need to edit; run and inspect ( df_shewhart >> gr.ggplot(gr.aes("tensile_strength")) + gr.geom_histogram(bins=20, color="black") + gr.geom_vline(xintercept=25000, linetype="dashed", size=2) + gr.scale_y_continuous(breaks=range(0, 10)) + gr.theme_minimal() ) # *Observations* # # Imagine that multiple parts will be constructed using the same manufacturing process characterized by the dataset. The vertical line represents the stress that will be applied to these parts. # # <!-- task-begin --> # - Suppose that the variability depicted above is **purely erroneous**; the tensile strength is actually a fixed constant, and all the observed variability is due to measurement error. How confident would you be that **all** the parts would survive their loading conditions? # - (Your response here?) 
# - Now suppose that the variability depicted above is **purely real**; the tensile strength varies from part to part. How confident would you be that **all** the parts would survive their loading conditions? # - (Your response here?) # - Which of the two assumptions above would be the most *conservative* assumption to make? Why? # - (Your response here?) # <!-- task-end --> # <!-- solution-begin --> # - Suppose that the variability depicted above is **purely erroneous**; the tensile strength is actually a fixed constant, and all the observed variability is due to measurement error. How confident would you be that **all** the parts would survive their loading conditions? # - If the variability were purely error, then the tensile strength is a fixed constant most likely somewhere in the middle of the distribution of measured values. In this case, all of the parts are likely to survive their loading conditions. # - Now suppose that the variability depicted above is **purely real**; the tensile strength varies from part to part. How confident would you be that **all** the parts would survive their loading conditions? # - If the variability were purely real, then the tensile strength varies with each part. Based on the results above, it is essentially guaranteed that some parts will not survive their loading conditions. # - Which of the two assumptions above would be the most *conservative* assumption to make? Why? # - Considering the variability as real is a more conservative assumption; this is the less favorable assumption, one which will encourage us to take defensive design measures against possible failures. # <!-- solution-end --> # # ## Scenarios # # Let's get some practice reasoning with real and erroneous variability. To do so, let's consider the following context: # # *The Context*: A manufacturer is producing steel parts for a landing gear. The part in question takes a heavy load, and if it fails it will disable the aircraft on the ground. 
These parts will be manufactured in bulk; approximately 500 will be made and installed in commercial aircraft that will operate for decades. # # + [markdown] tags=[] # ### __q3__ Multiple measurements, same part # # Imagine the manufacturer selects one part and performs multiple non-destructive tensile tests on that single part, under similar conditions. The measured elasticity from each test is slightly different. Is this variability real or erroneous? # # *Questions* # # <!-- task-begin --> # - Is this variability most likely real or erroneous? # - (Your response here) # - Why? # - (Your response here) # <!-- task-end --> # <!-- solution-begin --> # - Is this variability most likely real or erroneous? # - Erroneous # - Why? # - The properties of the component are largely set at manufacturing time, and the tests are carried out under similar conditions. If multiple measurements on the same part (under similar conditions) return different values, then the variability is most likely induced by the measurement process---thus, erroneous variability. # <!-- solution-end --> # # - # ### __q4__ Multiple measurements, multiple parts # # Imagine the manufacturer selects multiple parts and---for each part---performs multiple non-destructive tensile tests, all under similar conditions. The measured elasticity values for each part are averaged to provide a more reliable estimate for each part. Upon comparing the parts, each averaged value is fairly different. Is this variability real or erroneous? # # *Questions* # # <!-- task-begin --> # - Is this variability most likely real or erroneous? # - (Your response here) # - Why? # - (Your response here) # <!-- task-end --> # <!-- solution-begin --> # - Is this variability most likely real or erroneous? # - Real # - Why? # - The properties of the component are essentially set at manufacturing time; but no manufacturing process can create items with identical properties. 
Particularly if variability remains after erroneous variability has been controlled and eliminated (as described in the prompt), then the remaining variability is real. # <!-- solution-end --> # # ### __q5__ Single measurements, multiple parts # # Now the manufacturer selects multiple parts and performs a destructive tensile test to characterize the strength of each part, with tests carried out under similar conditions. The measured strength values exhibit a fair amount of variability. Is this variability real or erroneous? # # *Questions* # # <!-- task-begin --> # - Is this variability most likely real or erroneous? # - (Your response here) # - Why? # - (Your response here) # <!-- task-end --> # <!-- solution-begin --> # - Is this variability most likely real or erroneous? # - Without more information, it is impossible to say. It is likely a combination of real and erroneous sources. # - Why? # - Real variability can arise from the manufacturing process, and error can arise from the measurement. Since the measurement is destructive, we cannot use multiple measurements to characterize the erroneous variability. # <!-- solution-end --> # # # Heuristics # # Recognizing whether variability is real or erroneous is important for making data analysis choices. The following two heuristics (rules of thumb) will help you make such choices. # # ## Pure Error: Take the mean # # The following is a statistical rule-of-thumb when dealing with variability that is *purely* error: # # > (Mean Heuristic) If the observed variability is purely error, then taking the mean of the data will tend to reduce the variability due to error. # # ```{admonition} Caveat: Bias in measurements # :class: warning # Of course, if the errors are not actually random but insted exhibit *bias*, then taking the mean will not remove these systematic errors. 
# One way to help combat bias is to take truly *independent* measurements; for instance, have different operators take the same measurement, or use two different measurement techniques on the same specimen.
# ```

# ### __q6__ Take the mean strength
#
# Compute the mean `tensile_strength`. Answer the questions under *observations* below.
#

# TASK: Compute the mean tensile_strength
(
    df_shewhart
# solution-begin
    >> gr.tf_summarize(
        ts_mean=gr.mean(DF.tensile_strength)
    )
# solution-end
)

# *Observations*
#
# Suppose that multiple parts will be made according to the same manufacturing process and subjected to a tensile load. As the parts are currently designed, the applied stress will be `26,000` psi, but you are considering whether to redesign the part (to reduce the applied stress).
#
# <!-- task-begin -->
# - Suppose the variability is purely erroneous. Does a redesign seem necessary, based on assuming pure error, the strength value you computed above, and the current applied load?
#   - (Your response here?)
# - Is the analysis above safe for design purposes? Why or why not?
#   - (Your response here?)
# <!-- task-end -->
# <!-- solution-begin -->
# - Suppose the variability is purely erroneous. Does a redesign seem necessary, based on assuming pure error, the strength value you computed above, and the current applied load?
#   - The summarized strength of $\approx31,800$ psi is well above the applied stress of $26,000$ psi. Under the assumption of pure error, redesign does not seem necessary.
# - Is the analysis above safe for design purposes? Why or why not?
#   - The analysis above is not safe for design; if the assumption of pure error is wrong, then there exists the strong possibility of failure.
# <!-- solution-end -->
#
# ## Purely Real: Use quantiles
#
# The following is a statistical rule-of-thumb when dealing with purely real variability:
#
# > (Quantile Heuristic) If the observed variability is purely real, then taking a lower or upper quantile of the data is a reasonable summary.
#

# + [markdown] tags=[]
# ### __q7__ Compute a quantile of strength
#
# Compute the `0.05` quantile of the `tensile_strength`. Answer the questions under *observations* below.
#
# -

# TASK: Compute the 0.05 quantile of the tensile_strength
(
    df_shewhart
# solution-begin
    >> gr.tf_summarize(
        ts_lo=gr.quant(DF.tensile_strength, p=0.05)
    )
# solution-end
)

# *Observations*
#
# Suppose that multiple parts will be made according to the same manufacturing process and subjected to a tensile load. As the parts are currently designed, the applied stress will be `26,000` psi, but you are considering whether to redesign the part (to reduce the applied stress).
#
# <!-- task-begin -->
# - Suppose the variability is purely real. Does a redesign seem necessary, based on assuming real variability, the strength value you computed above, and the current applied load?
#   - (Your response here?)
# - What probability of failure is associated with your computed strength value?
#   - (Your response here?)
# - Is the analysis above safe for design purposes? Why or why not?
#   - (Your response here?)
# <!-- task-end -->
# <!-- solution-begin -->
# - Suppose the variability is purely real. Does a redesign seem necessary, based on assuming real variability, the strength value you computed above, and the current applied load?
#   - The summarized strength is $\approx25,600$ psi, which is a bit below the applied stress of $26,000$ psi. This suggests a redesign is necessary to reduce the applied stress and make the parts safer.
# - What probability of failure is associated with your computed strength value?
#   - The `0.05` quantile is associated with a probability of failure of $5\%$.
# - Is the analysis above safe for design purposes? Why or why not? # - The analysis above is certainly safer than assuming pure error; it helps us to achieve a desired (small) failure rate. # <!-- solution-end --> # # ## Mixed Real and Error: Nested data # # In most practical situations, variability is not purely real or purely error; rather, it is mixed. This is a challenging situation, but we can still use the mean and quantile heuristics to guide choices about data analysis. # # The following code sets up data collected according to a *nested* collection scheme: Multiple independent measurements are taken on each of multiple specimens. # # + ## NOTE: No need to edit np.random.seed(101) # Parameters for data generation n_sample = 100 n_meas = 8 var_real = 0.20 var_meas = 0.05 # Simulate nested data df_nested = ( gr.df_make( x=gr.marg_mom("lognorm", floc=0, mean=1, var=var_real).r(n_sample), i_sample=range(n_sample), ) >> gr.tf_outer(gr.df_make(i_meas=range(n_meas))) >> gr.tf_mutate( x_meas=DF.x + gr.marg_mom("norm", mean=0, var=var_meas).r(n_sample*n_meas), ) >> gr.tf_select("i_sample", "i_meas", "x_meas") >> gr.tf_arrange(DF.i_sample, DF.i_meas) ) df_nested.head(8) # - # Here: # # - `i_sample` denotes the sample ID, each sample is an independent component # - `i_meas` denotes the measurement ID, measurement are repeated on each sample # - note that these are per-sample, e.g. `i_meas == 0` for `i_sample == 0` is unrelated to `i_meas == 0` for `i_sample == 1`. # - `x_meas` is the measured value # # ### __q8__ Identify real and erroneous sources # # Inspect the figure below; answer the questions under *observations* below. # # NOTE: No need to edit; run and inspect ( df_nested >> gr.tf_filter(DF.i_sample <= 8) >> gr.ggplot(gr.aes("i_sample")) + gr.geom_point( gr.aes(y="x_meas"), size=1, color="grey", ) ) # *Observations* # # <!-- task-begin --> # - Is the variability *within* each group of `i_sample` value most likely real or erroneous? 
# - (Your response here) # - Is the variability *across* each group of `i_sample` value most likely real or erroneous? # - (Your response here) # - Suppose the variability *within* each group of `i_sample` were *purely* erroneous; which heuristic could you apply to each group? # - (Your response here) # <!-- task-end --> # <!-- solution-begin --> # - Is the variability *within* each group of `i_sample` value most likely real or erroneous? # - This variability is most likely erroneous; these are repeated measurements of the same part. # - Is the variability *across* each group of `i_sample` value most likely real or erroneous? # - This variability is most likely mixed; we have the same error mentioned in the previous question, plus the variability across each sample. # - Suppose the variability *within* each group of `i_sample` were *purely* erroneous; which heuristic could you apply to each group? # - The mean heuristic # <!-- solution-end --> # # + [markdown] tags=[] # ### __q9__ Apply the mean heuristic # # Use the *mean heuristic* to construct a more stable measurement for each sample. # # *Hint*: To do this, you will need to `group_by()` an appropriate variable in the data and `summarize()`. # # + # TASK: Apply the mean heuristic to produce a more stable measurement # for each sample `x_mean`. Compute the variance within the same # grouping `var_meas`. df_nested_mean = ( df_nested # solution-begin >> gr.tf_group_by(DF.i_sample) >> gr.tf_summarize( x_mean=gr.mean(DF.x_meas), var_meas=gr.var(DF.x_meas), ) # solution-end # Ensure the data are ungrouped after all operations >> gr.tf_ungroup() ) # NOTE: No need to edit; use this to check your results assert \ df_nested_mean.shape[0] == n_sample, \ "Incorrect number of rows; make sure you grouped by the correct variable." 
assert \ "x_mean" in df_nested_mean.columns, \ "x_mean column not found in result" assert \ "var_meas" in df_nested_mean.columns, \ "var_meas column not found in result" assert \ abs(df_nested_mean.x_mean.var() - 0.20) < 0.1, \ "Sanity check on x_mean failed; did you take the mean?" print("Success!") # - # Once you have successfully summarized the data, run the following code and answer the questions under *observations* below. # ## NOTE: No need to edit; use this to inspect your results # This code estimates the variability due to measurement print( df_nested_mean >> gr.tf_ungroup() >> gr.tf_summarize( var_meas=gr.mean(DF.var_meas), var_real=gr.var(DF.x_mean), ) >> gr.tf_mutate(var_total=DF.var_meas + DF.var_real) ) # *Observations* # # <!-- task-begin --> # - About how much variability is due to error? # - (Your response here?) # - About how much variability is real? # - (Your response here?) # <!-- task-end --> # <!-- solution-begin --> # - About how much variability is due to error? # - I find that `var_meas ~ 0.05`, which is about $20\%$ of the total. # - About how much variability is real? # - I find that `var_real ~ 0.20`, which is about $80\%$ of the total. # <!-- solution-end --> # # ### Benefits of eliminating erroneous variability # # Remember that the mean heuristic helps us reduce erroneous variability; what are the advantages of this technique? Let's compare the nested and summarized datasets: # # NOTE: No need to edit; visualize both the original and summarized data ( gr.ggplot() + gr.geom_density( data=df_nested >> gr.tf_mutate(source="Original"), mapping=gr.aes("x_meas", color="source"), ) + gr.geom_density( data=df_nested_mean >> gr.tf_mutate(source="Summarized"), mapping=gr.aes("x_mean", color="source"), ) ) # As we might expect, the summarized data appears to be narrower, which would make sense if we had reduced the erroneous variability. This can have useful implications for designs based on our data analysis. 
# # ### __q10__ Apply the quantile heuristic # # Apply the quantile heuristic to the summarized data. Use the same quantiles `0.10, 0.90` computed on the original data. Answer the questions under *observations* below. # # *Hint*: Do you need to group again to apply the quantile heuristic? # # + # NOTE: No need to edit; this is an analysis of the original data print("Quantiles of original data") print( df_nested >> gr.tf_summarize( x_lo=gr.quant(DF.x_meas, 0.10), x_hi=gr.quant(DF.x_meas, 0.90), ) # Compute the interval width >> gr.tf_mutate(width=DF.x_hi - DF.x_lo) ) # TASK: Modify the code below print("\nQuantiles of summarized data") print( df_nested_mean # task-begin # TODO: Apply the quantile heuristic here # task-end # solution-begin >> gr.tf_summarize( x_lo=gr.quant(DF.x_mean, 0.10), x_hi=gr.quant(DF.x_mean, 0.90), ) # solution-end # Compute the interval width >> gr.tf_mutate(width=DF.x_hi - DF.x_lo) ) # - # *Observations* # # <!-- task-begin --> # - Which interval is narrower (`width`)? The original or the summarized? # - (Your response here?) # - How has applying the mean heuristic affected the width of the interval? Why has this happened? # - (Your response here?) # - Suppose we were using these data to guide engineering design decisions, and a narrower interval would enable a more efficient design. Which analysis would enable a more efficient design? # - (Your response here?) # <!-- task-end --> # <!-- solution-begin --> # - Which interval is narrower (`width`)? The original or the summarized? # - The summarized data gives a narrower interval. # - How has applying the mean heuristic affected the width of the interval? Why has this happened? # - Applying the mean heuristic has reduced the width of the interval. This makes sense, as applying the mean heuristic has reduced the variability due to error, allowing us to focus in on the real variability alone. 
# - Suppose we were using these data to guide engineering design decisions, and a narrower interval would enable a more efficient design. Which analysis would enable a more efficient design? # - The summarized approach (using the mean and quantile heuristics) would enable a more efficient design. # <!-- solution-end --> # # Note that this kind of analysis is **only** possible when the data have this kind of nested structure; when we have multiple measurements of each specimen. The next task highlights this important fact. # # ### __q11__ Importance of structured data collection # # Inspect the data, and answer the questions under *observations* below. # # NOTE: No need to edit; run and inspect ( df_nested >> gr.tf_select(DF.x_meas) >> gr.tf_head(8) ) # *Observations* # # <!-- task-begin --> # - With this form of `df_nested`, could we apply the mean and quantile heuristics to analyze the data? Why or why not? # - (Your response here?) # <!-- task-end --> # <!-- solution-begin --> # - With this form of `df_nested`, could we apply the mean and quantile heuristics to analyze the data? Why or why not? # - We could not apply the mean and quantile heuristics; this is because we do not have access to information about how the data were collected. Practically, we have no variables to group by. # <!-- solution-end --> # # + [markdown] jp-MarkdownHeadingCollapsed=true tags=[] # # Closing punchline # # Variability can come in different forms: Real variability affects the outcomes we are interested in, while erroneous variability corrupts measurements and inflates the variability we observe. A careful combination of planned data collection (nested data) and data analysis can help us reduce erroneous variability, which can enable more efficient designs. #
exercises/e-stat02-source-master.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/mlelarge/dataflowr/blob/master/Notebooks/03_siamese/03_siamese_triplet_mnist_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="Ei8ToiWt1EzS" colab_type="text" # # Siamese networks # + [markdown] id="kd38ctpK1EzT" colab_type="text" # ## Colab preparation # + colab_type="code" id="sx9e_pXlCuti" colab={} # %load_ext autoreload # %autoreload 2 # + id="aU__26HN1EzZ" colab_type="code" colab={} from os import path import numpy as np import random # %matplotlib inline import matplotlib import matplotlib.pyplot as plt import torch from torch.optim import lr_scheduler import torch.optim as optim from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F import torchvision from torchvision.datasets import MNIST from torchvision import transforms # + [markdown] colab_type="text" id="UMMut8UVCutt" # # 1. Setup and initializations # We'll go through learning feature embeddings using different loss functions on MNIST dataset. This is just for visualization purposes, thus we'll be using 2-dimensional embeddings which isn't the best choice in practice. # # For every experiment the same embedding network is used (`32 conv 5x5 -> ReLU -> MaxPool 2x2 -> 64 conv 5x5 -> ReLU -> MaxPool 2x2 -> Fully Connected 256 -> ReLU -> Fully Connected 256 -> ReLU -> Fully Connected 2`) with the same hyperparameters. 
# + id="tvM4mzUm1Ezc" colab_type="code" colab={} class ExperimentParams(): def __init__(self): self.num_classes = 10 self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.batch_size = 256 self.lr = 1e-2 self.num_epochs = 10 self.num_workers = 4 self.data_dir = '/home/docker_user/' args = ExperimentParams() # + [markdown] colab_type="text" id="BcmGBqXeCutw" # ## 1.1 Prepare dataset # We'll be working on MNIST dataset # + colab_type="code" id="AQSHPH9P0BNx" colab={} mean, std = 0.1307, 0.3081 train_dataset = MNIST(f'{args.data_dir}/data/MNIST', train=True, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((mean,), (std,)) ])) test_dataset = MNIST(f'{args.data_dir}/data/MNIST', train=False, download=True, transform=transforms.Compose([ transforms.ToTensor(), transforms.Normalize((mean,), (std,)) ])) # + [markdown] colab_type="text" id="TcZTFRnjCut3" # ## 1.2 Common setup # + colab_type="code" id="Dz2xh66UCut5" colab={} mnist_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'] def plot_embeddings(embeddings, targets, title='',xlim=None, ylim=None): plt.figure(figsize=(10,10)) for i in range(10): inds = np.where(targets==i)[0] plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=colors[i]) if xlim: plt.xlim(xlim[0], xlim[1]) if ylim: plt.ylim(ylim[0], ylim[1]) plt.legend(mnist_classes) plt.title(title) def extract_embeddings(dataloader, model, args): with torch.no_grad(): model.eval() embeddings = np.zeros((len(dataloader.dataset), 2)) labels = np.zeros(len(dataloader.dataset)) k = 0 for images, target in dataloader: images = images.to(args.device) embeddings[k:k+len(images)] = model.get_embedding(images).data.cpu().numpy() labels[k:k+len(images)] = target.numpy() k += len(images) return embeddings, labels def get_raw_images(dataloader,mean=0.1307, std=0.3081): raw_images = 
np.zeros((len(dataloader.dataset), 1, 28, 28)) k = 0 for input, target in dataloader: raw_images[k:k+len(input)] = (input*std + mean).data.cpu().numpy() k += len(input) return raw_images def show(img, title=None): # img is a torch.Tensor npimg = img.numpy() plt.imshow(np.transpose(npimg, (1,2,0)), interpolation='nearest') plt.axis('off') if title is not None: plt.title(title) plt.pause(0.001) # pause a bit so that plots are updated # + [markdown] colab_type="text" id="75moY8AyCut_" # # 2. Baseline: Classification with softmax # We'll train the model for classification and use outputs of penultimate layer as embeddings. # + [markdown] id="zJCmiKwf1Ezl" colab_type="text" # We will define our base embedding architecture which will serve as common backbone for our experiments # + [markdown] id="Ew1qgU_G1Ezl" colab_type="text" # ## 2.1 Architecture # + [markdown] id="49bn26xC1Ezm" colab_type="text" # #### Exercise # # Complete the missing blocks in the definition of the following `EmbeddingNet` architecture: (`32 conv 5x5 -> ReLU -> MaxPool 2x2 -> 64 conv 5x5 -> ReLU -> MaxPool 2x2 -> Fully Connected 256 -> ReLU -> Fully Connected 256 -> ReLU -> Fully Connected 2`) # + id="C1LjKH_21Ezn" colab_type="code" colab={} class EmbeddingNet(nn.Module): def __init__(self): super(EmbeddingNet, self).__init__() # self.conv1 = nn.Conv2d(1, ...) # self.conv2 = ... # self.fc1 = ... # self.fc2 = ... # self.fc3 = ... def forward(self, x, debug=False): x1 = F.max_pool2d(F.relu(self.conv1(x)), kernel_size=2, stride=2) # output = ... 
if debug == True: print(f'input: {x.size()}') print(f'x1: {x1.size()}') return output def get_embedding(self, x): return self.forward(x) # + [markdown] id="lOYKYEm41Ezq" colab_type="text" # If you want to better check the sizes of the hidden states and do debugging, you can add a `debug` variable in the `forward` function just like above # + id="D1PZbEVB1Ezr" colab_type="code" colab={} input = torch.zeros(1, 1, 28, 28) net = EmbeddingNet() net(input,debug=True) # + [markdown] id="uGMMtw_F1Ezv" colab_type="text" # Now let's define a classification net that will add fully connected layer on top of `EmbeddingNet` # + [markdown] id="JpWMG8kF1Ezv" colab_type="text" # #### Exercice # # Fill in the missing spots in the `forward` pass: # + id="7ChpUMXh1Ezw" colab_type="code" colab={} class ClassificationNet(nn.Module): def __init__(self, embedding_net, num_classes): super(ClassificationNet, self).__init__() self.embedding_net = embedding_net self.prelu = nn.PReLU() self.fc = nn.Linear(2, num_classes) def forward(self, x, debug=False): # replace None with necessary entry embedding = None output = self.fc(self.prelu(embedding)) # if debug == True: # print(f'input: {x.size()}') # print(f'embedding: {embedding.size()}') # print(f'output: {output.size()}') return output def get_embedding(self, x): # replace None with necessary entry return self.prelu(None) # + [markdown] id="e5ib15cj1Ezy" colab_type="text" # ## 2.2 Training # + id="KA8szJpj1Ezy" colab_type="code" colab={} # Set up data loaders kwargs = {'num_workers': args.num_workers, 'pin_memory': True} train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs) embedding_net = EmbeddingNet() model = ClassificationNet(embedding_net, num_classes=args.num_classes) loss_fn = torch.nn.CrossEntropyLoss() model.to(args.device) loss_fn.to(args.device) optimizer = 
optim.Adam(model.parameters(), lr=args.lr) scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1) # + id="i_NBcq3w1Ez3" colab_type="code" colab={} train_embeddings_baseline, train_labels_baseline = extract_embeddings(train_loader, model, args) plot_embeddings(train_embeddings_baseline, train_labels_baseline, 'Train embeddings before training') # + id="eOLUpe-U1Ez6" colab_type="code" colab={} def train_classif_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=50): model.train() losses = [] total_loss, total_corrects, num_samples = 0, 0, 0 corrects = 0 for batch_idx, (data, target) in enumerate(train_loader): num_samples += data.size(0) data, target = data.to(args.device), target.to(args.device) optimizer.zero_grad() outputs = model(data) loss = loss_fn(outputs, target) losses.append(loss.data.item()) _,preds = torch.max(outputs.data,1) corrects += torch.sum(preds == target.data).cpu() loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f} \tAccuracy: {}'.format( batch_idx * len(data[0]), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), np.mean(losses), float(total_corrects)/num_samples)) total_loss += np.sum(losses) total_corrects += corrects losses, corrects = [], 0 return total_loss/(batch_idx + 1), total_corrects/num_samples def test_classif_epoch(test_loader, model, loss_fn, args, log_interval=50): with torch.no_grad(): model.eval() losses, corrects = [], 0 num_samples = 0 for batch_idx, (data, target) in enumerate(test_loader): num_samples += data.size(0) data, target = data.to(args.device), target.to(args.device) outputs = model(data) loss = loss_fn(outputs, target) losses.append(loss.data.item()) _,preds = torch.max(outputs.data,1) corrects += torch.sum(preds == target.data).cpu() return np.sum(losses)/(batch_idx + 1), corrects/num_samples # + id="yfGNHVAz1Ez9" colab_type="code" colab={} start_epoch = 0 for epoch in range(0, start_epoch): scheduler.step() for epoch in range(start_epoch, args.num_epochs): scheduler.step() train_loss, train_accuracy = train_classif_epoch(train_loader, model, loss_fn, optimizer, args) message = 'Epoch: {}/{}. Train set: Average loss: {:.4f} Average accuracy: {:.4f}'.format( epoch + 1, args.num_epochs, train_loss, train_accuracy) val_loss, val_accuracy = test_classif_epoch(test_loader, model, loss_fn, args) message += '\nEpoch: {}/{}. 
Validation set: Average loss: {:.4f} Average accuracy: {:.4f}'.format(epoch + 1, args.num_epochs, val_loss, val_accuracy) print(message) # + [markdown] id="OIjXt_931E0A" colab_type="text" # ## 2.3 Visualizations # + colab_type="code" id="q9o-6-ytCuuT" colab={} train_embeddings_baseline, train_labels_baseline = extract_embeddings(train_loader, model, args) plot_embeddings(train_embeddings_baseline, train_labels_baseline, 'Train embeddings classification') test_embeddings_baseline, test_labels_baseline = extract_embeddings(test_loader, model, args) plot_embeddings(test_embeddings_baseline, test_labels_baseline, 'Test embeddings classification') # + [markdown] colab_type="text" id="fDdsCIiHCuub" # While the embeddings look separable (which is what we trained them for), they don't have good metric properties. They might not be the best choice as a descriptor for new classes. # + [markdown] colab_type="text" id="j9FhHE-tCuuc" # # 3. Siamese network # Now we'll train a siamese network that takes a pair of images and trains the embeddings so that the distance between them is minimized if their from the same class or greater than some margin value if they represent different classes. 
# We'll minimize a contrastive loss function*: # $$L_{contrastive}(x_0, x_1, y) = \frac{1}{2} y \lVert f(x_0)-f(x_1)\rVert_2^2 + \frac{1}{2}(1-y)\{max(0, m-\lVert f(x_0)-f(x_1)\rVert_2)\}^2$$ # # *<NAME>, <NAME>, <NAME>, [Dimensionality reduction by learning an invariant mapping](http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf), CVPR 2006* # + [markdown] id="GfA7R5dF1E0F" colab_type="text" # ## 3.1 Architecture # We will first define the siamese architecture on top of our `EmbeddingNet` # + [markdown] id="sNEzScHM1E0F" colab_type="text" # #### Exercise # # Fill in the forward part of `SiameseNet` # + id="Hj7Jzzwz1E0G" colab_type="code" colab={} class SiameseNet(nn.Module): def __init__(self, embedding_net): super(SiameseNet, self).__init__() self.embedding_net = embedding_net def forward(self, x1, x2): # fill in the missing 2 lines :) return output1, output2 def get_embedding(self, x): return self.embedding_net(x) # + [markdown] id="935Hwn0R1E0J" colab_type="text" # ## 3.2 Data loader # We will also need to adapt our data loader to fetch pairs of images # + id="XAFAu0We1E0K" colab_type="code" colab={} from torch.utils.data import Dataset from torch.utils.data.sampler import BatchSampler from PIL import Image class SiameseMNIST(Dataset): """ train mode: For each sample creates randomly a positive or a negative pair test mode: Creates fixed pairs for testing """ def __init__(self, mnist_dataset): self.mnist_dataset = mnist_dataset self.train = self.mnist_dataset.train self.transform = self.mnist_dataset.transform if self.train: self.train_labels = self.mnist_dataset.train_labels self.train_data = self.mnist_dataset.train_data self.labels_set = set(self.train_labels.numpy()) self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0] for label in self.labels_set} else: # generate fixed pairs for testing self.test_labels = self.mnist_dataset.test_labels self.test_data = self.mnist_dataset.test_data self.labels_set = 
set(self.test_labels.numpy()) ''' create a dictionary with an entry key for each label and the value an array storing the indices of the images having the respective label ''' self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0] for label in self.labels_set} random_state = np.random.RandomState(42) # itereate through test_data and randomly select samples with the same label positive_pairs = [[i, random_state.choice(self.label_to_indices[self.test_labels[i].item()]), 1] for i in range(0, len(self.test_data), 2)] # itereate through test_data, create a list of all labels different from current one and then # randomly select samples with having one of these labels negative_pairs = [[i, random_state.choice(self.label_to_indices[ np.random.choice( list(self.labels_set - set([self.test_labels[i].item()])) ) ]), 0] for i in range(1, len(self.test_data), 2)] # format: [index1, index2, label(0/1)] self.test_pairs = positive_pairs + negative_pairs def __getitem__(self, index): # at train time pairs of samples are fetched randomly on the fly if self.train: # select random label,i.e. 
similar (1) or non-similar (0) images target = np.random.randint(0, 2) img1, label1 = self.train_data[index], self.train_labels[index].item() if target == 1: # select an image with the same label as img1 siamese_index = index while siamese_index == index: siamese_index = np.random.choice(self.label_to_indices[label1]) else: # eliminate label1 from the set of possible labels to select siamese_label = np.random.choice(list(self.labels_set - set([label1]))) # randomly select an image having a label from this subset siamese_index = np.random.choice(self.label_to_indices[siamese_label]) img2 = self.train_data[siamese_index] else: img1 = self.test_data[self.test_pairs[index][0]] img2 = self.test_data[self.test_pairs[index][1]] target = self.test_pairs[index][2] img1 = Image.fromarray(img1.numpy(), mode='L') img2 = Image.fromarray(img2.numpy(), mode='L') if self.transform is not None: img1 = self.transform(img1) img2 = self.transform(img2) return (img1, img2), target def __len__(self): return len(self.mnist_dataset) # + [markdown] id="5Z1fGqzm1E0M" colab_type="text" # ## 3.3 Loss function # + [markdown] id="C48BKMc91E0N" colab_type="text" # $$L_{contrastive}(x_0, x_1, y) = \frac{1}{2} y \lVert f(x_0)-f(x_1)\rVert_2^2 + \frac{1}{2}(1-y)\{max(0, m-\lVert f(x_0)-f(x_1)\rVert_2)\}^2$$ # + [markdown] id="EpVpPbBZ1E0N" colab_type="text" # #### Exercise # # Fill in the missing parts of the `contrastive loss` # + id="jiKkNs3e1E0O" colab_type="code" colab={} class ContrastiveLoss(nn.Module): """ Contrastive loss Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise """ def __init__(self, margin): super(ContrastiveLoss, self).__init__() self.margin = margin self.eps = 1e-9 def forward(self, output1, output2, target, size_average=True): # compute squared distances between output2 and output1 squared_distances = None # add the second term from them loss. 
You can use ReLU for compressing the max formula losses = 0.5 * (target.float() * squared_distances + None ) return losses.mean() if size_average else losses.sum() # + [markdown] id="l7nclgG91E0Q" colab_type="text" # ## 3.4 Training # + colab_type="code" id="-qpIq-TzCuue" colab={} # Set up data loaders siamese_train_dataset = SiameseMNIST(train_dataset) # Returns pairs of images and target same/different siamese_test_dataset = SiameseMNIST(test_dataset) args.batch_size = 128 kwargs = {'num_workers': args.num_workers, 'pin_memory': True} siamese_train_loader = torch.utils.data.DataLoader(siamese_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) siamese_test_loader = torch.utils.data.DataLoader(siamese_test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs) margin = 1. embedding_net = EmbeddingNet() model = SiameseNet(embedding_net) loss_fn = ContrastiveLoss(margin) model.to(args.device) loss_fn.to(args.device) args.lr = 1e-3 optimizer = optim.Adam(model.parameters(), lr=args.lr) scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1) # + id="fYJbkKAm1E0S" colab_type="code" colab={} def train_siamese_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=100): model.train() losses = [] total_loss, num_samples = 0, 0 for batch_idx, (data, target) in enumerate(train_loader): num_samples += data[0].size(0) data = tuple(d.to(args.device) for d in data) target = target.to(args.device) optimizer.zero_grad() outputs = model(data[0], data[1]) # alternatively: outputs = model(*data) loss = loss_fn(outputs[0], outputs[1], target) # alternatively: loss = loss_fn(*outputs, target) losses.append(loss.data.item()) loss.backward() optimizer.step() if batch_idx % log_interval == 0: print('Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f} '.format( batch_idx * len(data[0]), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), np.mean(losses))) total_loss += np.sum(losses) losses = [] return total_loss/(batch_idx + 1) def test_siamese_epoch(test_loader, model, loss_fn, args, log_interval=50): with torch.no_grad(): model.eval() losses = [] num_samples = 0 for batch_idx, (data, target) in enumerate(test_loader): num_samples += data[0].size(0) data = tuple(d.to(args.device) for d in data) target = target.to(args.device) outputs = model(data[0], data[1]) loss = loss_fn(outputs[0], outputs[1], target) losses.append(loss.data.item()) return np.sum(losses)/(batch_idx + 1) # + colab_type="code" id="_IqiBATeCuuh" colab={} start_epoch = 0 # needed for annealing learning rate in case of resuming of training for epoch in range(0, start_epoch): scheduler.step() # main training loop for epoch in range(start_epoch, args.num_epochs): scheduler.step() # train stage train_loss = train_siamese_epoch(siamese_train_loader, model, loss_fn, optimizer, args) message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format( epoch + 1, args.num_epochs, train_loss) # testing/validation stage test_loss = test_siamese_epoch(siamese_test_loader, model, loss_fn, args) message += '\nEpoch: {}/{}. 
Validation set: Average loss: {:.4f}'.format(epoch + 1, args.num_epochs, test_loss) print(message) # + [markdown] id="xOzgmRO91E0X" colab_type="text" # ## 3.5 Visualizations # + colab_type="code" id="rBBBuPjiCuup" colab={} train_embeddings_cl, train_labels_cl = extract_embeddings(train_loader, model, args) plot_embeddings(train_embeddings_cl, train_labels_cl, title='Train embeddings (constrastive loss)') test_embeddings_cl, test_labels_cl = extract_embeddings(test_loader, model, args) plot_embeddings(test_embeddings_cl, test_labels_cl, title='Test embeddings (contrastive loss)') # + [markdown] id="28NPJHe91E0b" colab_type="text" # In order to two compare vectors $x_1$ and $x_2$ we can use the `cosine similarity` # # $$\text{similarity}=\frac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert_2, \epsilon)}$$ # # An alternative is the Euclidean distance. # + [markdown] id="ObKaI7Oa1E0b" colab_type="text" # In order to save computation at query time we can pre-process our vectors and L2-normalize them. Now we can simply perform comparison by dot product # + [markdown] id="Ncftmsls1E0b" colab_type="text" # #### Exercise # Perform L2-normalization on the embeddings using `numpy` # + id="rigYIlwM1E0c" colab_type="code" colab={} # L2-normalize embeddings test_embeddings_norm = .... 
# + [markdown] id="azGVIKYV1E0e" colab_type="text" # #### Exercise # Write now a function `most_sim` that computes all dot products between a query vector and the dataset, extracts the indices of the `topk` most similar vectors and put thme in a list of tuples ( # + id="HuvS4r6D1E0f" colab_type="code" colab={} def most_sim(x, emb, topk=6): return None # + id="hF5DQ3F71E0h" colab_type="code" colab={} test_images_raw = get_raw_images(test_loader) # + id="TzxTj6jn1E0i" colab_type="code" colab={} def launch_query(test_embeddings_norm, test_images_raw, query_id=None): query_id = random.randint(0, test_embeddings_norm.shape[0]) if query_id is None else query_id query_vector = test_embeddings_norm[query_id,:] print(f'query_id: {query_id} | query_embedding: {query_vector}') knns = most_sim(query_vector, test_embeddings_norm) knn_images = np.array([test_images_raw[x[0]] for x in knns ]) title=['q: 1.0', f'1nn: {knns[1][1]:.3}', f'2nn: {knns[2][1]:.3}', f'3nn: {knns[3][1]:.3}', f'4nn: {knns[4][1]:.3}', f'5nn: {knns[5][1]:.3}'] show(torchvision.utils.make_grid(torch.from_numpy(knn_images)), title=title) # print(knns) # + id="ydZMwGHt1E0k" colab_type="code" colab={} for i in range(5): launch_query(test_embeddings_norm, test_images_raw) # + [markdown] colab_type="text" id="MbKXy6yQCuuu" # # Triplet network # We'll train a triplet network, that takes an anchor, positive (same class as anchor) and negative (different class than anchor) examples. The objective is to learn embeddings such that the anchor is closer to the positive example than it is to the negative example by some margin value. # # ![alt text](images/anchor_negative_positive.png "Source: FaceNet") # Source: [2] *Schroff, Florian, <NAME>, and <NAME>. 
[Facenet: A unified embedding for face recognition and clustering.](https://arxiv.org/abs/1503.03832) CVPR 2015.* # # **Triplet loss**: $L_{triplet}(x_a, x_p, x_n) = max(0, m + \lVert f(x_a)-f(x_p)\rVert_2^2 - \lVert f(x_a)-f(x_n)\rVert_2^2$\) # + [markdown] id="pBLyLplg1E0m" colab_type="text" # ## 4.1 Architecture # We will first define the triplet architecture on top of our `EmbeddingNet` # # #### Exercise # # Fill in the forward part of `TripleNet` # + id="b2kAbre51E0o" colab_type="code" colab={} class TripletNet(nn.Module): def __init__(self, embedding_net): super(TripletNet, self).__init__() self.embedding_net = embedding_net def forward(self, x1, x2, x3): # missing 3 lines here return output1, output2, output3 def get_embedding(self, x): return self.embedding_net(x) # + [markdown] id="yCRCt5Fj1E0q" colab_type="text" # ## 4.2 Data loader # We will also need to adapt our data loader to fetch triplets of images # + id="MvAuRt6J1E0s" colab_type="code" colab={} from torch.utils.data import Dataset from torch.utils.data.sampler import BatchSampler from PIL import Image class TripletMNIST(Dataset): """ Train: For each sample (anchor) randomly chooses a positive and negative samples Test: Creates fixed triplets for testing """ def __init__(self, mnist_dataset): self.mnist_dataset = mnist_dataset self.train = self.mnist_dataset.train self.transform = self.mnist_dataset.transform if self.train: self.train_labels = self.mnist_dataset.train_labels self.train_data = self.mnist_dataset.train_data self.labels_set = set(self.train_labels.numpy()) self.label_to_indices = {label: np.where(self.train_labels.numpy() == label)[0] for label in self.labels_set} else: self.test_labels = self.mnist_dataset.test_labels self.test_data = self.mnist_dataset.test_data # generate fixed triplets for testing self.labels_set = set(self.test_labels.numpy()) self.label_to_indices = {label: np.where(self.test_labels.numpy() == label)[0] for label in self.labels_set} random_state = 
np.random.RandomState(29) triplets = [[i, random_state.choice(self.label_to_indices[self.test_labels[i].item()]), random_state.choice(self.label_to_indices[ np.random.choice( list(self.labels_set - set([self.test_labels[i].item()])) ) ]) ] for i in range(len(self.test_data))] self.test_triplets = triplets def __getitem__(self, index): if self.train: img1, label1 = self.train_data[index], self.train_labels[index].item() positive_index = index while positive_index == index: positive_index = np.random.choice(self.label_to_indices[label1]) negative_label = np.random.choice(list(self.labels_set - set([label1]))) negative_index = np.random.choice(self.label_to_indices[negative_label]) img2 = self.train_data[positive_index] img3 = self.train_data[negative_index] else: img1 = self.test_data[self.test_triplets[index][0]] img2 = self.test_data[self.test_triplets[index][1]] img3 = self.test_data[self.test_triplets[index][2]] img1 = Image.fromarray(img1.numpy(), mode='L') img2 = Image.fromarray(img2.numpy(), mode='L') img3 = Image.fromarray(img3.numpy(), mode='L') if self.transform is not None: img1 = self.transform(img1) img2 = self.transform(img2) img3 = self.transform(img3) return (img1, img2, img3), [] def __len__(self): return len(self.mnist_dataset) # + [markdown] id="779Hh05A1E0u" colab_type="text" # ## 4.3 Loss function # # #### Exercise # # Fill in the missing parts of the `triplet loss`: # $L_{triplet}(x_a, x_p, x_n) = max(0, m + \lVert f(x_a)-f(x_p)\rVert_2^2 - \lVert f(x_a)-f(x_n)\rVert_2^2$\) # + id="kRaxvIFJ1E0u" colab_type="code" colab={} class TripletLoss(nn.Module): """ Triplet loss Takes embeddings of an anchor sample, a positive sample and a negative sample """ def __init__(self, margin): super(TripletLoss, self).__init__() self.margin = margin def forward(self, anchor, positive, negative, size_average=True): distance_positive = None # fill in code distance_negative = None # fill in code # you can again use ReLU instead of max losses = None # fill in code 
return losses.mean() if size_average else losses.sum() # + [markdown] id="PyCDVrG81E0w" colab_type="text" # ## 4.4 Training # + colab_type="code" id="jv4DvFucCuuu" colab={} triplet_train_dataset = TripletMNIST(train_dataset) # Returns triplets of images triplet_test_dataset = TripletMNIST(test_dataset) args.batch_size = 128 kwargs = {'num_workers': args.num_workers, 'pin_memory': True} triplet_train_loader = torch.utils.data.DataLoader(triplet_train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs) triplet_test_loader = torch.utils.data.DataLoader(triplet_test_dataset, batch_size=args.batch_size, shuffle=False, **kwargs) margin = 1. embedding_net = EmbeddingNet() model = TripletNet(embedding_net) loss_fn = TripletLoss(margin) model.to(args.device) loss_fn.to(args.device) args.lr = 1e-3 optimizer = optim.Adam(model.parameters(), lr=args.lr) scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=0.1, last_epoch=-1) n_epochs = 5 log_interval = 100 # + [markdown] id="AsMAV5Kt1E0z" colab_type="text" # #### Exercice # # Code your own train/test sequences similarly to the previous examples. # Watch out for some differences though. # + id="HIeuIH4y1E00" colab_type="code" colab={} def train_triplet_epoch(train_loader, model, loss_fn, optimizer, args, log_interval=100): model.train() losses = [] total_loss, num_samples = 0, 0 # fill in code here return total_loss/(batch_idx + 1) def test_triplet_epoch(test_loader, model, loss_fn, args, log_interval=50): losses = [] num_samples = 0 # fill in code here return np.sum(losses)/(batch_idx + 1) # + id="6TfLZH1V1E02" colab_type="code" colab={} start_epoch = 0 # needed for annealing learning rate in case of resuming of training for epoch in range(0, start_epoch): scheduler.step() # main training loop for epoch in range(start_epoch, args.num_epochs): scheduler.step() # train stage train_loss = train_triplet_epoch(triplet_train_loader, model, loss_fn, optimizer, args) message = 'Epoch: {}/{}. 
Train set: Average loss: {:.4f}'.format( epoch + 1, args.num_epochs, train_loss) # testing/validation stage test_loss = test_triplet_epoch(triplet_test_loader, model, loss_fn, args) message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, args.num_epochs, test_loss) print(message) # + [markdown] id="LEkIWNDq1E05" colab_type="text" # ## 4.5 Visualizations # + colab_type="code" id="ysh4Ry7ZCuu_" colab={} train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader, model, args) plot_embeddings(train_embeddings_tl, train_labels_tl, title='Train triplet embeddings') test_embeddings_tl, test_labels_tl = extract_embeddings(test_loader, model, args) plot_embeddings(test_embeddings_tl, test_labels_tl, title='Val triplet embeddings') # + id="zw0y8bgB1E08" colab_type="code" colab={} # L2-normalize embeddings test_embeddings_tl_norm = test_embeddings_tl / np.linalg.norm(test_embeddings_tl, axis=-1, keepdims=True) # + id="YnNl2ffL1E09" colab_type="code" colab={} test_images_raw = get_raw_images(test_loader) # + id="h9JOhZTY1E0_" colab_type="code" colab={} for i in range(5): launch_query(test_embeddings_tl_norm, test_images_raw)
Notebooks/03_siamese/03_siamese_triplet_mnist_colab.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # library to interact with the blockchain from web3 import Web3 # Connect to the BSC using Web3 # + # bsc mainnet url bsc = "https://bsc-dataseed.binance.org/" # create connection web3 = Web3(Web3.HTTPProvider(bsc)) # check if it's connected print(web3.isConnected()) # - # Let's get data about the SafeMoon token # + # SafeMoon token address safemoon_address = web3.toChecksumAddress("0x8076c74c5e3f5852037f31ff0093eeb8c8add8d3") # SafeMoon Application Binary Interface (ABI) # it's hardcoded here, but can be also retrieved by using the bscscan API for example safemoon_abi = """ [{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"minTokensBeforeSwap","type":"uint256"}],"name":"MinTokensBeforeSwapUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"tokensSwapped","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"ethReceived","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"tokensIntoLiqudity","type":"uint256"}],"name":"SwapAndLiquify","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bool","name":"enabled","type":"bool"}],"name":"Swap
AndLiquifyEnabledUpdated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[],"name":"_liquidityFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"_maxTxAmount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"_taxFee","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"subtractedValue","type":"uint256"}],"name":"decreaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"tAmount","type":"uint256"}],"name":"deliver","outputs":[],"s
tateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"excludeFromFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"excludeFromReward","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"geUnlockTime","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"includeInFee","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"includeInReward","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"addedValue","type":"uint256"}],"name":"increaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"isExcludedFromFee","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"isExcludedFromReward","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"time","type":"uint256"}],"name":"lock","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"tAmount","type"
:"uint256"},{"internalType":"bool","name":"deductTransferFee","type":"bool"}],"name":"reflectionFromToken","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"renounceOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"liquidityFee","type":"uint256"}],"name":"setLiquidityFeePercent","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"maxTxPercent","type":"uint256"}],"name":"setMaxTxPercent","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bool","name":"_enabled","type":"bool"}],"name":"setSwapAndLiquifyEnabled","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"taxFee","type":"uint256"}],"name":"setTaxFeePercent","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"swapAndLiquifyEnabled","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"rAmount","type":"uint256"}],"name":"tokenFromReflection","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalFees","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"
nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"uniswapV2Pair","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"uniswapV2Router","outputs":[{"internalType":"contract IUniswapV2Router02","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"unlock","outputs":[],"stateMutability":"nonpayable","type":"function"},{"stateMutability":"payable","type":"receive"}] """ # + token_address = safemoon_address token_abi = safemoon_abi # with the address and abi the contract can be retrieved token_contract = web3.eth.contract(address=token_address, abi=token_abi) # - # we can see all the functions that the contract supports token_contract.all_functions() # Let's get some information about the token by asking the contract using its functions # + # get token's name token_name = token_contract.functions.name().call() print("name: " + token_name) # get token' symbol token_symbol = token_contract.functions.symbol().call() print("symbol: " + token_symbol) # what's the contract owner's address token_owner = token_contract.functions.owner().call() print("owner address: " + str(token_owner)) # get total supply of tokens token_total_supply = token_contract.functions.totalSupply().call() # the value has to be formatted to account for decimals token_total_supply = web3.fromWei(token_total_supply, 'gwei') print("total supply: " + str(token_total_supply)) # + # now let's find out if the 
owner of the token holds some of them in his/her wallet # we already now the address, so let's use the function balanceOf owner_balance = token_contract.functions.balanceOf(token_owner).call() owner_balance = web3.fromWei(owner_balance, 'gwei') print("the owner of the token has " + str(owner_balance) + " tokens in his/her wallet.") # - # Let's see how to retrieve some information from a wallet address # + # as an example, let's just take the same SafeMoon owner's address that we have from before wallet_address = web3.toChecksumAddress(token_owner) # and try to get the bnb balance for this address # the value has to be formatted from Wei to Ether, or BNB which is the same here wallet_balance = float(web3.fromWei(web3.eth.get_balance(wallet_address), 'ether')) print("bnb balance: " + str(round(wallet_balance, 10))) # - # Let's write a function the creates transactions # + # we need basically the address to which we're sending the bnb # and then our own address with its private key # because having access to the private key enables anybody to spend all of a wallet's tokens, # this information is very sensitive and should be encrypted. 
# but we won't deal with this here


def transfer(from_address, from_private, to_address, amount, gas=21000, gas_price_gwei='5'):
    """Send `amount` BNB from one address to another on BSC.

    Parameters
    ----------
    from_address : str
        Checksummed address of the sender.
    from_private : str
        Sender's private key. This is highly sensitive: anyone holding it can
        spend the wallet's funds, so never hardcode or commit a real key.
    to_address : str
        Checksummed address of the recipient.
    amount : float
        Value to send, denominated in BNB ('ether' units).
    gas : int, optional
        Gas limit; 21000 is the standard cost of a plain value transfer.
    gas_price_gwei : str, optional
        Gas price in gwei; '5' is the default used previously.

    Returns
    -------
    HexBytes
        Hash of the submitted transaction.
    """
    # the nonce is the sender's transaction count; it orders transactions
    # and prevents the same transaction from being replayed
    nonce = web3.eth.get_transaction_count(from_address)

    # define the transaction
    tx = {
        'nonce': nonce,
        'from': from_address,
        'to': to_address,
        'value': web3.toWei(amount, 'ether'),
        'gas': gas,
        'gasPrice': web3.toWei(gas_price_gwei, 'gwei'),
    }

    # sign locally with the private key, then broadcast the raw transaction
    signed_tx = web3.eth.account.sign_transaction(tx, from_private)

    # we send the transaction and get the hash, which will be returned by the function
    tx_hash = web3.eth.send_raw_transaction(signed_tx.rawTransaction)
    return tx_hash

# +
# then we can simply provide the addresses and the private key and the transaction will be sent!
tx_hash = transfer(
    from_address="0x0000000000000000000000000000000000000000",
    from_private="...",
    to_address="0x0000000000000000000000000000000000000000",
    amount=0.01  # in bnb
)

# after the transaction is sent, it takes some time for it to be completed
# the next function allows us to wait for it to be executed and then provide us
# the final data including if it was successful or not
print(web3.eth.waitForTransactionReceipt(tx_hash))
# -
bsc.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from __future__ import print_function import numpy as np from bqplot import * from IPython.display import display # ## Get Data np.random.seed(0) data = np.random.randn(10, 10) # ## Basic Heat map # + col_sc = ColorScale() grid_map = GridHeatMap(color=data, scales={'color': col_sc}) fig = Figure(marks=[grid_map], padding_y=0.0) display(fig) # - # ## Heat map with axes # + x_sc, y_sc, col_sc = OrdinalScale(), OrdinalScale(reverse=True), ColorScale() grid_map = GridHeatMap(color=data, scales={'column': x_sc, 'row': y_sc, 'color': col_sc}) ax_x, ax_y = Axis(scale=x_sc), Axis(scale=y_sc, orientation='vertical') fig = Figure(marks=[grid_map], axes=[ax_x, ax_y], padding_y=0.0) display(fig) # - # ## Non Uniform Heat map # + x_sc, y_sc, col_sc = LinearScale(), LinearScale(reverse=True), ColorScale() ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') ## The data along the rows is not uniform. Hence the 5th row(from top) of the map ## is twice the height of the remaining rows. row_data = np.arange(10) row_data[5:] = np.arange(6, 11) column_data = np.arange(10, 20) grid_map = GridHeatMap(row=row_data, column=column_data, color=data, scales={'row': y_sc, 'column': x_sc, 'color': col_sc}) fig = Figure(marks=[grid_map], padding_y=0.0, axes=[ax_x, ax_y]) display(fig) # - print row_data.shape print column_data.shape print data.shape # ## Alignment of the data with respect to the grid # For a `N-by-N` matrix, `N+1` points along the row or the column are assumed to be end points. 
# + x_sc, y_sc, col_sc = LinearScale(), LinearScale(reverse=True), ColorScale() ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') row_data = np.arange(11) column_data = np.arange(10, 21) grid_map = GridHeatMap(row=row_data, column=column_data, color=data, scales={'row': y_sc, 'column': x_sc, 'color': col_sc}) fig = Figure(marks=[grid_map], padding_y=0.0, axes=[ax_x, ax_y]) display(fig) # - # By default, for `N` points along any dimension, data aligns to the `start` of the rectangles in the grid. # The grid extends infinitely in the other direction. By default, the grid extends infintely # towards the bottom and the right. # + x_sc, y_sc, col_sc = LinearScale(), LinearScale(reverse=True, max=15), ColorScale() ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') row_data = np.arange(10) column_data = np.arange(10, 20) grid_map = GridHeatMap(row=row_data, column=column_data, color=data, scales={'row': y_sc, 'column': x_sc, 'color': col_sc}) fig = Figure(marks=[grid_map], padding_y=0.0, axes=[ax_x, ax_y]) display(fig) # - # By changing the `row_align` and `column_align` properties, the grid can extend in the opposite direction # + x_sc, y_sc, col_sc = LinearScale(), LinearScale(reverse=True, min=-5, max=15), ColorScale() ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') row_data = np.arange(10) column_data = np.arange(10, 20) grid_map = GridHeatMap(row=row_data, column=column_data, color=data, scales={'row': y_sc, 'column': x_sc, 'color': col_sc}, row_align='end') fig = Figure(marks=[grid_map], padding_y=0.0, axes=[ax_x, ax_y]) display(fig) # - # For `N+1` points on any direction, the grid extends infintely in both directions # + x_sc, y_sc, col_sc = LinearScale(), LinearScale(reverse=True, min=-5, max=15), ColorScale() ax_x = Axis(scale=x_sc) ax_y = Axis(scale=y_sc, orientation='vertical') row_data = np.arange(9) column_data = np.arange(10, 20) grid_map = GridHeatMap(row=row_data, column=column_data, 
color=data, scales={'row': y_sc, 'column': x_sc, 'color': col_sc}, row_align='end') fig = Figure(marks=[grid_map], padding_y=0.0, axes=[ax_x, ax_y]) display(fig) # - # ## Changing opacity and stroke # + col_sc = ColorScale() grid_map = GridHeatMap(color=data, scales={'color': col_sc}, opacity=0.3, stroke='white') fig = Figure(marks=[grid_map], padding_y=0.0) display(fig)
examples/GridHeatMap.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:classification]
#     language: python
#     name: conda-env-classification-py
# ---

# ## Results
#
# The tuned Logistic Regression model will now be scored against the test data

# +
# data manipulation
import pandas as pd
import os
import numpy as np
import pickle

# metrics
from sklearn import metrics

# custom helper functions
from src.models import cross_validate as cv

# visualization
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'png'
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
# -

DATA_PATH = '../data/processed/'
OBS_PATH = os.path.join(DATA_PATH, 'observations_features.csv')
RESULTS_PATH = os.path.join(DATA_PATH, 'results.csv')

# ### Load Data

obs = pd.read_csv(OBS_PATH)
obs.head()

# ### Test score

# +
MODEL_PATH = '../models/'
LOG_PATH = os.path.join(MODEL_PATH, 'log_reg_tuned.pkl')

# FIX: the original `pickle.load(open(LOG_PATH, 'rb'))` never closed the file
# handle; use a context manager so it is released deterministically.
# (pickle.load is safe here only because the model file is our own artifact —
# never unpickle untrusted data.)
with open(LOG_PATH, 'rb') as model_file:
    log_reg_model = pickle.load(model_file)

# +
X_train, X_test, y_train, y_test = cv.create_Xy(obs)

# probability of the positive class for each test observation
y_test_probs = log_reg_model.predict_proba(X_test)[:, 1]

# AUC is threshold-free, so it is computed from the probabilities directly
# (the original also built a thresholded `y_test_score` that was never used)
metrics.roc_auc_score(y_test, y_test_probs)

# +
fpr_gb, tpr_gb, _ = metrics.roc_curve(y_test, y_test_probs)

plt.figure(figsize=(6, 6))

# Plotting our Baseline..
plt.plot([0, 1], [0, 1])
plt.plot(fpr_gb, tpr_gb, label='Logistic Regression')
plt.legend()
title_style = {
    'position': (0, 1.05),
    'horizontalalignment': 'left'
}
plt.xlabel('False Positive Rate', {
    'horizontalalignment': 'left',
    'fontweight': 'ultralight',
    'position': (0, 0)})
plt.ylabel('True Positive Rate', {
    'horizontalalignment': 'left',
    'fontweight': 'ultralight',
    'position': (-0.5, 1),
    'rotation': 'horizontal'
})
plt.title('ROC curve',
          title_style
          );
# -

# ### Odds ratio for each feature

# exponentiating logistic-regression coefficients gives per-feature odds ratios
np.e ** log_reg_model.steps[2][1].coef_
notebooks/20. Results.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # MNIST convolutional neural networks # # * MNIST data를 가지고 **convolutional neural networks**를 만들어보자. # * [소스: mnist_deep.py](https://github.com/tensorflow/tensorflow/blob/r1.4/tensorflow/examples/tutorials/mnist/mnist_deep.py) # * 네트워크 구조는 `03.cnn/01.1_.mnist.deep.with.estimator.ipynb` 에서 가져옴 # ### Import modules # + """A very simple MNIST classifier. See extensive documentation at https://www.tensorflow.org/get_started/mnist/beginners in version 1.4 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np import matplotlib.pyplot as plt # %matplotlib inline from IPython.display import clear_output import tensorflow as tf sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) # - # ### Import data # + # Load training and eval data from tf.keras (train_data, train_labels), (test_data, test_labels) = \ tf.keras.datasets.mnist.load_data() train_data = train_data / 255. train_labels = np.asarray(train_labels, dtype=np.int32) test_data = test_data / 255. 
test_labels = np.asarray(test_labels, dtype=np.int32) # - # ### Show the MNIST index = 100 print("label = {}".format(train_labels[index])) plt.imshow(train_data[index]) plt.colorbar() #plt.gca().grid(False) plt.show() # ### Set up dataset with `tf.data` # # #### create input pipeline with `tf.data.Dataset` # + tf.set_random_seed(219) batch_size = 32 max_epochs = 1 # for train train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_labels)) train_dataset = train_dataset.shuffle(buffer_size = 10000) train_dataset = train_dataset.repeat(count = max_epochs) train_dataset = train_dataset.batch(batch_size = batch_size) print(train_dataset) # for test test_dataset = tf.data.Dataset.from_tensor_slices((test_data, test_labels)) test_dataset = test_dataset.batch(batch_size = len(test_data)) print(test_dataset) # - # #### Define Iterator # tf.data.Iterator.from_string_handle의 output_shapes는 default = None이지만 꼭 값을 넣는 게 좋음 handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes) x, y = iterator.get_next() x = tf.cast(x, dtype = tf.float32) y = tf.cast(y, dtype = tf.int32) # ### Create the model def cnn_model_fn(x): """Model function for CNN. Args: x: input images mode: boolean whether trainig mode or test mode Returns: logits: unnormalized score funtion """ # Input Layer # Reshape X to 4-D tensor: [batch_size, width, height, channels] # MNIST images are 28x28 pixels, and have one color channel input_layer = tf.reshape(x, [-1, 28, 28, 1]) # Convolutional Layer #1 # Computes 32 features using a 5x5 filter with ReLU activation. # Padding is added to preserve width and height. 
# Input Tensor Shape: [batch_size, 28, 28, 1] # Output Tensor Shape: [batch_size, 28, 28, 32] conv1 = tf.layers.conv2d( inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #1 # First max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 28, 28, 32] # Output Tensor Shape: [batch_size, 14, 14, 32] pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # Convolutional Layer #2 # Computes 64 features using a 5x5 filter. # Padding is added to preserve width and height. # Input Tensor Shape: [batch_size, 14, 14, 32] # Output Tensor Shape: [batch_size, 14, 14, 64] conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #2 # Second max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 14, 14, 64] # Output Tensor Shape: [batch_size, 7, 7, 64] pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # Flatten tensor into a batch of vectors # Input Tensor Shape: [batch_size, 7, 7, 64] # Output Tensor Shape: [batch_size, 7 * 7 * 64] pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) # Dense Layer # Densely connected layer with 1024 neurons # Input Tensor Shape: [batch_size, 7 * 7 * 64] # Output Tensor Shape: [batch_size, 1024] dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) # Add dropout operation; 0.6 probability that element will be kept is_training = tf.placeholder(tf.bool) dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=is_training) # Logits layer # Input Tensor Shape: [batch_size, 1024] # Output Tensor Shape: [batch_size, 10] logits = tf.layers.dense(inputs=dropout, units=10) return logits, is_training, input_layer # ### Build the model logits, is_training, x_image = cnn_model_fn(x) # ### Define loss and optimizer # + #y_one_hot = tf.one_hot(y, depth=10) #cross_entropy = 
tf.losses.softmax_cross_entropy(onehot_labels=y_one_hot, logits=y_pred) cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) # - # ### Assign `tf.summary.FileWriter` graph_location = 'graphs/01.2.mnist.deep.with.tf.data' print('Saving graph to: %s' % graph_location) train_writer = tf.summary.FileWriter(graph_location) train_writer.add_graph(tf.get_default_graph()) # ### `tf.summary` with tf.name_scope('summaries'): tf.summary.scalar('loss/cross_entropy', cross_entropy) tf.summary.image('images', x_image) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) # merge all summaries summary_op = tf.summary.merge_all() # ### `tf.Session()` and train # + sess = tf.Session(config=sess_config) sess.run(tf.global_variables_initializer()) # train_iterator train_iterator = train_dataset.make_one_shot_iterator() train_handle = sess.run(train_iterator.string_handle()) # Train step = 1 while True: try: start_time = time.time() _, loss = sess.run([train_step, cross_entropy], feed_dict={handle: train_handle, is_training: True}) if step % 10 == 0: clear_output(wait=True) duration = time.time() - start_time examples_per_sec = batch_size / float(duration) epochs = batch_size * step / float(len(train_data)) print("epochs: {:.2f}, step: {}, loss: {:g}, ({:.2f} examples/sec; {:.3f} sec/batch)".format(epochs, step, loss, examples_per_sec, duration)) if step % 200 == 0: # summary summary_str = sess.run(summary_op, feed_dict={handle: train_handle, is_training: False}) train_writer.add_summary(summary_str, global_step=step) step += 1 #if step > 100: # break except tf.errors.OutOfRangeError: print("End of dataset") # ==> "End of dataset" break train_writer.close() print("training done!") # - # ### Test trained model # # * test accuracy: 0.9804 for 1 epoch # test_iterator test_iterator = test_dataset.make_one_shot_iterator() test_handle = sess.run(test_iterator.string_handle()) # 
### Use `tf.metrics` # + accuracy, acc_op = tf.metrics.accuracy(labels=y, predictions=tf.argmax(logits, 1), name='accuracy') sess.run(tf.local_variables_initializer()) sess.run(acc_op, feed_dict={handle: test_handle, is_training: False}) print("test accuracy:", sess.run(accuracy)) # - # ### Plot test set np.random.seed(219) # + test_batch_size = 16 batch_index = np.random.choice(len(test_data), size=test_batch_size, replace=False) batch_xs = test_data[batch_index] batch_ys = test_labels[batch_index] y_pred = sess.run(logits, feed_dict={x: batch_xs, is_training: False}) fig = plt.figure(figsize=(16, 10)) for i, (px, py) in enumerate(zip(batch_xs, y_pred)): p = fig.add_subplot(4, 8, i+1) if np.argmax(py) == batch_ys[i]: p.set_title("y_pred: {}".format(np.argmax(py)), color='blue') else: p.set_title("y_pred: {}".format(np.argmax(py)), color='red') #p.imshow(px.reshape(28, 28), cmap='gray') p.imshow(px.reshape(28, 28)) p.axis('off') # - # ## 직접 실습 # # * 여러가지 hyper-parameter들을 바꿔가면서 accuracy를 높혀보자
01.2.mnist.deep.with.tf.data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
from sklearn import tree
from sklearn import datasets
from sklearn import model_selection
import graphviz
# -

# # Decision trees
# (example from sklearn)

iris = datasets.load_iris()

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.33, random_state=3)


def _render_tree(clf, feature_names, class_names):
    """Render a fitted decision tree classifier as a graphviz Source object."""
    dot_data = tree.export_graphviz(clf, out_file=None,
                                    feature_names=feature_names,
                                    class_names=class_names,
                                    filled=True, rounded=True,
                                    special_characters=True)
    return graphviz.Source(dot_data)


def _accuracy_pct(y_true, y_pred):
    """Percentage of predictions that match the true labels."""
    return (np.sum(y_true == y_pred)) / float(y_true.size) * 100


clf = tree.DecisionTreeClassifier(max_depth=2)
clf = clf.fit(X_train, y_train)

graph = _render_tree(clf, iris.feature_names, iris.target_names)
graph

predictions = clf.predict(X_train)
# BUG FIX: '%d' silently truncated the accuracy (e.g. 97.8% printed as 97%);
# use a float format so the fractional part is visible.
print('Accuracy: %.2f ' % _accuracy_pct(y_train, predictions))

# ### Increasing the depth...

# unrestricted depth: the tree will fit the training data (near-)perfectly
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X_train, y_train)

graph = _render_tree(clf, iris.feature_names, iris.target_names)
graph

predictions = clf.predict(X_train)
print('Accuracy: %.2f ' % _accuracy_pct(y_train, predictions))

# ### And what if we look at the accuracy over the test data?

# test accuracy drops relative to train accuracy — the deep tree overfits
predictions = clf.predict(X_test)
print('Accuracy: %.2f ' % _accuracy_pct(y_test, predictions))
archive/2018/demo5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ### Fizz Buzz

# + active=""
# Write a program that outputs the string representation of numbers from 1 to n.
# But for multiples of three it should output “Fizz” instead of the number and for the multiples of five output “Buzz”.
# For numbers which are multiples of both three and five output “FizzBuzz”.
#
# Example:
# n = 15,
# Return:
#
# [ "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz",
#   "11", "Fizz", "13", "14", "FizzBuzz" ]
# -

class Solution:
    def fizzBuzz(self, n):
        """
        :type n: int
        :rtype: List[str]
        """
        # one entry per number from 1 through n, delegating the
        # Fizz/Buzz decision for each number to FizzBuzz_X
        return [self.FizzBuzz_X(number) for number in range(1, n + 1)]

    def FizzBuzz_X(self, x):
        """Return 'Fizz'/'Buzz'/'FizzBuzz' for multiples of 3/5/both, else str(x)."""
        parts = []
        if x % 3 == 0:
            parts.append('Fizz')
        if x % 5 == 0:
            parts.append('Buzz')
        # empty parts means x is divisible by neither 3 nor 5
        return ''.join(parts) if parts else str(x)

n = 15
ans = Solution()
ans.fizzBuzz(n)
412. Fizz Buzz.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Revisão de conceitos estatísticos III (Estimadores) # # Vamos explorar alguns conceitos estatísticos aplicados à análise de sinais. # importar as bibliotecas necessárias import numpy as np # arrays import matplotlib.pyplot as plt # plots from scipy.stats import norm plt.rcParams.update({'font.size': 14}) import IPython.display as ipd # to play signals import sounddevice as sd # # Um conjunto de ruídos brancos # # Vamos gerar um conjunto (ensemble) de ruídos brancos. Você pode encarar isso como se fosse uma série de gravações de um sinal aleatório. Por exemplo, como se você fosse medir a vibração em uma máquina complexa e tomasse $N_{rec}$ gravações. No caso do ruído branco temos um fenômeno governado por uma distribuição de probabilidade Gaussiana e constante com o tempo: # # \begin{equation} # p(x) = \mathcal{N}(\mu_x, \sigma_x) = \frac{1}{\sqrt{2\pi}}\mathrm{e}^{-\frac{1}{2\sigma^2}(x-\mu_x)^2} # \end{equation} # em que $\mu_x$ é a média e $\sigma_{x}$ é o desvio padrão. 
# + # Tempo fs = 2000 time = np.arange(0,2, 1/fs) # parâmetros de um fenômeno, cuja p(x) varia com o tempo mu_x = 1.2 - np.cos(2*np.pi*0.5*time) # média sigma_x = 1.2 - np.sin(2*np.pi*1*time) # A densidade de probabilidade x = np.arange(-10, 10, 0.01) px = norm.pdf(x, loc= mu_x, scale = sigma_x) # Número de gravações N_rec = 500 xt = np.random.normal(loc = mu_x, scale = sigma_x, size=(N_rec,len(time))) # - # # Vamos escolher 5 gravações do conjunto de sinais gravados para plotar $x(t)$ # + rec_choose = np.random.randint(51, size=5) color = ['b', 'r', 'k', 'g', 'magenta'] fig, axs = plt.subplots(5, 1, figsize = (10, 8)) for i in np.arange(5): axs[i].plot(time, xt[rec_choose[i]], linewidth = 1, color = color[i]) axs[i].axvline(0.25, color='grey',linestyle = '--', linewidth = 4, alpha = 0.8) axs[i].set_ylabel(r'$x(t)$') axs[i].set_xlim((0, time[-1])) axs[i].set_ylim((-4, 4)) axs[i].set_xlabel('tempo [s]') plt.tight_layout() # - # # Computar os momentos para o estimador do conjunto. # + # Escolha 1 ou 2 instantes de tempo e calcule as amostras n_1 = int(0.25*fs) n_2 = int(0.73*fs) # Primeiro momento Ex_1 = (1/N_rec)*np.sum(xt[:, n_1]) Ex_2 = (1/N_rec)*np.sum(xt[:, n_2]) print("E[x] para t_1 é {:.4f}".format(Ex_1)) print("E[x] para t_2 é {:.4f}".format(Ex_2)) # Segundo momento Ex2_1 = (1/N_rec)*np.sum((xt[:, n_1]-Ex_1)**2) Ex2_2 = (1/N_rec)*np.sum((xt[:, n_2]-Ex_2)**2) print("E[(x-\mu_x)^2] para t_1 é {:.4f}".format(Ex2_1)) print("E[(x-\mu_x)^2] para t_2 é {:.4f}".format(Ex2_2)) # - # # Agora, tome apenas 1 das gravações (estimador do sinal) # # Encare isso como sendo a única gravação que você fez. # + rec_choose = np.random.randint(51, size=1) xt_rec = np.reshape(xt[rec_choose,:], len(time)) plt.figure(figsize = (10, 3)) plt.plot(time, xt_rec, linewidth = 1, color = 'g') plt.ylabel(r'$x(t)$') plt.xlim((0, time[-1])) plt.ylim((-4, 4)) plt.xlabel('tempo [s]') plt.tight_layout() # - # # Computar os momentos do sinal. 
# +
# first moment: sample mean of the single recording
signal_mean = np.mean(xt_rec)

# second central moment: the variance, computed as the squared standard deviation
signal_var = np.std(xt_rec)**2

# RMS value estimated directly from the samples of the recording
signal_rms = np.sqrt((1/len(time))*np.sum(xt_rec**2))

print("E[x] para o sinal é {:.4f}".format(signal_mean))
print("E[(x-\mu_x)^2] para o sinal é {:.4f}".format(signal_var))
print("O quadrado do valor RMS é {:.4f}".format(signal_rms**2))
Aula 52 - estacionaridade/.ipynb_checkpoints/estacionaridade-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# importing the dependencies
import pandas as pd

df = pd.read_csv("Resources/cities.csv")
df

# BUG FIX: `df.to_html("data.html")` writes straight to the file and returns
# None, so the original `html.replace('\n', '')` raised AttributeError (and a
# stray trailing `data.html` expression was a NameError). Build the HTML
# string first, strip the newlines, then write the result out explicitly.
html = df.to_html()
html = html.replace('\n', '')
with open("data.html", "w") as out_file:
    out_file.write(html)
WebVisualizations/csv_to_html.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Information about this notebook
#
# This example script was provided as part of the Data Management Project (INF) within the TR-172 ["ArctiC Amplification: Climate Relevant Atmospheric and SurfaCe Processes, and Feedback Mechanisms" (AC)³](http://www.ac3-tr.de/) funded by the German Research Foundation (Deutsche Forschungsgemeinschaft, DFG)
#
# Author: <NAME>, [Institute of Environmental Physics](http://www.iup.uni-bremen.de), University of Bremen, Germany, <EMAIL>
#
# Github repository: https://github.com/ac3-tr/ac3-notebooks
#
# **Setup instructions for a reference Python Environment can be found on the [Github page](https://github.com/ac3-tr/ac3-notebooks)**

import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy, os
from netCDF4 import Dataset
# %matplotlib inline

# # OCEANET-ATMOSPHERE PollyXT measurements during POLARSTERN cruise PS106
#
#
# ## Dataset resources
#
# **Title:** OCEANET-ATMOSPHERE PollyXT measurements during POLARSTERN cruise PS106
#
# **Author** <NAME>; <NAME>; <NAME>; <NAME>; <NAME>
#
# **Year** 2019
#
# **Institute** <NAME>
#
# **Data hosted by** [PANGAEA](https://pangaea.de)
#
# **DOI** [10.1594/PANGAEA.899458](https://doi.org/10.1594/PANGAEA.899458)
#
# **License** [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/ "CC-BY-4.0")
#
#
# ## Abstract
# The dataset contains daily nc-files of the attenuated backscatter coefficient and volume depolarization from the OCEANET multiwavelength Raman and polarization lidar PollyXT (Engelmann et al., 2016, doi:10.5194/amt-9-1767-2016; Baars et al., 2017, doi:10.5194/amt-10-3175-2017) during Polarstern cruise PS106.
#
# The data is calibrated within the Cloudnet (Illingworth, 2007 doi:10.1175/BAMS-88-6-883) processing scheme.
#
#
# ## Reading example dataset
#
# The list of binary NetCDF files can be downloaded via the link above and saved in the current working directory of this notebook. After checking the headersize of the ASCII file, the table can be read as a NumPy record array. The print statement checks the loaded fields in the recarray.

# Folder where the PANGAEA tab-separated index file and the nc-files live.
datafolder = '../ac3/INF/pangaea_download/'

# The .tab file has a 23-line PANGAEA metadata header before the column names.
datasetlist = np.recfromtxt(os.path.join(datafolder, 'Griesche-etal_2019.tab'),
                            skip_header=23, delimiter='\t', names=True, encoding='utf8')
print (datasetlist.dtype.names)

# As an example we can download one file in the collection, using a *wget* if your system supports it. If not, you can go to the link printed and downloaded manually.

url = datasetlist['URL_file'][0]
fname = url[url.rfind('/')+1:]  # file name = everything after the last '/'
print ('Downloading ... ', url)
try:
    if not os.path.exists(os.path.join(datafolder,fname)):
        # NOTE(review): os.system's return code is not checked, so a failed wget
        # still prints 'Download finished'; the URL comes from the PANGAEA index
        # file, so shell interpolation is assumed safe here — TODO confirm.
        os.system('wget -O '+os.path.join(datafolder,fname)+' '+url)
        print ('Download finished')
    else:
        print ('File already there...')
except Exception:
    # Fixed: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Catch Exception only and fall back to manual download.
    print ('Could not download automatically, please try manual download!')

# ## Read in the downloaded NetCDF file
#

fname = url[url.rfind('/')+1:]
rootgrp = Dataset(os.path.join(datafolder,fname), 'r', format="NETCDF4")
print (rootgrp.variables.keys())

# +
# Attenuated backscatter at 407 nm; build a human-readable label from the
# variable's own metadata for use in the colorbars below.
par = 'att_beta_0407'
pardescription = rootgrp.variables[par].long_name+' ['+rootgrp.variables[par].units+']'
print (pardescription)

p = rootgrp.variables[par][:]
print (p.shape)
#p = p[~np.isnan(p)]
height = rootgrp.variables['height'][:]
time = rootgrp.variables['time'][:]
print (time.shape, height.shape)
# -

rootgrp.variables['time']

# ## Overview plot
#
# As an overview, the data fields can be plotted against flight time. Time is converted from seconds to hours.

# Quick sanity-check profile of the first time step.
plt.plot(p[0,:])

# +
# Full-height curtain plot: time on x, height (km) on y.
Time, Height = np.meshgrid(time.flatten(), height.flatten())
fig, ax = plt.subplots(1, figsize=(14,7))
ax.set_ylabel('Height [km]')
ax.set_xlabel('Hours after 2017-05-24 00:00')
con = ax.pcolormesh(Time, Height/1e3, p.T, cmap=plt.get_cmap('Reds'), vmin=0, vmax=5e-7)
fig.colorbar(con, label=pardescription)
fig.autofmt_xdate()

# +
# Zoom on the lowest 300 height bins with a tighter color range.
Time, Height = np.meshgrid(time.flatten(), height[:300].flatten())
fig, ax = plt.subplots(1, figsize=(14,7))
ax.set_ylabel('Height [km]')
ax.set_xlabel('Hours after 2017-05-24 00:00')
con = ax.pcolor(Time, Height/1e3, (p.T)[:300,:], cmap=plt.get_cmap('Reds'), vmin=0, vmax=2e-7)
fig.colorbar(con, label=pardescription)
fig.autofmt_xdate()
# -
oceanet_atmosphere_pollyxt_PS106.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Initial data processing
#
# The initial dataset has been output in a rather bulky Excel pivot-table approach, and will need some processing to be ready to use. While I could make edits directly in excel, I'm hoping this will evolve into something which includes a structured data repository, and for that reason I want to be able to process the spreadsheets in python directly from source.

# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

sns.set()
# NOTE(review): this creates a stray empty figure at import time that nothing
# ever draws into — presumably intended as a default figure size; verify.
plt.figure(figsize=(16,6))
# %matplotlib inline

# Input and output directories, relative to the notebook.
data_loc = "./RawData/"
save_loc = "./ProcessedData/"

# +
# Input the oil products breakdown sheet.
# The EuroStat export carries 9 header rows before the actual table.
filename = "EuroStat_nrg_bal_c__oilProducts_1217174.xlsx"
sheet = "Sheet 1"

oilProducts = pd.read_excel(data_loc + filename, sheet_name=sheet, usecols="A:AE", skiprows=9)
oilProducts.head(10)
# -

oilProducts.tail(5)

# I can drop rows 0, 21, 22 and 23 (header noise and trailing footnote rows).
oilProducts = oilProducts.drop(index=[0,21,22,23], axis=0)
oilProducts

# Let's check the data types.
oilProducts.dtypes

# +
# So to tidy this up we need to:
# a. change column "1990" to float64 (it was read as object)
# b. rename column "TIME" to "SIEC_Labels"
# c. reset the index so we get our 0 row back.

oilProducts["1990"] = oilProducts["1990"].astype("float")
oilProducts = oilProducts.rename(columns = {'TIME':'SIEC_Labels'})
oilProducts = oilProducts.reset_index(drop=True)
oilProducts.head()

# +
# I think I'll transpose this so I have the years as a column.
# After the transpose, the first row holds the old column labels, so it is
# promoted to the header and then dropped.
oilProducts = oilProducts.transpose().reset_index()
oilProducts.columns = oilProducts.iloc[0]
oilProducts = oilProducts.rename(columns = {'SIEC_Labels':'Year'})
oilProducts = oilProducts.drop(index=[0], axis=0)
oilProducts = oilProducts.reset_index(drop=True)
oilProducts.head()
# -

oilProducts.dtypes

# +
# Coerce every column to numeric; anything unparseable becomes NaN.
for col in oilProducts.columns:
    oilProducts[col] = pd.to_numeric(oilProducts[col], errors='coerce')

oilProducts.dtypes
# -

oilProducts.to_csv(save_loc + "OilProductData_EU_1990-2019.csv", index=False)

# # Sector dependency
#
# I've deliberately started with the breakdown of what products are made from oil because that's definitely the simpler of the two datasets. The sector dependency will need a bit more processing.

# +
# Input the oil sector dependency sheet.
# This one includes merged cells as headings, which I'd rather deal with programmatically if that will work?
# I'll try it and see.
filename = "EuroStat_nrg_bal_c__sectorOilDependency_1209540.xlsx"
sheet = "Sheet 1"

oilSectors = pd.read_excel(data_loc + filename, sheet_name=sheet, usecols="A:AF", skiprows=8)
oilSectors.head(10)
# -

# The data have been uploaded successfully, but the overall structure is messy and doesn't suit dataframe working. I'll need to do a bit of tidying to make this work

# Have I uploaded any trailing data at the end?
oilSectors.tail()

# +
# First: drop the columns that are entirely NAs and the trailing rows.
oilSectors = oilSectors.drop(index=[43,44,45], axis=0)
oilSectors = oilSectors.dropna(how="all", axis=1)
oilSectors.head()
# -

# Transpose so years become the index; the "TIME" row holds the sector labels
# and becomes the header.
oilSectors = oilSectors.transpose()
oilSectors.columns = oilSectors.loc["TIME"]
oilSectors.head()

# +
# Put another index on, change the name of "TIME" column to "Year",
# and change the years with .1 on them to just the integer
# (duplicate year columns came through as e.g. "2005.1").
oilSectors = oilSectors.reset_index()
oilSectors = oilSectors.rename(columns = {'index':'Year', 'SIEC (Labels)': 'Energy Balance value type'})
oilSectors['Year'] = pd.to_numeric(oilSectors['Year'], errors='coerce')
oilSectors['Year'] = oilSectors['Year'].apply(np.floor)
oilSectors.head()

# +
# Drop the leftover label row and make Year a proper integer column.
oilSectors = oilSectors.drop(index=[0], axis=0)
oilSectors["Year"] = oilSectors["Year"].astype(int)
oilSectors.head()

# +
# I want rid of that meaningless "TIME" header.
oilSectors.columns.name="data"
print(oilSectors.columns.name)

# I also want rid of the "NRG_BAL" column
# (presumably all-NaN after the transpose, so dropna removes it — verify).
oilSectors = oilSectors.dropna(how="all", axis=1)

# reset index so I get my zero-index back.
oilSectors = oilSectors.reset_index(drop=True)
oilSectors.head()

# +
# Last challenge: my "NAN" character appears to be ":". Need to make that something more sensible.

## Code to check it's actually a string colon and not something else
# print(oilSectors.iloc[3,6])  # The value in this cell is a ":".
# oilSectors.loc[oilSectors["Gross inland consumption (Europe 2020-2030)"]==":"]

oilSectors = oilSectors.replace(":", np.nan)
oilSectors.head()

# +
# Check datatypes
oilSectors.dtypes
# -

oilSectors.to_csv(save_loc + "OilSectorEnergyData_EU_1990-2019.csv", index=False)

# # Final comments
#
# What I've done here is move from non-processable data format to a much cleaner data format. However I'm not yet entirely happy with the resulting data. In particular, my column headings which tell the sector are particularly unwieldy. So the next stage will be to make the dataset -- especially the sector dataset -- a lot tighter.
01_ProcessingTheRawData.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# ## Machine Learning project workflow
#
#
# 1. Obtain and load data

# + [markdown] slideshow={"slide_type": "slide"}
# ## Obtaining data:
# 1. Someone hands us the data (YAY!)
# 2. Public datasets:
#     - [scikit learn](https://scikit-learn.org/stable/datasets.html)
#     - [tensorflow](https://www.tensorflow.org/datasets?hl=es-419)
#     - [Kaggle](https://www.kaggle.com/)
#     - [UCI Machine learning Repository](https://archive.ics.uci.edu/ml/datasets.php)
#     - [Spanish Government open data](https://datos.gob.es/)
#     - [Madrid open data](https://datos.madrid.es/portal/site/egob)
#     - [Castilla-La Mancha open data](https://datosabiertos.castillalamancha.es/)
#     - [BOE data (Civio)](https://datos.civio.es/datasets/)
# 3. Internet of Things
# 4. Web crawling & APIs: [Scrapy](https://scrapy.org/)
# -

# # Loading data
# - Text
#     - CSV
#     - JSON
#     - XML
# - Images
#     - Pillow
#     - OpenCV
# - Audio
#     - Wavio
#     - PyAudio

# ## CSV

# +
# Reading each row as a list of values.
import csv

with open('datasets/some.csv', newline='') as csv_file:
    reader = csv.reader(csv_file)
    for row in reader:
        print(row)

# +
# Reading each row as a dictionary {field_name: field_value}.
import csv

with open('datasets/some.csv', mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for row in csv_reader:
        print(row)
# -

# Create the .eoi_solutions folder, if it does not exist, to store the generated files.
import os
if not os.path.exists('.eoi_solutions'):
    os.makedirs('.eoi_solutions')

# +
# Writing rows from lists of values.
import csv

with open('.eoi_solutions/employee.csv', mode='w') as csv_file:
    employee_writer = csv.writer(csv_file)

    employee_writer.writerow(['<NAME>', 'Accounting', 'November'])
    employee_writer.writerow(['<NAME>', 'IT', 'March'])

# +
# Writing rows from dictionaries {field_name: field_value}.
# DictWriter orders each row by `fieldnames`, so key order in the
# dictionaries does not matter.
import csv

with open('.eoi_solutions/employee2.csv', mode='w') as csv_file:
    fieldnames = ['emp_name', 'dept', 'birth_month']
    writer = csv.DictWriter(csv_file, fieldnames=fieldnames)

    writer.writeheader()
    writer.writerow({'emp_name': '<NAME>', 'dept': 'Accounting', 'birth_month': 'November'})
    writer.writerow({'birth_month': 'March', 'dept': 'IT', 'emp_name': '<NAME>'})
# -

# ## JSON

# +
import json
from pprint import pprint

# read file
with open('datasets/some.json', 'r') as json_file:
    json_content = data = json.load(json_file)

pprint(json_content)

# +
# Serialize a list of records to a JSON file.
import json

json_content = [
    {'emp_name': '<NAME>', 'dept': 'Accounting', 'birth_month': 'November'},
    {'birth_month': 'March', 'dept': 'IT', 'emp_name': '<NAME>'}
]

with open(".eoi_solutions/employee.json", "w") as write_file:
    json.dump(json_content, write_file)
# -

# Example of the record structure produced by a typical JSON API
# (compare plain print with pprint above).
from pprint import pprint
print([{'email': '<EMAIL>', 'first_name': 'Anne', 'gender': 'Genderqueer', 'id': 1, 'ip_address': '172.16.31.10', 'last_name': 'Vignaux'},
 {'email': '<EMAIL>', 'first_name': 'Brittani', 'gender': 'Bigender', 'id': 2, 'ip_address': '172.16.17.32', 'last_name': 'Yepiskov'},
 {'email': '<EMAIL>', 'first_name': 'Mirna', 'gender': 'Male', 'id': 3, 'ip_address': '192.168.3.11', 'last_name': 'Idenden'},
 {'email': '<EMAIL>', 'first_name': 'Yvor', 'gender': 'Female', 'id': 4, 'ip_address': '192.168.3.11', 'last_name': 'Licquorish'},
 {'email': '<EMAIL>', 'first_name': 'Sayre', 'gender': 'Genderqueer', 'id': 5, 'ip_address': '172.16.58.3', 'last_name': 'Crannell'},
 {'email': '<EMAIL>', 'first_name': 'Jacquenetta', 'gender': 'Non-binary', 'id': 6, 'ip_address': '172.16.31.10', 'last_name': 'Collacombe'},
 {'email': '<EMAIL>', 'first_name': 'Miguel', 'gender': 'Agender', 'id': 7, 'ip_address': '192.168.3.11', 'last_name': 'Krahl'},
 {'email': '<EMAIL>', 'first_name': 'Clementia', 'gender': 'Genderfluid', 'id': 8, 'ip_address': '192.168.127.12', 'last_name': 'Hattiff'},
 {'email': '<EMAIL>', 'first_name': 'Andrey', 'gender': 'Genderqueer', 'id': 9, 'ip_address': '192.168.127.12', 'last_name': 'Itzig'},
 {'email': '<EMAIL>', 'first_name': 'Junina', 'gender': 'Genderqueer', 'id': 10, 'ip_address': '172.16.17.32', 'last_name': 'Alexandrescu'}])
2-conseguir-cargar-datos.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import osmium as osm
import pandas as pd
import datetime
import pytz


class TimelineHandler(osm.SimpleHandler):
    """Accumulates one metadata row per OSM element found in a (history) file.

    Rows are collected in `self.elemtimeline`, ready to be turned into a
    pandas DataFrame: [type, id, version, visible, timestamp, uid,
    changeset, number-of-tags].
    """

    def __init__(self):
        super().__init__()
        # One entry per visited element, appended in file order.
        self.elemtimeline = []

    def element(self, e, elem_type):
        """Record the metadata of a single element under the given type label."""
        row = [
            elem_type,
            e.id,
            e.version,
            e.visible,
            pd.Timestamp(e.timestamp),
            e.uid,
            e.changeset,
            len(e.tags),
        ]
        self.elemtimeline.append(row)

    # osmium callbacks — one per element kind, all funnelled through element().
    def node(self, n):
        self.element(n, "node")

    def way(self, w):
        self.element(w, "way")

    def relation(self, r):
        self.element(r, "relation")


# +
# Parse the OSM history extract, sort chronologically within each element,
# and dump the timeline to CSV (dates only, no time-of-day).
history_handler = TimelineHandler()
history_handler.apply_file("data/ottgat.osh.pbf")

colnames = ['type', 'id', 'version', 'visible', 'ts', 'uid', 'chgset', 'ntags']
elements = (
    pd.DataFrame(history_handler.elemtimeline, columns=colnames)
    .sort_values(by=['type', 'id', 'ts'])
)
elements.to_csv("output/elements.csv", date_format='%Y-%m-%d')
notebooks/parse.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# default_exp combo
# -

#hide
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

# # Combinations
#
# > This module contains all the code for running our experiments for Tango. To reproduce our results, please run each of the cells in this notebook.

# +
# export
import json
import ntpath
import os
import pickle
import sys
import time

import numpy as np
import pandas as pd

from collections import OrderedDict
from pathlib import Path

from tango.eval import *
from tango.utils import *
# -

# hide
path = Path('/tf/data')


# +
# export
def execute_retrieval_run(run, similarities):
    """Score every corpus document against the run's query and rank them.

    `run` carries a "query" id of the form "system-a-b" plus duplicate and
    non-duplicate corpora; `similarities` is a nested mapping indexed by the
    three query tokens, then by (doc_token_1, doc_token_2).

    Returns an OrderedDict of doc id -> similarity, highest score first.
    """
    ranking = {}
    query = run["query"]
    corpus = run["dup_corpus"] + run["non_dup_corpus"]
    query_tokens = query.split("-")
    query_sims = similarities[query_tokens[0]][query_tokens[1]][query_tokens[2]]
    for doc in corpus:
        doc_tokens = doc.split("-")
        ranking[doc] = query_sims[(doc_tokens[1], doc_tokens[2])]
    return OrderedDict(sorted(ranking.items(), key=lambda t: t[1], reverse=True))


def run_settings(settings, similarities, config, systems_allowed=None):
    """Execute and evaluate every retrieval run in every setting.

    `config` (model/vwords/fps/technique metadata) is merged into each result
    and ranking record. `systems_allowed`, when non-empty, restricts runs to
    queries whose system token is in the list.

    Returns (all_results, all_rankings): dicts keyed by setting name.
    """
    # Fixed: default was a mutable `[]`; use a None sentinel instead.
    if systems_allowed is None:
        systems_allowed = []
    all_results = {}
    all_rankings = {}
    for setting in settings:
        all_results[setting] = []
        all_rankings[setting] = []
        runs = settings[setting]
        print("Running setting", setting)
        for run in runs:
            query = run["query"]
            query_tokens = query.split("-")
            if len(systems_allowed) != 0 and query_tokens[0] not in systems_allowed:
                continue
            ranking = execute_retrieval_run(run, similarities)
            ranking_results = evaluate_ranking(ranking, run["gnd_trh"])
            ranking_results["setting"] = setting
            ranking_results["app"] = query_tokens[0]
            ranking_results["run_id"] = run["run_id"]
            ranking_results.update(config)
            ranking_info = {"run_id": run["run_id"], "query": query, "ranking": ranking}
            ranking_info["setting"] = setting
            ranking_info.update(config)
            all_results[setting].append(ranking_results)
            all_rankings[setting].append(ranking_info)
    return all_results, all_rankings


def write_results(output_path, results):
    """Write one CSV per setting plus a combined all_results.csv (';' separated)."""
    Path(output_path).mkdir(parents=True, exist_ok=True)
    all_results = []
    for setting in results:
        pd.read_json(json.dumps(results[setting])).to_csv(
            os.path.join(output_path, setting + '.csv'), index=False, sep=";")
        all_results.extend(results[setting])
    pd.read_json(json.dumps(all_results)).to_csv(
        os.path.join(output_path, 'all_results.csv'), index=False, sep=";")


def write_rankings(output_path, rankings):
    """Write one JSON-lines file per setting plus a combined all_rankings.csv.

    NOTE(review): the files carry a '.csv' extension but contain JSON lines
    (written via write_json_line_by_line); downstream code reads them back
    with read_json_line_by_line, so the names are kept as-is.
    """
    Path(output_path).mkdir(parents=True, exist_ok=True)
    all_rankings = []
    for setting in rankings:
        write_json_line_by_line(rankings[setting], os.path.join(output_path, setting + '.csv'))
        all_rankings.extend(rankings[setting])
    write_json_line_by_line(all_rankings, os.path.join(output_path, 'all_rankings.csv'))
# -


# export
def convert_results_format(sim_path, settings_path, out_path, models):
    """Re-evaluate the pickled per-model similarity files and persist results.

    For each model, every `rankings_user_*.pkl` file under `sim_path/model`
    is loaded; its file name encodes the visual-word count and frame rate
    (tokens 3 and 4). Each of the five techniques inside the pickle is run
    against all settings, and the accumulated results/rankings are written
    under `out_path`.
    """
    similarities_path = sim_path
    output_results = out_path/"user_results_weighted_all"
    output_rankings = out_path/"user_rankings_weighted_all"
    techniques = ["weighted_lcs", "bovw", "lcs", "bovw_lcs", "bovw_weighted_lcs"]
    systems_allowed = []

    settings = load_settings(settings_path)
    all_results = {}
    all_rankings = {}
    for setting in settings:
        all_results[setting] = []
        all_rankings[setting] = []

    for model in models:
        sim_files = find_file("rankings_user_*.pkl", os.path.join(similarities_path, model))
        for sim_file in sim_files:
            file_name = ntpath.basename(sim_file).split(".")[0]
            file_tokens = file_name.split("_")
            vwords = file_tokens[3]
            frames_per_sec = file_tokens[4]
            # Fixed: pickle.load(open(...)) leaked the file handle.
            with open(sim_file, 'rb') as pkl_file:
                model_similarities = pickle.load(pkl_file)
            for technique in techniques:
                print(model_similarities.keys())  # debug: techniques present in this pickle
                similarities = model_similarities[technique]
                configuration = {
                    "model": model,
                    "vwords": vwords,
                    "fps": frames_per_sec,
                    "technique": technique
                }
                print("Running config: ", configuration)
                results, rankings = run_settings(settings, similarities, configuration, systems_allowed)
                for setting in settings:
                    all_results[setting].extend(results[setting])
                    all_rankings[setting].extend(rankings[setting])
    print("Writing results and rankings")
    write_results(output_results, all_results)
    write_rankings(output_rankings, all_rankings)
    print("done")


sim_path = path/'outputs/results'
settings_path = path/'outputs/evaluation_settings'
out_path = path/'outputs'
models = ['SimCLR']
convert_results_format(sim_path, settings_path, out_path, models)


# export
def get_info_to_ranking_results(ranking, ranking_results, run, dl_model, ir_model, weight_str, setting):
    """Annotate a combined (visual + textual) ranking and its metrics.

    Builds combined model/vwords/fps/technique labels from the DL and IR
    config tuples, merges them (plus the weight) into `ranking_results`
    in place, and returns (ranking_info, ranking_results).
    """
    new_model = dl_model[0] + "-" + ir_model[0]
    new_vwords = dl_model[1]
    new_fps = dl_model[2] + "-" + ir_model[1] + "ftk"
    new_technique = dl_model[3] + "-" + ir_model[2]
    new_config = "({},{})".format("-".join(dl_model), "-".join(ir_model))
    new_config_weight = "({},{},{})".format(weight_str, "-".join(dl_model), "-".join(ir_model))
    config = {
        "model": new_model,
        "vwords": new_vwords,
        "fps": new_fps,
        "technique": new_technique,
        "weight": weight_str,
        "model_config": new_config,
        "model_config_weight": new_config_weight
    }
    query = run["query"]
    query_tokens = query.split("-")
    ranking_results["setting"] = setting
    ranking_results["app"] = query_tokens[0]
    ranking_results["run_id"] = run["run_id"]
    ranking_results.update(config)
    ranking_info = {"run_id": run["run_id"], "query": query, "ranking": ranking}
    ranking_info.update(config)
    return ranking_info, ranking_results


# +
# export
def tango_combined(out_path, dl_rankings_path, ir_rankings_path, settings_path, dl_models, ir_models):
    """Combine DL (visual) and IR (textual) rankings with weighted score fusion.

    For every (dl_model, ir_model) pair selected by `dl_models`/`ir_models`,
    and for every run in the settings to run, the combined score of a doc is
    `weight * ir_score + (1 - weight) * dl_score`, swept over weights
    0.0..1.0, plus a second sweep where the weight is applied only to apps
    with sufficient DL/IR vocabulary agreement. Results and rankings are
    written under `out_path`.
    """
    # all_data
    results_out_path = out_path/"tango_comb_results"
    rankings_out_path = out_path/"tango_comb_rankings"

    # calibration
    # settings_path = 'evaluation_settings_split/calibration'
    # results_out_path = "comb_results_calib"
    # rankings_out_path = "comb_rankings_calib"

    # test
    # settings_path = 'evaluation_settings_split/test'
    # results_out_path = "comb_results_test"
    # rankings_out_path = "comb_rankings_test"

    Path(results_out_path).mkdir(parents=True, exist_ok=True)
    Path(rankings_out_path).mkdir(parents=True, exist_ok=True)

    # read data
    settings = load_settings(settings_path)
    dl_rankings = read_json_line_by_line(dl_rankings_path)
    dl_rankings_by_config = group_dict(
        dl_rankings, lambda rec: (rec['model'], rec['vwords'], rec['fps'], rec['technique'],))
    ir_rankings = read_json(ir_rankings_path)
    ir_rankings_by_config = group_dict(
        ir_rankings, lambda rec: (rec['model'], rec['fps'], rec['technique'],))

    # best_dl_models = [
    #     "M00-10000vw-1ftk-bovw_weighted_lcs", "M00-10000vw-1ftk-weighted_lcs",
    #     "M00-10000vw-5ftk-bovw_weighted_lcs", "M00-1000vw-1ftk-weighted_lcs", "M00-1000vw-5ftk-bovw",
    #     "M00-1000vw-5ftk-bovw_weighted_lcs", "M00-5000vw-5ftk-weighted_lcs", "M01-1000vw-5ftk-bovw",
    #     "M01-5000vw-5ftk-bovw_lcs", "M01-5000vw-5ftk-bovw_weighted_lcs",
    #     "M01-1000vw-5ftk-bovw_weighted_lcs"]
    # best_ir_models = ["ocr+ir--1ftk-all_text", "ocr+ir--5ftk-all_text", "ocr+ir--5ftk-unique_frames",
    #                   "ocr+ir--5ftk-unique_words"]

    # Apps whose DL/IR vocabularies agree well enough for the second,
    # app-conditional weight sweep, per IR configuration.
    ir_model_apps_for_comb = {
        "1ftk-all_text": ['APOD', 'DROID', 'GNU', 'GROW'],
        "5ftk-all_text": ['APOD', 'DROID', 'GNU', 'GROW'],
        "5ftk-unique_frames": ['APOD', 'DROID', 'GROW'],
        "5ftk-unique_words": ['APOD', 'GROW'],
    }

    settings_to_run = ["setting2"]

    # Keep only the config tuples whose joined name appears in the requested
    # model-name lists (the names are evaluated before these rebinds).
    selected_dl_models = list(filter(
        lambda rec: "-".join([rec[0], rec[1], rec[2], rec[3]]) in dl_models,
        dl_rankings_by_config.keys()))
    selected_ir_models = list(filter(
        lambda rec: "-".join([rec[0], "", rec[1] + "ftk", rec[2]]) in ir_models,
        ir_rankings_by_config.keys()))

    # run combinations
    start_time = time.time()
    all_new_rankings = []
    all_new_results = []
    for dl_model in selected_dl_models:
        dl_mod_rankings = group_dict(dl_rankings_by_config[dl_model], lambda rec: rec["setting"])
        for ir_model in selected_ir_models:
            ir_mod_rankings = group_dict(ir_rankings_by_config[ir_model], lambda rec: rec["setting"])
            print(dl_model, ir_model)
            app_for_comb = ir_model_apps_for_comb["-".join([ir_model[1] + "ftk", ir_model[2]])]
            for setting in settings_to_run:
                dl_runs = group_dict(dl_mod_rankings[setting], lambda rec: rec["run_id"])
                # NOTE: the IR records use camelCase "runId" while DL uses "run_id".
                ir_runs = group_dict(ir_mod_rankings[setting], lambda rec: rec["runId"])
                setting_runs = settings[setting]
                for run in setting_runs:
                    run_id = run["run_id"]
                    ir_run_ranking = ir_runs[str(run_id)][0]["ranking"]
                    ir_run_ranking = dict(
                        zip((rec["docName"] for rec in ir_run_ranking),
                            (rec for rec in ir_run_ranking)))
                    dl_run_ranking = dl_runs[run_id][0]["ranking"]

                    # rankings based on all weights
                    for weight in np.arange(0, 1.1, 0.1):
                        new_ranking = {}
                        for doc in dl_run_ranking:
                            ir_score = 0 if doc not in ir_run_ranking else ir_run_ranking[doc]["score"]
                            dl_score = dl_run_ranking[doc]
                            new_score = weight * ir_score + (1 - weight) * dl_score
                            new_ranking[doc] = new_score
                        ranking = OrderedDict(sorted(new_ranking.items(), key=lambda t: t[1], reverse=True))
                        ranking_results = evaluate_ranking(ranking, run["gnd_trh"])
                        ranking_info, ranking_results = get_info_to_ranking_results(
                            ranking, ranking_results, run, dl_model, ir_model, str(weight), setting)
                        all_new_results.append(ranking_results)
                        all_new_rankings.append(ranking_info)

                # -------------------------------------------------------------
                # rankings of approach based vocabulary agreement (e.g., 0.2-0: 0.2 weight for all apps except TIME,
                # TOK, and 0 weight for TIME and TOK)
                for run in setting_runs:
                    run_id = run["run_id"]
                    ir_run_ranking = ir_runs[str(run_id)][0]["ranking"]
                    ir_run_ranking = dict(
                        zip((rec["docName"] for rec in ir_run_ranking),
                            (rec for rec in ir_run_ranking)))
                    dl_run_ranking = dl_runs[run_id][0]["ranking"]
                    query = run["query"]
                    query_tokens = query.split("-")
                    app = query_tokens[0]
                    for base_weight in np.arange(0.1, 1.1, 0.1):
                        best_weights_name = f'{base_weight:0.1f}' + "-0"
                        # IR contributes only for apps with vocabulary agreement.
                        weight = 0
                        if app in app_for_comb:
                            weight = base_weight
                        new_ranking = {}
                        for doc in dl_run_ranking:
                            ir_score = 0 if doc not in ir_run_ranking else ir_run_ranking[doc]["score"]
                            dl_score = dl_run_ranking[doc]
                            new_score = weight * ir_score + (1 - weight) * dl_score
                            new_ranking[doc] = new_score
                        ranking = OrderedDict(sorted(new_ranking.items(), key=lambda t: t[1], reverse=True))
                        ranking_results = evaluate_ranking(ranking, run["gnd_trh"])
                        ranking_info, ranking_results = get_info_to_ranking_results(
                            ranking, ranking_results, run, dl_model, ir_model, best_weights_name, setting)
                        all_new_results.append(ranking_results)
                        all_new_rankings.append(ranking_info)

    print("--- %s seconds ---" % (time.time() - start_time))
    print("Writing data")
    pd.read_json(json.dumps(all_new_results)).to_csv(
        os.path.join(results_out_path, 'all_results.csv'), index=False, sep=";")
    write_json_line_by_line(all_new_rankings, os.path.join(rankings_out_path, 'all_rankings.json'))
# -

# +
dl_ranking_path = path/'outputs/user_rankings_weighted_all/all_rankings.csv'
ir_rankings_path = path/'models/tango_txt/tango_txt_rankings/all_rankings.json'
settings_path = path/'outputs/evaluation_settings'
# BUG FIX: the original call passed only three positional arguments to a
# six-parameter function (out_path/dl_models/ir_models were missing), which
# raised TypeError. Pass the output root and the best-performing model
# configurations (lists taken from the commented selection above).
dl_models = [
    "M00-10000vw-1ftk-bovw_weighted_lcs", "M00-10000vw-1ftk-weighted_lcs",
    "M00-10000vw-5ftk-bovw_weighted_lcs", "M00-1000vw-1ftk-weighted_lcs", "M00-1000vw-5ftk-bovw",
    "M00-1000vw-5ftk-bovw_weighted_lcs", "M00-5000vw-5ftk-weighted_lcs", "M01-1000vw-5ftk-bovw",
    "M01-5000vw-5ftk-bovw_lcs", "M01-5000vw-5ftk-bovw_weighted_lcs",
    "M01-1000vw-5ftk-bovw_weighted_lcs"]
ir_models = ["ocr+ir--1ftk-all_text", "ocr+ir--5ftk-all_text", "ocr+ir--5ftk-unique_frames",
             "ocr+ir--5ftk-unique_words"]
tango_combined(path/'outputs', dl_ranking_path, ir_rankings_path, settings_path, dl_models, ir_models)
# -

from nbdev.export import notebook2script
notebook2script()
nbs/08_combo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import torch
from torch.autograd import Variable

# Training pairs for the target relation y = 2x.
x_data = [1.0, 2.0, 3.0]
y_data = [2.0, 4.0, 6.0]

# Single trainable weight, initialised to 1.0; autograd tracks its gradient.
w = Variable(torch.Tensor([1.0]), requires_grad=True)


# +
def forward(x):
    """Bias-free linear model: prediction = w * x."""
    return w * x


def loss(x, y):
    """Half squared error of the prediction for one sample."""
    residual = forward(x) - y
    return 0.5 * residual ** 2
# -

print("predict (before training)", 4, forward(4).data[0])

# Plain stochastic gradient descent, one sample at a time, for 10 epochs.
for epoch in range(10):
    for x_val, y_val in zip(x_data, y_data):
        sample_loss = loss(x_val, y_val)
        sample_loss.backward()  # populates w.grad
        print("\t grad:", x_val, y_val, w.grad.data[0])
        # Manual SGD step on the raw tensor, then reset the gradient so it
        # does not accumulate across steps.
        w.data = w.data - 0.01 * w.grad.data
        w.grad.data.zero_()
    # Report the loss of the last sample seen in this epoch.
    print("progress:", epoch, sample_loss.data[0])

print("predict (after training)", 4, forward(4).data[0])
001_pytorch/003_auto_gradient.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Update and Delete

# +
import sqlite3
import random
import time
import datetime

# Create a connection (the file is created on first use).
conn = sqlite3.connect('dsa.db')

# Create a cursor shared by all helper functions below.
c = conn.cursor()


# Create the table if it does not exist yet.
def create_table():
    c.execute('create table if not exists produtos(id integer primary key autoincrement not null, date text, '\
              'prod_name text, valor real)')


# Insert one hard-coded row.
def data_insert():
    c.execute("insert into produtos values(002, '02-05-2016', 'teclado', 130)")
    conn.commit()
    # BUG FIX: this function used to close the shared cursor and connection,
    # which made every subsequent call in the notebook fail. Closing belongs
    # at the very end of the session, not inside an insert helper.


# Insert a row using bound variables (parameterized query avoids SQL injection).
def data_insert_var():
    new_date = datetime.datetime.now()
    new_prod_name = 'SQL'
    new_valor = random.randrange(50,100)
    c.execute("insert into produtos (date, prod_name, valor) values (?, ?, ?)",
              (new_date, new_prod_name, new_valor))
    conn.commit()


# Read and print all rows.
def leitura_todos_dados():
    c.execute("select * from produtos")
    for linha in c.fetchall():
        print(linha)


# Read and print a specific column (valor).
def leitura_colunas():
    c.execute("select * from produtos")
    for linha in c.fetchall():
        print(linha[3])


# Update rows matching a value.
def atualiza_dados():
    c.execute("update produtos set prod_name = 'JavaScript' where valor = 90.0")
    conn.commit()


# Delete rows matching a value.
def remove_dados():
    c.execute("delete from produtos where valor = 60.0")
    conn.commit()
# -

# ROBUSTNESS FIX: ensure the table exists before updating/reading, so the
# notebook also works on a fresh kernel / fresh database file
# (previously create_table() was never called and atualiza_dados() failed).
create_table()

atualiza_dados()

# + active=""
# leitura_todos_dados()
# -

# remove_dados()

leitura_todos_dados()

# ## end
pyfund/6 - SQLite_ManipulandoDados/cap6-05-delete e insert.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # #### **To install rdkit** # ##### conda install -c rdkit rdkit # ##### pip install jupyterlab import os import pandas as pd # #### **Load Pre-Processed data from Part-1** df = pd.read_csv (os.path.join ("Datasets", "Part-1_Bioactivity_Preprocessed_Data.csv")) df # ### **Lipinskis rule of 5 :** # #### <NAME>, a Scientist at Pfizer, came up with a set of rule of thumb for evalutaing the **drug likeness** of compunds. Such druglikeness is based on the Absorption, Distribution, Metabolism and Excretion **(ADME)** that is also known as the pharmacokinetic profile. Lipinski analyzed all orally active FDA-approved drugs in the formulation of what is to be known as the Rule of Five, or Lipinski's Rule. # #### **1.** Weight of 1 Molecule should be less than 500gm/mole # #### **2.** log (P) < 5 # **P -> The Octanol : Water Partition coefficient** # **If all the drug goes into octanol, then the ratio of Octanol will be very large, then it will be soluble, but it cannot be taken in the blood stream. # In other words, it can't be too hydrophobic.** # ##### **3.** Less than 5 H-bond donors (-OH) # ##### **4.** Less than 10 H-bond acceptors (-O) # # If there are too many hydrogen bond donors and acceptors, then the molecule will be very hydrophilic, and will be very soluble in blood, but it will not be able to go through the blood-brain barrier. Also, most of the binding sites for the drug will be hydrophobic. # So, it can't be too hydrophobic, it can't be too hydrophilic. 
# --- import numpy as np from rdkit import Chem from rdkit.Chem import Descriptors, Lipinski # #### **Compute Lipinski Descriptors** # ##### We'll use the inbuilt function in rdkit Descriptors to find the values for the Lipinski Descriptors from the Canonical Smiles def get_lipinski_descriptors (canonical_smiles) : molecules = [] for canonical_smile in canonical_smiles : # print (canonical_smile) molecule = Chem.MolFromSmiles (canonical_smile) molecules.append (molecule) descriptors = [] for molecule in molecules : descriptors_MolWt = Descriptors.MolWt (molecule) descriptors_MolLogP = Descriptors.MolLogP (molecule) descriptors_NumHDonors = Lipinski.NumHDonors (molecule) descriptors_NumHAcceptors = Lipinski.NumHAcceptors (molecule) row = np.array ([descriptors_MolWt, descriptors_MolLogP, descriptors_NumHDonors, descriptors_NumHAcceptors]) descriptors.append (row) column_names = ["MolecularWeight", "LogP", "NumHDonors", "NumHAcceptors"] descriptors = pd.DataFrame (data = descriptors, columns = column_names) return descriptors df_lipinski = get_lipinski_descriptors (df.canonical_smiles.values) df_lipinski df_combined = pd.concat ([df, df_lipinski], axis = 1) df_combined # #### **Conversion of IC50 values into PIC 50 values** # **`PIC50 = - log10 (IC50)`** # 1. This is being done for the IC50 values to be more uniformly distributed. # 2. The IC50 values are contained in the standard value column, and they are in nM (nano Molar Unit). # 3. We'll multiply them with 10^9 to convert them to Molar (from nano Molar). # 4. Then, we'll find the -log10 to convert the IC50 values into PIC50. 
df_combined.standard_value.describe () # #### Step 1 : Cap the values to 100000000 # + standard_values_capped = [] for standard_value in df_combined.values [:, 2] : if standard_value > 100000000 : standard_values_capped.append (100000000) else : standard_values_capped.append (standard_value) df_combined ['standard_value_capped'] = standard_values_capped df_capped = df_combined.drop ('standard_value', axis = 1) df_capped.standard_value_capped.describe () df_capped # - # #### Step 2 : Convert IC50 values to PIC50 # Convert from nM (nano Molar) to Molar by multiplying by 10^-9 df_capped.standard_value_capped = df_capped.standard_value_capped * 1e-9 # #### **Convert from IC50 to PIC50 using :** # **`PIC50 = - log10 (IC50)`** df_capped_PIC50 = df_capped.copy () df_capped_PIC50 = df_capped_PIC50.rename (columns = {"standard_value_capped": "PIC50"}) df_capped_PIC50.PIC50 = -np.log10 (df_capped_PIC50.PIC50.values) print (df_capped_PIC50.head (5)) print (df_capped.head (5)) print (df_capped.standard_value_capped.describe ()) print (df_capped_PIC50.PIC50.describe ()) # #### **Remove the intermediate Bio-Activity class from the dataset** # ##### This is being done to get a simple comparison between the two Bio-Activity classes df_two_class = df_capped_PIC50 [df_capped_PIC50.bioactivity_class != 'intermediate'] df_two_class.to_csv (os.path.join ("Datasets", "Part-2_bioactivity_two_class_pic50.csv")) df_two_class # #### **Exploratory Data Analysis (Chemical Space Analysis) via Lipinski descriptors** # ##### **Import Libraries** # import seaborn as sns # sns.set (style = 'ticks') # import matplotlib.pyplot as plt # #### **Frequency plots for bioactivity class** # + import matplotlib.pyplot as plt import seaborn as sns sns.countplot (x = 'bioactivity_class', data = df_two_class, edgecolor = 'black') plt.xlabel ("Bioactivity Class") plt.ylabel ("Frequency") plt.savefig (os.path.join ("Observations", "Part-2_Bioactivity_Class_Frequency.pdf")) # - # #### **Scatter plot of Molecular 
# Weight vs. Log (P)**

# +
plt.figure (figsize = (10, 7))
sns.scatterplot (x = 'MolecularWeight', y = 'LogP', hue = 'bioactivity_class',
                 data = df_two_class, size = 'PIC50', edgecolor = 'black')
plt.xlabel ("Molecular Weight", fontweight = 'bold')
plt.ylabel ("LogP", fontweight = 'bold')
plt.savefig (os.path.join ("Observations", "Part-2_Scatter_Mol_Wt_vs_LogP.pdf"))
# -

# #### **Box Plots**

# ##### **Bioactivity Class vs. PIC50**

plt.figure (figsize = (7, 10))
sns.boxplot (x = 'bioactivity_class', y = 'PIC50', data = df_two_class)
plt.xlabel ('Bioactivity Class')
plt.ylabel ('PIC50')
plt.savefig (os.path.join ("Observations", "Part-2_Box_Bioactivity_vs_PIC50.pdf"))

# #### **Statistical Analysis - Mann Whitney U Test**

# ##### **"Statistical Significance"** refers to the probability that the observed result could have occurred randomly if it has no true underlying effect. If you run statistical tests on questions that are likely to be true, your findings are less likely to be false.

# ##### **Source :** https://www.nngroup.com/articles/understanding-statistical-significance/

# ##### **In other words, if a result exists in the sample, then we have evidence that it also exists in the population**

# ##### **We can use p-value to find statistical significance**

def mann_whitney (descriptor) :
    """Run a Mann-Whitney U test comparing `descriptor` between the
    'active' and 'inactive' rows of the global `df_two_class` frame.

    Parameters
    ----------
    descriptor : str
        Column name in `df_two_class` to test (e.g. 'PIC50', 'LogP').

    Returns
    -------
    pandas.DataFrame
        One-row frame holding the U statistic, the p-value, alpha (0.05)
        and a plain-English interpretation of the test outcome.
    """
    from scipy.stats import mannwhitneyu

    # Split the descriptor values by bioactivity class.
    # Note: mannwhitneyu is deterministic, so no RNG seeding is needed
    # (the previous seed(1) / randn import had no effect on the result).
    df = df_two_class [[descriptor, 'bioactivity_class']]
    active = df.loc [df.bioactivity_class == 'active', descriptor]
    inactive = df.loc [df.bioactivity_class == 'inactive', descriptor]

    stat, p = mannwhitneyu (active, inactive)

    # Interpret at the conventional 5% significance level.
    alpha = 0.05
    if p > alpha :
        interpretation = 'Same distribution (fail to reject H0)'
    else :
        interpretation = 'Different distribution (reject H0)'

    return pd.DataFrame ({'Descriptor' : descriptor,
                          'Statistics' : stat,
                          'p' : p,
                          'alpha' : alpha,
                          'Interpretation' : interpretation}, index = [0])

mann_whitney_pic50 = mann_whitney ('PIC50')
mann_whitney_pic50.to_csv (os.path.join ("Observations", "Part-2_pIc50_Mann_Whitney.csv"))
mann_whitney_pic50

# #### **Molecular Weight**

sns.boxplot (x = 'bioactivity_class', y = 'MolecularWeight', data = df_two_class)
plt.savefig (os.path.join ("Observations", "Part-2_Bioactivity_Class_vs_MolecularWeight.pdf"))

mann_whitney_mol_wt = mann_whitney ("MolecularWeight")
mann_whitney_mol_wt.to_csv (os.path.join ("Observations", "Part-2_Molecular_Weight_Mann_Whitney.csv"))
mann_whitney_mol_wt

# #### **LogP**

# +
sns.boxplot (x = 'bioactivity_class', y = 'LogP', data = df_two_class)
plt.savefig (os.path.join ("Observations", "Part-2_Bioactivity_Class_vs_LogP.pdf"))
# -

mann_whitney_log_p = mann_whitney ('LogP')
mann_whitney_log_p.to_csv (os.path.join ("Observations", "Part-2_Log_P_Mann_Whitney.csv"))
mann_whitney_log_p

# #### **NumHDonors**

sns.boxplot (x = 'bioactivity_class', y = 'NumHDonors', data = df_two_class)
plt.savefig (os.path.join ("Observations", "Part-2_Bioactivity_Class_vs_NumHDonors.pdf"))

# #### **Mann Whitney U - Test**

mann_whitney_num_h_donors = mann_whitney ('NumHDonors')
mann_whitney_num_h_donors.to_csv (os.path.join ("Observations", "Part-2_Num_H_Donors_Mann_Whitney.csv"))
mann_whitney_num_h_donors

# #### **NumHAcceptors**

sns.boxplot (x = 'bioactivity_class', y = 'NumHAcceptors', data = df_two_class)
plt.savefig (os.path.join ("Observations", "Part-2_Bioactivity_Class_vs_NumHAcceptors.pdf"))

mann_whitney_num_h_acceptors = mann_whitney ('NumHAcceptors')
mann_whitney_num_h_acceptors.to_csv (os.path.join ("Observations", "Part-2_Num_H_Acceptors_Mann_Whitney.csv"))
mann_whitney_num_h_acceptors
Part-2_Exploratory_Drug_Analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Two ways to embed an external page in a notebook: raw HTML, or the
# IFrame display helper. Imports consolidated into one cell.

# +
from IPython.display import HTML, IFrame

# Attribute values are quoted — the original unquoted form
# (src=http://... width=600) relied on lenient browser parsing.
HTML('<iframe src="http://medium.com/@shawlu95" width="600" height="300"></iframe>')
# -

# IFrame builds the same embed without hand-written HTML.
help(IFrame)

IFrame(src="http://medium.com/@shawlu95", width=600, height=300)
handy_syntax/external_site.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python3 # name: python3 # --- # # 2. Language Modeling and Neural Networks # This post is about language modeling and its relation to neural networks. We will start with what may very well be the simplest task possible: creating a **bigram language model**. # # ## 2.1 Bigram Language Model # To start, in case it not clear, what is a language model? A language model is a model of the probabilities of sequences of words. In english, we refer to a sequence of words a sentence. So, for example, if we had the sentence: # # ``` # The quick brown fox jumps over the lazy dog. # ``` # # A language model will allow us to calculate: # # $$p\Big(\text{The quick brown fox jumps over the lazy dog.}\Big)$$ # # The form that the above probability distribution takes is what makes up the model, and typically that is going to involve making some assumptions about the structure of language and how sentences are formed. # # ### 2.1.1 What is a Model anyways? # I want to take a moment to be very clear here and describe exactly what a model is. A model is trying to capture some real world phenomema (i.e. language, motion, finances, etc), and it will never be 100% correct. It is always going to make simplifying assumptions. The idea is that they will be correct most of the time, but some of the time they will be incorrect. For example, Newtonian Mechanics was determined to be incorrect (based on work by Einstein), but it still proves to be useful! # # ### 2.1.2 What is a Bigram? # A **bigram** is simply two consecutive words in a sentence. 
So, from our above example, the bigrams would be: # # ``` # The quick # quick brown # brown fox # fox jumps # jump over # over the # the lazy # lazy dog # ``` # # We could also have **trigrams** and **n-grams**, which deal with three and $n$ consecutive words respectively. In terms of the bigram model, we are going to be modeling each bigram as a probability: # # $$Bigram \; model: p\big(w_t \mid w_{t-1}\big)$$ # # So for example, we could have: # # $$p\big(brown \mid quick\big) = 0.5$$ # # $$p\big(the \mid the\big) = 0$$ # # In the above, all the statement is saying is: the probability of seeing the word `brown` given that we just saw the word `quick` is 0.5. Now, how do we actually find these probabilities? We just count! So, to find that $p\big(brown \mid quick\big) = 0.5$, we would simply count up how many times $quick \rightarrow brown$ appears in our documents, and how many times $brown$ appears, and then divide the former by the later. This will give us the **maximum likelihood probability**: # # $$p\big(brown \mid quick\big) = \frac{count(quick \rightarrow brown)}{count(quick)}$$ # # I would like to clarify what I mean when I refer to **documents**. Generally speaking, we are going to have some training data - a list of exmaple sentences - to create our model. For our purposes, we will mostly be using wikipedia, but in general documents just refer to a set of files that contains sentences. This sometimes will be called a **corpus**. # # ### 2.1.3 What is a Language Model # Returning to the idea of a language model, recall that we want to know the probability of an entire sentence. How can bigrams help us do that? As per our previous discussion, this is going to involve making some assumptions. Let's look at a simpler example, specifically the sentence: # # ``` # I like dogs. # ``` # # Our goal is to find $p\big(I like dogs\big)$. Well, we can apply the **rules of conditional probability** here. 
For a quick refresher, recall that the rules of conditional probability state that: # # $$p\big(A \mid B\big) = \frac{p \big(A \cap B \big)}{p\big(B\big)}$$ # # $$p \big(A \cap B \big) = p\big(A \mid B\big) p\big(B\big)$$ # # This can of course be extended to 3 variables like so: # # $$p \big(A, B, C \big) = p \big(C \mid A, B\big) p\big(A, B\big)$$ # # Which, in the case of our sentence above would leave us with: # # $$p \big(I, like, dogs \big) = p \big(dogs \mid I, like\big) p\big(I, like\big)$$ # # We can apply this rule of conditional probability yet again the joint probability $ p\big(I, like\big)$, resultin in: # # $$p \big(I, like, dogs \big) = p \big(dogs \mid I, like\big) p\big(like \mid I\big) p \big(I\big)$$ # # This could simply be continued if we had a longer sentence. This process encapsulates what is known as the **chain rule** of probability. So, why is the above important? Well, if we look at our resulting expression above, we can see that one of the probabilities is part of the bigram model: # # <br> # <center> # <span style="color:#0000cc">$p \big(dogs \mid I, like\big)$</span> # <span style="color:#ff0000">$p\big(like \mid I\big)$</span> # <span style="color:#0000cc">$p \big(I\big)$</span> # </center> # # Now, the two other terms in blue are _not_ bigrams, but that is okay! We can still calculate them using maximum likelihood estimation. For the unigram $p(I)$, this is simply the number of times $I$ appears in the corpus, relative to the total corpus length: # # $$p\big(I\big) = \frac{count(I)}{corpus \; length }$$ # # For the trigram $p \big(dogs \mid I, like\big)$, we would need to perform the following counts: # # $$p \big(dogs \mid I, like\big) = \frac{count(I, like, dogs)}{count(I, like)}$$ # # We can extend the above logic to sentences of any length. 
So, if we were dealing with a sentence: # # ``` # A B C D E # ``` # # We could model it as: # # $$p\big( A, B, C, D, E\big) = p\big(E \mid A, B, C, D\big) p\big( D \mid A, B, C\big) p\big( C \mid A, B\big) # p\big( B \mid A \big) p\big( A\big)$$ # # Note, above we are using commas to separate our words, which makes it look like a joint probability. However, we must keep in mind that we are looking at a sequence, and not simply a joint probability. With that said, what we should be taking away from the above equation is that modeling these $n$-grams will at some point become problematic. For example, if we return to our original sentence: # # ``` # The quick brown fox jumps over the lazy dog. # ``` # # Perhaps this is the only sentence like this in our corpus! We know that: # # ``` # The quick brown fox jumps over the lazy cat. # ``` # # Is a valid and reasonable sentence. However, if it never shows up in our corpus, its maximum likelihood probability is 0. Zero is not an accurate probability in this case, since we _know_ that the sentence makes sense, and that our language model should allow for it. # # ### 2.1.4 Add-One Smoothing # One simple way to overcome these zero probabilities is to add a small number to each count, instead of performing vanilla maximum-likelihood counting. For instance, if we have a vocabulary size of $V$, our probability would look like: # # $$p_{smooth}\big(B \mid A\big) = \frac{count(A \rightarrow B) + 1}{count(A) + V}$$ # # We add $V$ to the denominator to ensure that our probabilities sum to one. This process ensures that even if a phrase does not appear in our corpus, it still has a small probability of occuring. # # ### 2.1.5 The Markov Assumption # Another thing we can do is make the **Markov Assumption**. This is that whatever you see _now_ depends only on what you saw in the previous step. Mathematically this looks like: # # $$p\big( w_t \mid w_{t-1}, w_{t-2}, ... 
, w_1 \big) = p \big(w_t \mid w_{t-1}\big)$$ # # This is know as a **first order markov** because it only depends on one previous term. For example, in our previous situation when modeling `A B C D E` we ended up with: # # $$p\big( A, B, C, D, E\big) = p\big(E \mid A, B, C, D\big) p\big( D \mid A, B, C\big) p\big( C \mid A, B\big) # p\big( B \mid A \big) p\big( A\big)$$ # # If we made the markov assumption, the first term on the right would be reduced to: # # $$p\big(E \mid A, B, C, D\big) = p\big(E \mid D\big)$$ # # The entire sentence would be reduced to: # # $$p\big( A, B, C, D, E\big) = p\big(E \mid D\big) p\big( D \mid C\big) p\big( C \mid B\big) # p\big( B \mid A \big) p\big( A\big)$$ # # We end up with a probability consisting entirely of bigrams and one unigram! Why is this important? Well, we need to keep in mind that the longer a sentence, the less likely it is to appear in our training data. This is because our training data makes up only a tiny fraction of the entire space of possible sentences. However, very short phrases like bigrams are going to be very common. So, while phrases such as: # # ``` # The quick brown fox jumps over the lazy cat. # # The quick brown fox jumps over the lazy lizard. # ``` # # Are not likely to appear in our corpus, phrases such as `lazy cat` and `lazy lizard` most likely do. Hence, it is easier to model the probability for `lazy lizard` than it is to model the probability for `The quick brown fox jumps over the lazy lizard.`. This in turn makes the full sentence much more probable. # # ## 2.2 Creating a Bigram Language Model with NLTK # We are about to create a bigram language model in code using NLTK, but before we do there are a few things to consider. 
First, we know that probabilities are always between 0 and 1, and that the full joint probability of our bigram model is just the multiplication of each bigram probability in the sentence: # # $$p\big(w_1,...,w_T \big) = p\big(w_1\big) \prod_{t=2}^T p \big( w_t \mid w_{t-1}\big)$$ # # We also know that multiplying two numbers less than one together will always yield a smaller number. The result is that if we just keep multiplying probabilities together, we may encounter the **underflow** problem, which means that we hit the limit of numerical precision that our computer can handle, and it will just round down to 0. The solution to this is to use the **log probability** instead: # # $$log \Big(p\big(w_1,...,w_T \big)\big) = log \Big(p\big(w_1\big)\Big) \sum_{t=2}^T log \Big( p \big( w_t \mid w_{t-1}\big) \Big)$$ # # We can use this because we know that the log function is **monotonically** increasing, so if $A > B$ then $log(A) > log(B)$. The other thing that we are going to want to do is **normalize** each sentence. Since probabilities are between 0 and 1, log probabilities are always negative. Hence, the longer our sentences, the more negative numbers we are going to add together. This means that if we compare raw log probabilities, there is always going to be a bias towards shorter sentences. Shorter sentences will always have a higher log probabilty, simply because they have fewer negative numbers to add together. 
For example: # # $$logp\big( \text{the the the} \big) > logp\big( \text{A real, but much longer sentence than the one to left} \big)$$ # # To solve this, we can just compare the log probabilities, divided by the length of the sentence, $T$: # # $$\frac{1}{T}logp\big(w_1,...,w_T \big) = \frac{1}{T} \Big[ logp\big(w_1\big)\Big) \sum_{t=2}^T logp \big( w_t \mid w_{t-1}\big) \Big)\big]$$ # # ## 2.3 Bigram Language Model in Code # We will start with our imports: import numpy as np import operator from nltk.corpus import brown # And then write a few functions to load our data: # + def get_sentences(): """Returns 57,430 sentences from the brown corpus. Each sentence is a list of individual string tokens.""" return brown.sents() def get_sentences_with_word2idx(): """Converts sentences from word representation to index representation. Assign a unique integer, starting from 0, to every word that appears in the corpus. Returns a dictionary that contains a mapping from every word to its corresponding index.""" sentences = get_sentences() indexed_sentences = [] i = 2 word2idx = {'START': 0, 'END': 1} for sentence in sentences: indexed_sentence = [] for token in sentence: token = token.lower() if token not in word2idx: word2idx[token] = i i += 1 indexed_sentence.append(word2idx[token]) indexed_sentences.append(indexed_sentence) print('Vocabulary size: ', i) return indexed_sentences, word2idx KEEP_WORDS = set([ 'king', 'man', 'queen', 'woman', 'italy', 'rome', 'france', 'paris', 'london', 'britain', 'england', ]) def get_sentences_with_word2idx_limit_vocab(n_vocab=2000, keep_words=KEEP_WORDS): sentences = get_sentences() indexed_sentences = [] i = 2 word2idx = {'START': 0, 'END': 1} idx2word = ['START', 'END'] word_idx_count = { 0: float('inf'), 1: float('inf'), } for sentence in sentences: indexed_sentence = [] for token in sentence: token = token.lower() if token not in word2idx: idx2word.append(token) word2idx[token] = i i += 1 # keep track of counts for later sorting idx = 
word2idx[token] word_idx_count[idx] = word_idx_count.get(idx, 0) + 1 indexed_sentence.append(idx) indexed_sentences.append(indexed_sentence) # ---- Restrict vocab size ---- # Set all the words that should be kept to infinity so that they are included when # we pick the most common words for word in keep_words: word_idx_count[word2idx[word]] = float('inf') # Sort word counts dictionary by value, in descending order sorted_word_idx_count = sorted(word_idx_count.items(), key=operator.itemgetter(1), reverse=True) word2idx_small = {} new_idx = 0 idx_new_idx_map = {} for idx, count in sorted_word_idx_count[:n_vocab]: word = idx2word[idx] # print(word, count) word2idx_small[word] = new_idx idx_new_idx_map[idx] = new_idx new_idx += 1 # let 'unknown' be the last token word2idx_small['UNKNOWN'] = new_idx unknown = new_idx assert('START' in word2idx_small) assert('END' in word2idx_small) for word in keep_words: assert(word in word2idx_small) # map old idx to new idx sentences_small = [] for sentence in indexed_sentences: if len(sentence) > 1: new_sentence = [idx_new_idx_map[idx] if idx in idx_new_idx_map else unknown for idx in sentence] sentences_small.append(new_sentence) return sentences_small, word2idx_small # - # Now we can start with our language model: def get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=1): # Structure of bigram probability matrix will be: # (last word, current word) -> probability # Utilizing add-1 smoothing bigram_probs = np.ones((V, V)) * smoothing for sentence in sentences: for i in range(len(sentence)): if i == 0: # Beginning word bigram_probs[start_idx, sentence[i]] += 1 else: # Middle word bigram_probs[sentence[i-1], sentence[i]] += 1 if i == len(sentence) - 1: # Final Word # We update the bigram for last -> current # AND current -> End otken bigram_probs[sentence[i], end_idx] += 1 bigram_probs /= bigram_probs.sum(axis=1, keepdims=True) return bigram_probs if __name__ == '__main__': # Load in the data # Note: sentences are already 
converted to sequences of word indexes # Note: you can limit the vocab size if you run out of memory sentences, word2idx = get_sentences_with_word2idx_limit_vocab(10000) # Vocab size V = len(word2idx) print("Vocab size:", V) # Treat beginning of sentence and end of sentence as bigrams # START -> first word # last word -> END start_idx = word2idx['START'] end_idx = word2idx['END'] # A matrix where: # - row = last word # - col = current word # value at [row, col] = p(current word | last word) bigram_probs = get_bigram_probs(sentences, V, start_idx, end_idx, smoothing=0.1) def get_score(sentence): score = 0 for i in range(len(sentence)): if i == 0: # Beginning word score += np.log(bigram_probs[start_idx, sentence[i]]) else: # Middle word score += np.log(bigram_probs[sentence[i-1], sentence[i]]) # Final word score += np.log(bigram_probs[sentence[-1], end_idx]) return score / (len(sentence) + 1) # Map word indexes back to real words - helpful to display sentences idx2word = dict((v, k) for k, v in word2idx.items()) def get_words(sentence): return ' '.join(idx2word[i] for i in sentence) # when we sample a fake sentence, we want to ensure not to sample start token or end token sample_probs = np.ones(V) sample_probs[start_idx] = 0 sample_probs[end_idx] = 0 sample_probs /= sample_probs.sum() # Test our model on real and fake sentences while True: # real sentence real_idx = np.random.choice(len(sentences)) real = sentences[real_idx] # fake sentence fake = np.random.choice(V, size=len(real), p=sample_probs) print("REAL:", get_words(real), "SCORE:", get_score(real)) print("FAKE:", get_words(fake), "SCORE:", get_score(fake)) # input your own sentence custom = input("Enter your own sentence:\n") custom = custom.lower().split() # check that all tokens exist in word2idx (otherwise, we can't get score) bad_sentence = False for token in custom: if token not in word2idx: bad_sentence = True if bad_sentence: print("Sorry, you entered words that are not in the vocabulary") else: # 
convert sentence into list of indexes custom = [word2idx[token] for token in custom] print("SCORE:", get_score(custom)) cont = input("Continue? [Y/n]") if cont and cont.lower() in ('N', 'n'): break # # 3. Neural Bigram Model #
NLP/02-NLP_with_Deep_Learning-02-language-modeling-and-neural-networks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Metadata
#
# ```
# Course: DS 5001
# Module: 01 -- Homework KEY
# ```

# # Instructions
#
# Using the notebook we reviewed in class as your guide (`M01_03_first_foray.ipynb`), extend this notebook (after the **Code** header below) to import the text contained in the file `pg42324.txt` as a data frame of lines (not chunks). Once you have done this, answer the questions in **Questions**.
#
# Submit this notebook to the Assignment in Gradescope as a PDF.
#
# Be sure to fill out your full name and UVA ID at the top of this document.

# # Questions

# ## What is the title of the novel associated with the text file?

# **Answer**: Frankenstein

# ## How many tokens does the raw text have?
#
# By raw text, we mean the text as-is, with all the Gutenberg boilerplate removed.

# **Answer**: 80985

# ## What are the top 10 most frequent term strings in the raw text?

# **Answer**:
#
# ```
# the     4575
# and     3120
# of      2918
# i       2918
# to      2257
# my      1819
# a       1497
# in      1232
# was     1064
# that    1060
# ```

# ## Compare this list with the top 10 term strings in the file we imported in class. Which subject pronoun is most frequent in each text?

# **Answer**:
#
# ```
# Persuasion
# -----------
# the     3501
# to      2862
# and     2851
# of      2684
# a       1648
# in      1439
# was     1336
# her     1202
# had     1187
# she     1143
# ```
#
# * Persuasion = she
# * Frankenstein = i

# ## Provide a brief explanation for this difference, based on what you may know about the two novels.

# **Answer**: One is written in the third person, the other in the first (at least partly).
# # Code

import pandas as pd

# Read the Gutenberg text as one line per data-frame row. A context manager
# closes the file handle (the original `open(...).readlines()` leaked it),
# and the doubled slash in the path is collapsed.
with open('../../labs/data/gutenberg/pg42324.txt', 'r') as f:
    text = pd.DataFrame(f.readlines(), columns=['line_str'])
text.index.name = 'line_num'

# ## Get title

text.head()

# Tokenize: one row per whitespace-separated token, indexed by (line, token).
K = text.line_str.str.split(expand=True).stack().to_frame()
K.index.names = ['line_num', 'token_num']  # fixed typo: was 'lie_num'
K.columns = ['token_str']
K.head()

# ## Find number of tokens

K.shape[0]

# Normalize tokens into terms: strip non-word characters and lowercase.
# Raw string avoids the invalid-escape-sequence warning for '\W'.
K['term_str'] = K.token_str.replace(r'\W+', '', regex=True).str.lower()
K.sample(10)

V = K.term_str.value_counts()

# ## Get Most Frequent Words

V.head(10)

V
keys/M01_HW_KEY.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _cell_guid="14c68aad-0435-454b-8bc7-6556f60b034e" _uuid="58335a1bb472e68aaddc32827fdb99d9d44e4ab2" # # Introduction # # Congratulations! In this section we will put all of the things that we learned together to do some truly interesting things with some datasets. # # The exercises in this section are therefore more difficult! # # While working through the exercises, tTry using method chaning syntax (use the resource below if you don't know what method chaining means). Also, take advantage the hints we provide. # # # Relevant Resource # # * **[Method chaining resource](https://www.kaggle.com/residentmario/method-chaining-reference). ** # # # Set Up # **First, fork this notebook using the "Fork Notebook" button towards the top of the screen.** # Run the code cell below to load data and the libraries you'll use. # + _cell_guid="0559dd85-e762-4699-95d6-28840f18e910" _uuid="a4f1f60fb1c5c7d744bc9089fc58b4be8d082947" import pandas as pd pd.set_option('max_rows', 5) import sys sys.path.append('../input/advanced-pandas-exercises/') from method_chaining import * chess_games = pd.read_csv("../input/chess/games.csv") # + [markdown] _cell_guid="3070c6b9-5ac4-4604-91f7-5fe9128ccfb2" _uuid="2477f759f3c11e964f50c583f211f6e324bad518" # # Checking Answers # # Check your answers in each of the exercises that follow using the `check_qN` function provided in the code cell above (replacing `N` with the number of the exercise). 
For example here's how you would check an incorrect answer to exercise 1: # + _cell_guid="47ec828b-f315-4126-a534-5d0b5af6f973" _uuid="b8490e33adb514ab0bd4b92911fb4d1c9e018b51" check_q1(pd.DataFrame()) # + [markdown] _cell_guid="96b7eed1-d290-4da7-aeb0-92e7381e7db1" _uuid="dd23107bed53e3ab1ece7a091ed29d2dd826c6c5" # For the first set of questions, if you use `check_qN` on your answer, and your answer is right, a simple `True` value will be returned. # # For the second set of questions, using this function to check a correct answer will present you will an informative graph! # # If you get stuck, you may also use the companion `answer_qN` function to print the answer outright. # # # Preview Data # Run the cell below to preview the data # + _cell_guid="e37fcee3-75fc-4b4e-a64d-94620954fc76" _uuid="a3dd68c0acae64ea7f7b1eddb9056cfe93162f55" chess_games.head() # + [markdown] _cell_guid="0d4748cd-9a43-4928-9ba3-9c42a4bfbb0d" _uuid="76a8860fa325d3324028437fa4b6ec392e7a279e" # # Exercises # + [markdown] _cell_guid="be821524-d37d-4128-9d72-105ad898cb48" _uuid="9404e800ee7ca838451d8f420fa23f828fbc06f1" # **Exercise 1**: It's well-known that in the game of chess, white has a slight first-mover advantage against black. Can you measure this effect in this dataset? Use the `winner` column to create a `pandas` `Series` showing how often white wins, how often black wins, and how often the result is a tie, as a ratio of total games played. In other words, a `Series` that looks something like this: # # white 0.48 # black 0.44 # draw 0.08 # Name: winner, dtype: float64 # # Hint: use `len` to get the length of the initial `DataFrame`, e.g. the count of all games played. 
# + _cell_guid="130df29a-ff7c-4c43-8f50-2589f70c8a3b" _uuid="d5d654a0cf135ca0cde5381f45e559eb240ecd17" temp = chess_games.winner.value_counts()/len(chess_games) print (check_q1(temp), '\n\n', temp) # + [markdown] _cell_guid="9e7cc907-78c8-4771-8767-433b26a501fe" _uuid="0afa1a01f6339744f117e40bc564bb8199117fba" # **Exercise 2**: The `opening_name` field of the `chess_games` dataset provides interesting data on what the most commonly used chess openings are. However, it gives a bit _too_ much detail, including information on the variation used for the most common opening types. For example, rather than giving `Queen's Pawn Game`, the dataset often includes `Queen's Pawn Game: Zukertort Variation`. # # This makes it a bit difficult to use for categorical purposes. Here's a function that can be used to separate out the "opening archetype": # # ```python # lambda n: n.split(":")[0].split("|")[0].split("#")[0].strip() # ``` # # Use this function to parse the `opening_name` field and generate a `pandas` `Series` counting how many times each of the "opening archetypes" gets used. Hint: use a map. # + _cell_guid="25fd7631-0e96-4c7b-979c-b6b1571a6001" _uuid="4c831eb5dd7eff5b765e5f056c7301447d6d75d3" temp = chess_games.opening_name.map(lambda n: n.split(":")[0].split("|")[0].split("#")[0].strip()).value_counts() print (check_q2(temp), '\n\n', temp) # + [markdown] _cell_guid="77d52589-e77b-4f2a-8368-b648542d7cfd" _uuid="4c00c11c77310fbbb1d80c7ab0b9c2cec6a85d8c" # **Exercise 3**: In this dataset various players play variably number of games. Group the games by `{white_id, victory_status}` and count how many times each white player ended the game in `mate` , `draw`, `resign`, etcetera. The name of the column counting how many times each outcome occurred should be `n` (hint: `rename` or `assign` may help). 
# + _cell_guid="c718e12b-91bd-45f1-82e7-d9eba54bce1a" _uuid="0d8aceaf6237842bfeb5b715dbc661b3cd65b537" temp = chess_games.assign(n=0).groupby(['white_id', 'victory_status']).n.apply(len).reset_index() temp # + [markdown] _cell_guid="fbb9dbed-4013-43d2-aa6a-23b4b3553e41" _uuid="d8270a4063738d6a9cfc1f475e0b5b94e36689a2" # **Exercise 4**: There are a lot of players in the dataset who have only played one or a small handful of games. Create a `DataFrame` like the one in the previous exercise, but only include users who are in the top 20 users by number of games played. See if you can do this using method chaining alone! Hint: reuse the code from the previous example. Then, use `pipe`. # + _cell_guid="b7bdf858-155d-4fae-a21a-373dfb8056e2" _uuid="9a335c51d7c7b41c7af17b79927089882a9012d9" #chess_games.white_id.value_counts().sort_index() #temp['white_id'].value_counts().iloc[:20] temp = temp.pipe(lambda x: x.loc[x.white_id.isin(chess_games.white_id.value_counts().head(20).index)]) print (check_q4(temp), '\n\n', temp) # + _uuid="3931b5e39fac13032c01e192ad2cc4c3ae0ebaa6" chess_games.white_id.value_counts().head(20).index # + [markdown] _cell_guid="bd59912b-744e-4d7b-8f86-e787d7121d46" _uuid="c28a91527be0920116c8bf017a766fd28bf3343c" # Next, let's do some visual exercises. # # The next exercise uses the following dataset: # + _cell_guid="f5ac1f2a-4bc5-4c6a-9e77-9f1836aa9f27" _uuid="96a82b2ab00aeeb1c69806201713de3ab31c64e2" kepler = pd.read_csv("../input/kepler-exoplanet-search-results/cumulative.csv") kepler # + [markdown] _cell_guid="ed30c948-bf7d-4014-b46e-e6764b32887c" _uuid="4e97a338fe718b61703699a4f6b8e81c238f63b6" # **Exercise 5**: The Kepler space observatory is in the business of finding potential exoplanets (planets orbiting stars other suns) and, after collecting the evidence, generating whether or not to confirm, decline to confirm, or deny that a given space body is, in fact, an exoplanet. 
In the dataset above, the "before" status of the body is `koi_pdisposition`, and the "after" status is `koi_disposition`. # # Using the dataset above, generate a `Series` counting all of the possible transitions between pre-disposition and post-disposition. In other words, generate a `Series` whose index is a `MultiIndex` based on the `{koi_pdisposition, koi_disposition}` fields, and whose values is a count of how many times each possible combination occurred. # + _cell_guid="4bc2c673-ee88-4502-8b3a-a668567d59a8" _uuid="72cdeb2a1d37c760b9eaf8fb354681cf83b51f1a" kepler.koi_disposition.unique() # + _uuid="e5b8972307e2f4a8b8107a5791430ae6db0f34e4" check_q5(kepler.groupby(['koi_pdisposition', 'koi_disposition']).rowid.count()) # + [markdown] _cell_guid="7225ca0a-2fa8-4bd2-b286-2731bdac0982" _uuid="ceececcbd0f41b807d173ced0672320933cfa99c" # The next few exercises use the following datasets: # + _cell_guid="fda2b4e8-08e2-44b0-abc9-2160e63e64bc" _uuid="c776c108c02cce4fd1152b6330ccdac899ce8b55" wine_reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0) wine_reviews.head() # + _cell_guid="e4627770-9047-4003-b06b-0586935b66e1" _uuid="cd01a4aa45e2f908e8f9c0fc0b069ba87bc17adc" ramen_reviews = pd.read_csv("../input/ramen-ratings/ramen-ratings.csv", index_col=0) ramen_reviews.head() # + [markdown] _cell_guid="c8eae2c9-5dad-4969-8250-8738f763a909" _uuid="a26f10615034d4bd565f56764bf7a62b4ff0544e" # **Exercise 6**: As we demonstrated in previous workbooks, the `points` column in the `wine_reviews` dataset is measured on a 20-point scale between 80 and 100. Create a `Series` which normalizes the ratings so that they fit on a 1-to-5 scale instead (e.g. a score of 80 translates to 1 star, while a score of 100 is five stars). Set the `Series` name to "Wine Ratings", and sort by index value (ascending). 
# + _cell_guid="a265d37a-31c4-4adb-84ca-232821f03e6b" _uuid="3694ed668fe4f9b4487d91c3404d7436a12c4259"
# Rescale points (80-100) onto a 5-step star scale and count reviews per star value.
# NOTE(review): the prompt says 80 -> 1 star, but (x-80)/4 maps 80 -> 0 --
# confirm against check_q6 before changing the mapping.
temp2 = wine_reviews.points.map(lambda x: (x-80)/4).value_counts().sort_index().rename_axis("Wine Ratings")
print (check_q6(temp2))
#check_q6(pd.Series(temp2, name='Wine Ratings'))
#wine_reviews.points.sort_values().plot.hist()

# + [markdown] _cell_guid="0e73353b-5091-43c3-83db-50e8b9e42759" _uuid="b1d5ffc806de05d79260977923dba80468db7c79"
# **Exercise 7**: The `Stars` column in the `ramen_reviews` dataset is the ramen equivalent to the similar data points in `wine_reviews`. Luckily it is already on a 0-to-5 scale, but it has some different problems...create a `Series` counting how many ramens earned each of the possible scores in the dataset. Convert the `Series` to the `float64` dtype and drop ramens whose rating is `"Unrated"`. Set the name of the `Series` to "Ramen Ratings". Sort by index value (ascending).

# + _cell_guid="7ea82b3d-dbb6-4f17-95cb-901aa966520b" _uuid="0c18aab67a0ee106e1d89fe528ea496b1694f25a"
# Use the dict form of replace: with a scalar, Series.replace('Unrated', None)
# treats None as "no replacement value" (fill-method behavior) rather than
# "replace with NaN"; replace({'Unrated': None}) maps the label to NaN as intended,
# so the subsequent dropna() actually removes the unrated rows.
check_q7(ramen_reviews.Stars.replace({'Unrated': None}).dropna().astype('float64').value_counts().sort_index().rename_axis("Ramen Ratings"))
#answer_q7()

# + [markdown] _cell_guid="0cba2cab-5315-4a3c-b1f4-8f07da6901a4" _uuid="a80054f5a7bfa75c7bfbeacc65a480837291ab1d"
# **Exercise 8**: We can see from the result of the previous exercise that whilst the wine reviewers stick to a strict 20-point scale, ramen reviews occasionally deviate into fractional numbers. Modify your answer to the previous exercise by rounding review scores to the nearest half-point (so 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, or 5).
# + _uuid="3b577d20170972b0f99ef9afed5747791de13f5f"
# Scratch cell: checking how the built-in round() behaves.
round(3.7, 0)

# + _cell_guid="ee8497a9-a6a8-4a0d-b631-2be0ef64f5ba" _uuid="2d906a816ffccd120d1290dfc35c97fb9c01d634"
# Round each rating to the NEAREST half point via round(x * 2) / 2.
# (The previous lambda -- int(x) if x - int(x) < 0.5 else int(x) + 0.5 -- floored
# to the half point BELOW, so e.g. 3.75 became 3.5 instead of 4.0.)
# Also use the dict form of replace: a scalar None value is read by pandas as
# "no replacement value" rather than "replace with NaN".
# NOTE(review): exact ties (x.25 / x.75) follow round()'s round-half-to-even rule.
check_q8(ramen_reviews.Stars.replace({'Unrated': None}).dropna().astype('float64').map(lambda x: round(x * 2) / 2).value_counts().sort_index().rename_axis("Ramen Reviews"))

# + [markdown] _cell_guid="9f6b04ae-5afa-459a-911e-c429b869ceb5" _uuid="e6f718a1192226af9e07e366cf57a2b11af3e23c"
# # Congratulations
#
# You've finished the Pandas track. Many data scientists feel efficiency with Pandas is the most useful and practical skill they have, because it allows you to progress quickly in any project you have.
#
# You can take advantage of your Pandas skills by entering a [Kaggle Competition](https://www.kaggle.com/competitions) or answering a question you find interesting using [Kaggle Datasets](https://www.kaggle.com/datasets).
Tutorials/Pandas/Method Chaining.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Goal # # ## Questions # # * How is incorporator identification accuracy affected by the percent isotope incorporation of taxa? # * How variable is sensitivity depending on model stochasticity # * Each simulation has differing taxa as incorporators, therefore, the incorporators then differ by GC and abundance between simulations # # # ## Method # # * Using genome dataset created in the "dataset" notebook # # * Simulates isotope dilution or short incubations # * Method # * 25% taxa incorporate # * incorporation % same for all incorporators # * incorporation % treatments: 0, 5, 10, 25, 50 # * n-replicates = 10 # * Total treatments: 50 # # User variables workDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/' buildDir = os.path.join(workDir, 'percIncorpUnifRep') genomeDir = '/home/nick/notebook/SIPSim/dev/bac_genome1210/genomes/' R_dir = '/home/nick/notebook/SIPSim/lib/R/' # # Init import glob from os.path import abspath import nestly from IPython.display import Image, display # %load_ext rpy2.ipython # + language="R" # library(ggplot2) # library(dplyr) # library(tidyr) # library(gridExtra) # - if not os.path.isdir(buildDir): os.makedirs(buildDir) # # Creating input files (eg., fragments & communities) # ## Simulating fragments # !cd $buildDir; \ # SIPSim fragments \ # $genomeDir/genome_index.txt \ # --fp $genomeDir \ # --fr ../../515F-806R.fna \ # --fld skewed-normal,9000,2500,-5 \ # --flr None,None \ # --nf 10000 \ # --np 24 \ # 2> ampFrags.log \ # > ampFrags.pkl # ## Converting to kde object # !cd $buildDir; \ # SIPSim fragment_kde \ # ampFrags.pkl \ # > ampFrags_kde.pkl # ## Adding diffusion # !cd $buildDir; \ # SIPSim diffusion \ # ampFrags_kde.pkl \ # --np 24 \ # > ampFrags_kde_dif.pkl # # Running nestly # + # building tree structure nest 
= nestly.Nest() ## varying params nest.add('rep', range(1,11)) nest.add('percIncorp', [10, 25, 50]) ## set params nest.add('np_many', [24], create_dir=False) nest.add('np_few', [8], create_dir=False) nest.add('percTaxa', [25], create_dir=False) nest.add('abs', ['1e10'], create_dir=False) #nest.add('subsample', [20000], create_dir=False) nest.add('subsample_mean', [30000], create_dir=False) nest.add('subsample_scale', [5000], create_dir=False) nest.add('BD_min', [1.71], create_dir=False) nest.add('BD_max', [1.75], create_dir=False) nest.add('padj', [0.1], create_dir=False) nest.add('log2', [0.25], create_dir=False) nest.add('topTaxaToPlot', [100], create_dir=False) ## input/output files nest.add('buildDir', [buildDir], create_dir=False) nest.add('frag_file', ['ampFrags_kde_dif'], create_dir=False) nest.add('comm_file', ['comm.txt'], create_dir=False) nest.add('genome_index', [os.path.join(genomeDir, 'genome_index.txt')], create_dir=False) nest.add('R_dir', [R_dir], create_dir=False) # building directory tree nest.build(buildDir) # - bashFile = os.path.join(buildDir, 'SIPSimRun.sh') # + # %%writefile $bashFile # #!/bin/bash # symlinking input files ln -s {buildDir}/{frag_file}.pkl {frag_file}.pkl # Creating a community file SIPSim communities \ {genome_index} \ --n_comm 2 \ > comm.txt # simulating gradient fractions SIPSim gradient_fractions \ {comm_file} \ > fracs.txt # making incorp file SIPSim incorpConfigExample \ --percTaxa {percTaxa} \ --percIncorpUnif {percIncorp} \ > {percTaxa}_{percIncorp}.config # adding isotope incorporation to BD distribution SIPSim isotope_incorp \ {frag_file}.pkl \ {percTaxa}_{percIncorp}.config \ --comm {comm_file} \ --np {np_many} \ > {frag_file}_incorp.pkl # calculating BD shift from isotope incorporation SIPSim BD_shift \ {frag_file}.pkl \ {frag_file}_incorp.pkl \ --np {np_few} \ > {frag_file}_incorp_BD-shift.txt # simulating an OTU table SIPSim OTU_table \ {frag_file}_incorp.pkl \ {comm_file} \ fracs.txt \ --abs {abs} \ --np 
{np_few} \ > OTU_n2_abs{abs}.txt # subsampling from the OTU table (simulating sequencing of the DNA pool) SIPSim OTU_subsample \ --dist normal \ --dist_params loc:{subsample_mean},scale:{subsample_scale} \ OTU_n2_abs{abs}.txt \ > OTU_n2_abs{abs}_sub-norm.txt # making a wide table SIPSim OTU_wideLong -w \ OTU_n2_abs{abs}_sub-norm.txt \ > OTU_n2_abs{abs}_sub-norm_w.txt # making metadata (phyloseq: sample_data) SIPSim OTU_sampleData \ OTU_n2_abs{abs}_sub-norm.txt \ > OTU_n2_abs{abs}_sub-norm_meta.txt # - # !chmod 775 $bashFile # !cd $workDir; \ # nestrun -j 1 --template-file $bashFile -d percIncorpUnifRep --log-file log.txt # ### R analysis # + # %%writefile $bashFile # #!/bin/bash #-- R analysis --# export PATH={R_dir}:$PATH # plotting taxon abundances OTU_taxonAbund.r \ OTU_n2_abs{abs}.txt \ -r {topTaxaToPlot} \ -o OTU_n2_abs{abs} # plotting taxon abundances OTU_taxonAbund.r \ OTU_n2_abs{abs}_sub-norm.txt \ -r {topTaxaToPlot} \ -o OTU_n2_abs{abs}_subsub-norm # running DeSeq2 and making confusion matrix on predicting incorporators ## making phyloseq object from OTU table phyloseq_make.r \ OTU_n2_abs{abs}_sub-norm_w.txt \ -s OTU_n2_abs{abs}_sub-norm_meta.txt \ > OTU_n2_abs{abs}_sub-norm.physeq ## filtering phyloseq object to just taxa/samples of interest phyloseq_edit.r \ OTU_n2_abs{abs}_sub-norm.physeq \ --BD_min {BD_min} \ --BD_max {BD_max} \ > OTU_n2_abs{abs}_sub-norm_filt.physeq ## making ordination phyloseq_ordination.r \ OTU_n2_abs{abs}_sub-norm_filt.physeq \ OTU_n2_abs{abs}_sub-norm_bray-NMDS.pdf ## DESeq2 phyloseq_DESeq2.r \ OTU_n2_abs{abs}_sub-norm_filt.physeq \ --log2 {log2} \ --hypo greater \ > OTU_n2_abs{abs}_sub-norm_DESeq2 ## Confusion matrix DESeq2_confuseMtx.r \ {frag_file}_incorp_BD-shift.txt \ OTU_n2_abs{abs}_sub-norm_DESeq2 \ --padj {padj} # - # !chmod 775 $bashFile # !cd $workDir; \ # nestrun -j 30 --template-file $bashFile -d percIncorpUnifRep --log-file logR.txt # + # aggregating confusion matrix data ## table # !cd $workDir; \ # nestagg delim \ 
# -d percIncorpUnifRep \ # -k percIncorp,rep \ # -o ./percIncorpUnifRep/DESeq2-cMtx_table.csv \ # DESeq2-cMtx_table.csv ## overall # !cd $workDir; \ # nestagg delim \ # -d percIncorpUnifRep\ # -k percIncorp,rep \ # -o ./percIncorpUnifRep/DESeq2-cMtx_overall.csv \ # DESeq2-cMtx_overall.csv ## byClass # !cd $workDir; \ # nestagg delim \ # -d percIncorpUnifRep \ # -k percIncorp,rep \ # -o ./percIncorpUnifRep/DESeq2-cMtx_byClass.csv \ # DESeq2-cMtx_byClass.csv # - # # Analyzing the data # + magic_args="-i workDir" language="R" # # setwd(workDir) # # byClass = read.csv('./percIncorpUnifRep/DESeq2-cMtx_byClass.csv') # # byClass %>% head # + magic_args="-w 500 -h 350" language="R" # # col2keep = c('Balanced Accuracy', 'Sensitivity','Specificity') # # byClass.f = byClass %>% # filter(X %in% col2keep) %>% # mutate(percIncorp = as.character(percIncorp)) # # ggplot(byClass.f, aes(X, byClass, fill=percIncorp)) + # geom_boxplot(position='dodge') + # labs(y='Value') + # theme( # text = element_text(size=16), # axis.title.x = element_blank() # ) # -
ipynb/bac_genome/n1210/.ipynb_checkpoints/perc_incorp_unif_rep-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# This notebook is the scratch work behind my post on microbiome data availability.

import pandas as pd

fname = 'Disease datasets list - data blog post.tsv'
df = pd.read_csv(fname, sep='\t')
df.head()

# All rows in "data availability" column are filled out
df.shape, df.dropna(subset=['Data availability']).shape

# "full data" = datasets whose Status mentions AWS (i.e. data we host ourselves).
df['full_data'] = ['AWS' in str(i) for i in df['Status'].values]
print(len([i for i in df['Status'].dropna().values if 'AWS' in i]))
#df.dropna(subset=['Manual meta analysis?'])[['dataset_id', 'Manual meta analysis?', 'Data availability', 'Status']]

df.groupby('full_data').size()

# Normalize availability labels to lower case before grouping on them.
df['Data availability'] = df['Data availability'].str.lower()

df.groupby(['full_data', 'Data availability']).size()

# # Have full data

df.query('full_data == True').sort_values(by='Data availability')

# Labels meaning "data was retrievable from a public internet source".
internet = ['ena', 'sra', 'mg-rast', 'author website']

df = df.rename(columns={'Data availability': 'data_avail'})

# In .query(), `== @internet` against a list behaves like isin().
df.query('data_avail == @internet').sort_values(by='Clinical metadata availability')

# ## Have full data, got data from internet

df.query('full_data == True')\
    .query('data_avail == @internet')\
    .groupby(['data_avail', 'Clinical metadata availability'])\
    .size()

df.query('full_data == True').query('data_avail == @internet').shape

# ## Have full data, didn't get data from internet

df.query('full_data == True')\
    .query('data_avail != @internet')\
    .groupby(['data_avail', 'Clinical metadata availability'])\
    .size()

# # Don't have full data

df.query('full_data == False').groupby('data_avail').size().sort_values()

# ## Don't have full data, but raw data on SRA

df.query('full_data == False').query('data_avail == "sra"')

df.query('full_data == False').query('data_avail == "author website"')

# ## Don't have full data, emailed

# Merge the two "had to email" labels into one category.
df['data_avail'] = df['data_avail'].replace('would need to email', 'emailed')

df.query('full_data == False').query('data_avail == "emailed"').shape

df.query('full_data == False').query('data_avail == "emailed"').groupby("If emailed, response").size()

# +
got_data = ['Got data from authors', 'Responded, sent via ftp']
df = df.rename(columns={"If emailed, response": 'email_response'})

# Columns to show when inspecting the emailed-and-responded subset.
keepcols = ['Manual meta analysis?', 'data_avail', 'email_response',
            'Comments on email exchange/data',
            'Clinical metadata availability', 'Clinical metadata notes',
            'Processing information', 'Other comments',
            u'Status', u'Name in AWS', u'Email sent out?',
            'Disease', 'First and last authors']

df.query('full_data == False').query('data_avail == "emailed"').query('email_response == @got_data')[keepcols]
# -

df.query('full_data == False').query('data_avail == "emailed"').query('email_response == "data was being put up on SRA"')[keepcols + ['Year']]
_jupyter/2018-04-17-data-availability/2018-04-17-data-availability.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Overview
#
# `cycontext` comes with a default knowledge base which is loaded by default. While these rules will cover a large number of use cases, users will often want to customize or extend the modifiers included in a knowledge base. In cycontext, users can define their own modifiers and control their behavior through the `ContextItem` class.
#
# In this notebook, we'll dive deeper into the `ConTextItem` and `TagObject` classes and show how to use them to add and customize new rules

# +
import spacy

from cycontext import ConTextItem, ConTextComponent
from cycontext.viz import visualize_dep, visualize_ent
# -

# NER is not needed for these examples. Pass the documented list form to
# `disable` -- the previous bare string disable="ner" only worked by accident
# of Python substring membership.
nlp = spacy.load("en_core_web_sm", disable=["ner"])

# # Modifiers

# ## ConTextItem

# The knowledge base of cycontext is defined by ConTextItem objects. A ConTextItem is instantiated with the following parameters:
#
# - **literal** (str): The actual string of a concept. If pattern is None,
# this string will be lower-cased and matched to the lower-case string.
# - **category** (str): The semantic class of the item.
# - **pattern** (list or None): A spaCy pattern to match using token attributes.
# See https://spacy.io/usage/rule-based-matching.
# - **rule** (str): The directionality or action of a modifier.
# One of ("forward", "backward", "bidirectional", or "terminate").
# - **allowed_types** (set or None): A set of target labels to allow a modifier to modify.
# If None, will apply to any type not specifically excluded in excluded_types.
# Only one of allowed_types and excluded_types can be used. An error will be thrown
# if both or not None.
# - **excluded_types** (set or None): A set of target labels which this modifier cannot modify.
# If None, will apply to all target types unless allowed_types is not None.
# - **max_targets** (int or None): The maximum number of targets which a modifier can modify. # If None, will modify all targets in its scope. # - **max_scope** (int or None): A number to explicitly limit the size of the modifier's scope # ## TagObject # # When a ConTextItem is matched to a string of text, it generates a `TagObject` which is stored in `doc._.context_graph.modifiers`. If it modifies any targets, these relationships can be found as tuples in `doc._.context_graph.edges`. The TagObject also contains a reference to the original ConTextItem. # # In addition to the attributes of the original ItemData such as **literal** and **category**, a TagObject contains the following attributes: # - **span**: The spaCy Span of the matched text # - **scope**: The spaCy Span of the Doc which is within the TagObject's scope. Any targets in this scope will be modified by the TagObject # - **start**: Start index # - **end**: End index (non-inclusive) # # Examples # ## 1. Default Rules # When you instantiate `ConTextComponent`, a default list of `ConTextItem`s is loaded and included in the `context.item_data` attribute. context = ConTextComponent(nlp, rules="default") context.item_data[:5] print(context.item_data[0]) print(type(context.item_data[0])) len(context.item_data) # We can also see the unique categories in the knowledge base by checking `context.categories`: context.categories # ## 2: Basic Usage # Here, we'll load a blank context component and define our own item data. 
# We'll use an example we've seen earlier, where we need to negate **"pneumonia"**:

doc = nlp("There is no evidence of pneumonia.")

# First, we instantiate context and pass in `rules=None`:

context = ConTextComponent(nlp, rules=None)

# Next, we'll define a ConTextItem with following arguments:
# - `literal=`**"no evidence of"**: This is the string of text which ConText will look for in the text (case insensitive)
# - `category=`**"NEGATED_EXISTENCE"**: The semantic class assigned to our modifier
# - `rule=`**"forward"**: This defines the *directionality* of the rule. A later example shows more examples of this
#
# We'll leave the other arguments blank. Next, we instantiate our ConTextItem as `item` and put it in a list called `item_data`.

item = ConTextItem(literal="no evidence of", category="NEGATED_EXISTENCE", rule="FORWARD")
item_data = [item]

# We then add the modifiers to ConText with the `context.add()` method:

context.add(item_data)

context.item_data

# Now we can call context on our doc. This will typically happen under the hood as part of the nlp pipeline, but you can call it manually on a doc as well:

context(doc)

# We can see if any modifiers were created by context by looking at the `doc._.context_graph` attribute, which stores all of the information generated on a doc by context. `modifiers` stores the `TagObjects` created by context, and `edges` stores the relationships between the modifiers and targets. Here, we match a modifier with the custom `item_data` that we created, but there are no edges because there are no target concepts in doc.ents yet.

print(doc._.context_graph)
print(doc._.context_graph.modifiers)
print(doc._.context_graph.edges)
print(doc.ents)

# Each element of `context_graph.modifiers` is a `TagObject`.
# Let's look at the tag object in this doc and see some of the attributes which are available:

tag_object = doc._.context_graph.modifiers[0]

# `tag_object.span` is the spaCy Span of the Doc which was matched, and has a `start` and `end` index:

print(tag_object.span)
print(tag_object.start, tag_object.end)

# `tag_object.scope` shows what part of the sentence could be modified by the modifier. Any targets in this span of text will be modified:

print(tag_object.scope)

# We can also see the original `ConTextItem` object and attributes:

print(tag_object.category, ",", tag_object.rule)

# The reference to the original ConTextItem
print(tag_object.context_item)

# The TagObject holds the very same ConTextItem instance we registered above.
assert tag_object.context_item is item_data[0]

# ## Example 3: Pattern-matching

# In this example, we'll use a matching pattern to generate a more flexible matching criteria to match multiple texts with a single ConTextItem. If only `literal` is supplied, the exact phrase is matched in lower case. spaCy offers powerful rule-based matching which operates on each token in a Doc. Matching patterns can use the text, regular expression patterns, linguistic attributes such as part of speech, and operators such as **"?"** (0 or 1) or **"*"** (0 or more) to match sequences of text.
#
# For more detailed information, see spaCy's documentation on rule-based matching: https://spacy.io/usage/rule-based-matching.
#
# The ConTextItem below has the same literal, category, and rule as our previous example, but it also includes a pattern which allows the tokens "evidence" and "of" to be optional. This will then match both "no evidence of" and "no" and assign both spans of text to be negation modifiers.
# Token-level pattern: "evidence" and "of" are optional ("OP": "?"), so this
# one rule matches both "no evidence of" and a bare "no".
item_data = [ConTextItem(literal="no evidence of", category="NEGATED_EXISTENCE", rule="forward",
                         pattern=[{"LOWER": "no"},
                                  {"LOWER": "evidence", "OP": "?"},
                                  {"LOWER": "of", "OP": "?"},
                                  ]
                         )]

context = ConTextComponent(nlp)
context.add(item_data)

texts = ["THERE IS NO EVIDENCE OF PNEUMONIA.", "There is no CHF."]
docs = list(nlp.pipe(texts))
for doc in docs:
    context(doc)

for doc in docs:
    print(doc._.context_graph.modifiers)

# Under the hood, these matches are generated using two of spaCy's rule-based matching classes:
# - **[PhraseMatcher](https://spacy.io/api/phrasematcher)** for literals
# - **[Matcher](https://spacy.io/api/matcher)** for patterns

context.matcher

context.phrase_matcher

# ## Example 3: Direction

# The `rule` attribute defines which direction modifiers should operate. You can imagine an arrow starting at the modifier in a phrase and moving *towards* the target. If the modifier comes before the target, the arrow will move **forward** in the sentence all targets in the sentence *after* the TagObject will be modified. If **"backward"**, it will move **backward** in the sentence and match all targets *before*. If **"bidirectional"** it will look both ahead and behind.
#
# The scope of a modifier is bounded to be within the same sentence, so no modifier will affect targets in other sentences. This can be problematic in poorly split documents, but it prevents all targets in a document from being incorrectly modified by a ConText item. A scope is also defined by any termination points, which will be shown in the next example.
# One rule per directionality to show how each rule shapes the scope.
item_data = [ConTextItem("no evidence of", "NEGATED_EXISTENCE", "FORWARD"),
             ConTextItem("is ruled out", "NEGATED_EXISTENCE", "BACKWARD"),
             ConTextItem("unlikely", "POSSIBLE_EXISTENCE", "BIDIRECTIONAL"),
             ]

texts = ["No evidence of pneumonia.", "PE is ruled out", "unlikely to be malignant", "malignancy unlikely"]
docs = nlp.pipe(texts)

context = ConTextComponent(nlp, rules=None)
context.add(item_data)

for doc in docs:
    context(doc)
    modifier = doc._.context_graph.modifiers[0]
    print(doc)
    print("[{0}:{1}] will modify {2}".format(modifier.span, modifier.category, modifier.scope))
    print(modifier.rule)
    print()

# ## Example 4: Termination points

# As said before, the scope of a modifier is originally set to the entire sentence either before after a TagObject, as defined by the ItemData's `rule` attribute. However, the scope can be modified by **termination points**, which is another TagObject with the rule **"TERMINATE"**. For example, in "There is no evidence of pneumonia but there is CHF", the negation modifier should modify "pneumonia" but not "CHF". This can be achieved by defining a ConTextItem to terminate at the word "but".

text = "There is no evidence of pneumonia but there is CHF"

item_data1 = [ConTextItem("no evidence of", "NEGATED_EXISTENCE", "FORWARD")]
context = ConTextComponent(nlp, rules=None)
context.add(item_data1)

doc = nlp(text)
context(doc)

tag_object = doc._.context_graph.modifiers[0]
tag_object

# The scope includes both "pneumonia" and "CHF", so both would be negated
tag_object.scope

# Now add an additional ConTextItem with "TERMINATE"
item_data2 = [ConTextItem("but", "CONJ", "TERMINATE")]
context.add(item_data2)

doc = nlp(text)
context(doc)
tag_object = doc._.context_graph.modifiers[0]

# The scope now only encompasses "pneumonia"
tag_object.scope

# ## Example 5: Pruned modifiers

# If two ConTextItems result in TagObjects where one is the substring of another, the modifiers will be pruned to keep **only** the larger span.
# For example, **"no history of"** is a negation modifier, while **"history of"** is a historical modifier. Both match the text "no history of afib", but only "no history of" should ultimately modify "afib".
#
# By default, prune is set to `True`, but can be set to `False` when initiating the context component, as shown below.

item_data = [ConTextItem("no history of", "DEFINITE_NEGATED_EXISTENCE", "FORWARD"),
             ConTextItem("history", "HISTORICAL", "FORWARD"),
             ]
text = "no history of"

context = ConTextComponent(nlp, rules=None, prune=False)
context.add(item_data)
doc = nlp(text)
context(doc)

# Two overlapping modifiers
doc._.context_graph.modifiers

# Now set prune to True
# NOTE(review): unlike the cell above, this component does not pass rules=None,
# so the default rules are loaded as well -- confirm that is intended.
context = ConTextComponent(nlp, prune=True)
context.add(item_data)
doc = nlp(text)
context(doc)

# Only one modifier is left
doc._.context_graph.modifiers

# ## Example 6: Manually limiting scope

# By default, the scope of a modifier is the **entire sentence** in the direction of the rule up until a termination point (see above). However, sometimes this is too much. In long sentences, this can cause a modifier to extend far beyond its location in the sentence. Some modifiers are really meant to be attached to a single concept, but they are instead distributed to all targets.
#
# To fix this, cycontext allows optional attributes in `ItemData` to limit the scope: `max_scope` and `max_targets`. Both attributes are explained below.

# ### max_targets

# Some modifiers should really only attach to a single target. For example, in the sentence below:
#
# **"Pt presents with diabetes, pneumonia vs COPD"**
#
# **"vs"** indicates uncertainty, but *only* between **"pneumonia"** and **"COPD"**. **"Diabetes"** should not be affected. We can achieve this by creating a bidirectional rule with a `max_targets` of **1**. This will limit the number of targets to 1 *on each side* of the tag object.
# # Let's first see what this looks like *without* defining `max_targets`: text = "Pt presents with diabetes, pneumonia vs COPD" doc = nlp(text) doc.ents = (doc[3:4], doc[5:6], doc[7:8]) doc.ents item = ConTextItem("vs", category="UNCERTAIN", rule="BIDIRECTIONAL", max_scope=None) context = ConTextComponent(nlp, rules=None) context.add([item]) context(doc) visualize_dep(doc) # Now, let's start over and set `max_targets` to **1**: doc = nlp(text) doc.ents = (doc[3:4], doc[5:6], doc[7:8]) item = ConTextItem("vs", category="UNCERTAIN", rule="BIDIRECTIONAL", max_targets=1) context = ConTextComponent(nlp, rules=None) context.add([item]) context(doc) visualize_dep(doc) # ### max_scope # One limitation of using `max_targets` is that in a sentence like the example above, each concept has to be extracted as an entity in order for it to reduce the scope - if **"pneumonia"** was not extracted, then **"vs"** would still etend as far back as **"diabetes"**. # # We can address this by explicitly setting the scope to be no greater than a certain number of tokens using `max_scope`. For example, lab results may show up in a text document with many individual results: # # --- # Adenovirus DETECTED<br> # SARS NOT DETECTED<br> # ... # Cov HKU1 NOT DETECTED<br> # # --- # # Texts like this are often difficult to parse and they are often not ConText-friendly because many lines can be extracted as a single sentence. By default, a modifier like **"NOT DETECTED"** could extend far back to a concept such as **"Adenovirus"**, which we see returned positive. We may also not explicitly extract every virus tested in the lab, so `max_targets` won't work. # # With text formats like this, we can be fairly certain that **"Not Detected"** will only modify the single concept right before it. We can set `max_scope` to be so **only** a single concept will be modified. 
text = """Adenovirus DETECTED
Sars NOT DETECTED
Pneumonia NOT DETECTED"""
doc = nlp(text)
# Manually mark the three test subjects as entities.
doc.ents = (doc[0:1], doc[2:3], doc[5:6])
doc.ents

# The three lab lines are parsed as one long sentence, which is exactly the
# situation max_scope is meant for.
assert len(list(doc.sents)) == 1

item_data = [ConTextItem("DETECTED", category="POSITIVE_EXISTENCE", rule="BACKWARD", max_scope=None),
             ConTextItem("NOT DETECTED", category="DEFINITE_NEGATED_EXISTENCE", rule="BACKWARD", max_scope=None),
             ]
context = ConTextComponent(nlp, rules=None)
context.add(item_data)

context(doc)

visualize_dep(doc)

# Let's now set `max_scope` to 1 and we'll find that only **"pneumonia"** and **"Sars"** are modified by **"NOT DETECTED"**:

doc = nlp(text)
doc.ents = (doc[0:1], doc[2:3], doc[5:6])
doc.ents

item_data = [ConTextItem("DETECTED", category="POSITIVE_EXISTENCE", rule="BACKWARD", max_scope=1),
             ConTextItem("NOT DETECTED", category="DEFINITE_NEGATED_EXISTENCE", rule="BACKWARD", max_scope=1),
             ]
context = ConTextComponent(nlp, rules=None)
context.add(item_data)

context(doc)

visualize_dep(doc)

# ## Example 7: Filtering target types

# You may want modifiers to only modify targets with certain semantic classes. You can specify which types to be modified/not be modified through the `allowed_types` and `excluded_types` arguments.
#
# For example, in the sentence:
#
# ---
# "She is not prescribed any beta blockers for her hypertension."
#
# ---
#
# **"Beta blockers"** is negated by the phrase **"not prescribed"**, but **"hypertension"** should not be negated.
# By default, a modifier will modify all concepts in its scope, regardless of semantic type:

from spacy.tokens import Span

# Let's write a function to create this manual example
def create_medication_example():
    """Build a Doc with one MEDICATION and one CONDITION entity set by hand.

    Returns a fresh Doc for the sentence used throughout this example, so each
    cell below can start from an unmodified copy.
    """
    doc = nlp("She is not prescribed any beta blockers for her hypertension.")
    # Manually define entities
    medication_ent = Span(doc, 5, 7, "MEDICATION")
    condition_ent = Span(doc, 9, 10, "CONDITION")
    doc.ents = (medication_ent, condition_ent)
    return doc

doc = create_medication_example()
doc

# Define our item data without any type restrictions
item_data = [ConTextItem("not prescribed", "NEGATED_EXISTENCE", "FORWARD")]
context = ConTextComponent(nlp, rules="other", rule_list=item_data)
context(doc)

# Visualize the modifiers
visualize_dep(doc)

# To change this, we can make sure that **"not prescribed"** only modifies **MEDICATION** entities by setting `allowed_types` to **"MEDICATION"**;

item_data2 = [ConTextItem("not prescribed", "NEGATED_EXISTENCE", "FORWARD", allowed_types={"MEDICATION"})]
context = ConTextComponent(nlp, rules="other", rule_list=item_data2)

doc = create_medication_example()
context(doc)

# Now, only **"beta blockers"** will be negated:

visualize_dep(doc)

# The same can be achieved by setting `excluded_types` to `{"CONDITION"}`.

item_data3 = [ConTextItem("not prescribed", "NEGATED_EXISTENCE", "FORWARD", excluded_types={"CONDITION"})]

# # Setting additional Span attributes

# As seen in an earlier notebook, cycontext registers two new attributes for target Spans: `is_experienced` and `is_current`. These values are set to default values of True and changed if a target is modified by certain modifiers. This logic is set in the variable `DEFAULT_ATTRS`. This is a dictionary which maps modifier category names to the attribute name/value pair which should be set if a target is modified by that modifier type.
from cycontext.context_component import DEFAULT_ATTRS DEFAULT_ATTRS # ## Defining custom attributes # Rather than using the logic shown above, you can set your own attributes by creating a dictionary with the same structure as DEFAULT_ATTRS and passing that in as the `add_attrs` parameter. If setting your own extensions, you must first call `Span.set_extension` on each of the extensions. # # If more complex logic is required, custom attributes can also be set manually outside of the ConTextComponent, for example as a post-processing step. # # Below, we'll create our own attribute mapping and have them override the default cycontext attributes. We'll defined `is_experienced` and `is_family_history`. Because both a negated concept and a family history concept are not actually experienced by a patient, we'll specify both to set `is_experienced` to False. We'll also set the family history modifier to add a new attribute called `is_family_history`. from spacy.tokens import Span # Define modifiers and Span attributes custom_attrs = { 'NEGATED_EXISTENCE': {'is_experienced': False}, 'FAMILY_HISTORY': {'is_family_history': True, 'is_experienced': False}, } # Register extensions - is_experienced should be True by default, `is_family_history` False Span.set_extension("is_experienced", default=True) Span.set_extension("is_family_history", default=False) context = ConTextComponent(nlp, rules=None, add_attrs=custom_attrs) context.context_attributes_mapping # + item_data = [ConTextItem("no evidence of", "NEGATED_EXISTENCE", "FORWARD"), ConTextItem("family history", "FAMILY_HISTORY", "FORWARD"), ] context.add(item_data) # + doc = nlp("There is no evidence of pneumonia. 
Family history of diabetes.") doc.ents = doc[5:6], doc[-2:-1] doc.ents # - context(doc) # The new attributes are now available in `ent._`: for ent in doc.ents: print(ent) print("is_experienced: ", ent._.is_experienced) print("is_family_history: ", ent._.is_family_history) print() # # Reading and Writing a Knowledge Base # ConTextItems can be saved as JSON and read in, which allows a knowledge base to be reused and scaled. When you install `cycontext` with pip or `python setup.py install`, it includes a JSON file of default modifier rules. That file is also included in the GitHub repo: # # https://github.com/medspacy/cycontext/blob/master/kb/default_rules.json # # The filepath on your local machine can be accessed in the constant `DEFAULT_RULES_FILEPATH`. Let's look at the first 10 lines of this file: from cycontext import DEFAULT_RULES_FILEPATH DEFAULT_RULES_FILEPATH with open(DEFAULT_RULES_FILEPATH) as f: print(f.read()[:500]) # A JSON file of item data can be loaded with the `ConTextItem.from_json` method: item_data = ConTextItem.from_json(DEFAULT_RULES_FILEPATH) for item in item_data[:5]: print(item) # The items can also be saved as JSON by using the `ConTextItem.to_json` method: ConTextItem.to_json(item_data[:2], "2_modifiers.json") import json with open("2_modifiers.json") as f: print(json.load(f))
notebooks/02_Customizing_Modifiers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 数据关联 # # ```{note} # 在上一节,我们学习了 Spark SQL 支持的诸多算子,其中数据关联(join)是数据分析中很常见、很重要的操作。 # 数据关联可分为内关联、外关联、左关联和右关联。 # ``` # ## 准备数据 # + from pyspark.sql import SparkSession spark = (SparkSession .builder .appName("df operators") .config('spark.executor.memory', '3g') .config('spark.driver.memory', '6g') .config("spark.executor.cores", 2) .config('spark.cores.max', 4) .getOrCreate()) # - # 创建员工信息表 seq = [(1, "Mike", 28, "Male"), (2, "Lily", 30, "Female"), (3, "Raymond", 26, "Male"), (5, "Dave", 36, "Male")] employeesDF = spark.createDataFrame(seq, ["id", "name", "age", "gender"]) employeesDF.show() # 创建员工薪水表 seq2 = [(1, 26000), (2, 30000), (4, 25000), (3, 20000)] salariesDF = spark.createDataFrame(seq2, ["id", "salary"]) salariesDF.show() # ## 内关联 # # 仅保留左右表中满足条件的数据记录。 # 左表中 name=Dave 的记录被丢弃了 # 右表中 id=4 的记录被丢弃了 employeesDF.join(salariesDF, "id", "inner").show() # # 左关联 # # 以左表为准,保留所有左表记录,尽量去匹配右表 # name=Dave 的记录被保留,salary=null employeesDF.join(salariesDF, "id", "left").show() # ## 右关联 # # 与左关联相反,以右表为准 # id=5 的记录被保留,name=age=gender=null employeesDF.join(salariesDF, "id", "right").show() # ## 外关联 # # 也被称为全关联(full),顾名思义,我全都要。 # name=Dave 和 id=5 的记录都被保留 employeesDF.join(salariesDF, "id", "outer").show()
sql/5.join.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Deep Learning: Rooms of a House # %reload_ext autoreload # %autoreload 2 # %matplotlib inline import os from fastai.imports import * from fastai.conv_learner import * from fastai.plots import * PATH = 'dl-rooms/data/' TRAIN = f'{PATH}train/' # TEST = f'{PATH}test/' LABELS = f'{PATH}labels.csv' arch = resnet50 # ## 1 Initial Data Exploration # !ls {PATH} # !ls {TRAIN} os.listdir(TRAIN) data = [] for folder in os.listdir(TRAIN): for image in os.listdir(f'{TRAIN}{folder}'): data.append({ 'file': f'{folder}/{image}', 'room_name': folder }) data labels_df = pd.DataFrame(data, columns=['file', 'room_name']) labels_df.head() labels_df.to_csv(LABELS, index=False) n = len(list(open(LABELS))) - 1 val_idxs = get_cv_idxs(n) sz = 224 tfms = tfms_from_model(arch, sz, aug_tfms=transforms_top_down, max_zoom=1.1) data = ImageClassifierData.from_csv(PATH, 'train', LABELS, val_idxs=val_idxs, tfms=tfms) fname = PATH + data.trn_ds.fnames[0]; fname img = PIL.Image.open(fname); img size_dict = { k: PIL.Image.open(PATH + k).size for k in data.trn_ds.fnames } row_sz, col_sz = list(zip(*size_dict.values())) row_sz = np.array(row_sz); col_sz = np.array(col_sz) plt.hist(row_sz[row_sz<1500]) plt.hist(col_sz[col_sz<1500]) np.median(row_sz), np.median(col_sz) # ## 2 Initial Model def get_data(sz, bs=64): tfms = tfms_from_model(arch, sz, aug_tfms=transforms_top_down, max_zoom=1.1) return ImageClassifierData.from_csv(PATH, 'train', LABELS, val_idxs=val_idxs, tfms=tfms) # ### 2.1 Precompute sz = 224 bs = 8 data = get_data(sz, bs) learn = ConvLearner.pretrained(arch, data, precompute=True) lrf = learn.lr_find2() learn.sched.plot() learn.fit(1e-2, 10) learn.precompute = False learn.save('224_pre_last_layer') learn.load('224_pre_last_layer') # ### 2.2 Data Augmentation 
learn.precompute = False learn.fit(1e-3, 5, cycle_len=1, cycle_mult=2) learn.save('224_pre_last_layer_with_data_aug') learn.load('224_pre_last_layer_with_data_aug') learn.unfreeze() lr = 1e-2 lrs = [lr/100, lr/10, lr] learn.fit(lrs, 3, cycle_len=1, cycle_mult=2) learn.fit(1e-3/3, 3, cycle_len=2) learn.save('224_all_layers')
dl-rooms-dougshamoo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import seaborn as sns import pandas as pd import matplotlib.pyplot as plt import json import os import os.path as osp import numpy as np # %matplotlib inline # + is_ipython = 'inline' in plt.get_backend() if is_ipython: from IPython import display plt.ion() # + DIV_LINE_WIDTH = 50 # Global vars for tracking and labeling data at load time. exp_idx = 0 units = dict() # - def plot_data(data, xaxis='Epoch', value="AverageEpRet", condition="Condition1", smooth=1, **kwargs): if smooth > 1: """ smooth data with moving window average. that is, smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k]) where the "smooth" param is width of that window (2k+1) """ y = np.ones(smooth) for datum in data: x = np.asarray(datum[value]) z = np.ones(len(x)) smoothed_x = np.convolve(x,y,'same') / np.convolve(z,y,'same') datum[value] = smoothed_x if isinstance(data, list): data = pd.concat(data, ignore_index=True) sns.set(style="darkgrid", font_scale=1.5) sns.tsplot(data=data, time=xaxis, value=value, unit="Unit", condition=condition, ci='sd', **kwargs) """ If you upgrade to any version of Seaborn greater than 0.8.1, switch from tsplot to lineplot replacing L29 with: sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs) Changes the colorscheme and the default legend style, though. 
""" plt.legend(loc='best').set_draggable(True) #plt.legend(loc='upper center', ncol=3, handlelength=1, # borderaxespad=0., prop={'size': 13}) """ For the version of the legend used in the Spinning Up benchmarking page, swap L38 with: plt.legend(loc='upper center', ncol=6, handlelength=1, mode="expand", borderaxespad=0., prop={'size': 13}) """ xscale = np.max(np.asarray(data[xaxis])) > 5e3 if xscale: # Just some formatting niceness: x-axis scale in scientific notation if max x is large plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0)) plt.tight_layout(pad=0.5) def get_datasets(logdir, condition=None): """ Recursively look through logdir for output files produced by spinup.logx.Logger. Assumes that any file "progress.txt" is a valid hit. """ global exp_idx global units datasets = [] for root, _, files in os.walk(logdir): if 'progress.txt' in files: exp_name = None try: config_path = open(os.path.join(root,'config.json')) config = json.load(config_path) if 'exp_name' in config: exp_name = config['exp_name'] except: print('No file named config.json') condition1 = condition or exp_name or 'exp' condition2 = condition1 + '-' + str(exp_idx) exp_idx += 1 if condition1 not in units: units[condition1] = 0 unit = units[condition1] units[condition1] += 1 try: exp_data = pd.read_table(os.path.join(root,'progress.txt')) except: print('Could not read from %s'%os.path.join(root,'progress.txt')) continue performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet' exp_data.insert(len(exp_data.columns),'Unit',unit) exp_data.insert(len(exp_data.columns),'Condition1',condition1) exp_data.insert(len(exp_data.columns),'Condition2',condition2) exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance]) datasets.append(exp_data) return datasets def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None): """ For every entry in all_logdirs, 1) check if the entry is a real directory and if it is, pull data from it; 2) if not, 
check to see if the entry is a prefix for a real directory, and pull data from that. """ logdirs = [] for logdir in all_logdirs: if osp.isdir(logdir) and logdir[-1]==os.sep: logdirs += [logdir] else: basedir = osp.dirname(logdir) fulldir = lambda x : osp.join(basedir, x) prefix = logdir.split(os.sep)[-1] listdir= os.listdir(basedir) logdirs += sorted([fulldir(x) for x in listdir if prefix in x]) """ Enforce selection rules, which check logdirs for certain substrings. Makes it easier to look at graphs from particular ablations, if you launch many jobs at once with similar names. """ if select is not None: logdirs = [log for log in logdirs if all(x in log for x in select)] if exclude is not None: logdirs = [log for log in logdirs if all(not(x in log) for x in exclude)] # Verify logdirs print('Plotting from...\n' + '='*DIV_LINE_WIDTH + '\n') for logdir in logdirs: print(logdir) print('\n' + '='*DIV_LINE_WIDTH) # Make sure the legend is compatible with the logdirs assert not(legend) or (len(legend) == len(logdirs)), \ "Must give a legend title for each set of experiments." # Load data from logdirs data = [] if legend: for log, leg in zip(logdirs, legend): data += get_datasets(log, leg) else: for log in logdirs: data += get_datasets(log) return data def make_plots(all_logdirs, legend=None, xaxis=None, values=None, count=False, font_scale=1.5, smooth=1, select=None, exclude=None, estimator='mean'): data = get_all_datasets(all_logdirs, legend, select, exclude) values = values if isinstance(values, list) else [values] condition = 'Condition2' if count else 'Condition1' estimator = getattr(np, estimator) # choose what to show on main curve: mean? max? min? for value in values: plt.figure() plot_data(data, xaxis=xaxis, value=value, condition=condition, smooth=smooth, estimator=estimator) plt.show() # + # variables """ Args: logdir (strings): As many log directories (or prefixes to log directories, which the plotter will autocomplete internally) as you'd like to plot from. 
legend (strings): Optional way to specify legend for the plot. The plotter legend will automatically use the ``exp_name`` from the config.json file, unless you tell it otherwise through this flag. This only works if you provide a name for each directory that will get plotted. (Note: this may not be the same as the number of logdir args you provide! Recall that the plotter looks for autocompletes of the logdir args: there may be more than one match for a given logdir prefix, and you will need to provide a legend string for each one of those matches---unless you have removed some of them as candidates via selection or exclusion rules (below).) xaxis (string): Pick what column from data is used for the x-axis. Defaults to ``TotalEnvInteracts``. value (strings): Pick what columns from data to graph on the y-axis. Submitting multiple values will produce multiple graphs. Defaults to ``Performance``, which is not an actual output of any algorithm. Instead, ``Performance`` refers to either ``AverageEpRet``, the correct performance measure for the on-policy algorithms, or ``AverageTestEpRet``, the correct performance measure for the off-policy algorithms. The plotter will automatically figure out which of ``AverageEpRet`` or ``AverageTestEpRet`` to report for each separate logdir. count: Optional flag. By default, the plotter shows y-values which are averaged across all results that share an ``exp_name``, which is typically a set of identical experiments that only vary in random seed. But if you'd like to see all of those curves separately, use the ``--count`` flag. smooth (int): Smooth data by averaging it over a fixed window. This parameter says how wide the averaging window will be. select (strings): Optional selection rule: the plotter will only show curves from logdirs that contain all of these substrings. exclude (strings): Optional exclusion rule: plotter will only show curves from logdirs that do not contain these substrings. 
""" ## target log file path logdir = ['/workspaces/play-spinningup-with-docker/spinningup/data/installtest/installtest_s0'] ## [Optional] legends legends = None ## xaxis xaxis = 'TotalEnvInteracts' ## value value = 'Performance' ## count count = 'store_true' ## smooth smooth = 1 ## [Optional] select select = None ## [Optional] exclude exclude = None ## est est = 'mean' # - # main make_plots(logdir, legends, xaxis, value, count, smooth=smooth, select=select, exclude=exclude, estimator=est)
plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Accessing NASADEM data on Azure (COG) # # [NASADEM](https://earthdata.nasa.gov/esds/competitive-programs/measures/nasadem) provides global topographic data at 1 arc-second (~30m) horizontal resolution, derived primarily from data captured via the [Shuttle Radar Topography Mission](https://www2.jpl.nasa.gov/srtm/) (SRTM). # # NASADEM is hosted on Azure in both NetCDF and cloud-optimized GeoTIFF (COG) formats; this notebook demonstrates access to the COG-formatted data. # # This notebook demonstrates access to NASADEM data from blob storage on Azure, including (1) finding the NASADEM tile corresponding to a lat/lon coordinate and (2) opening and plotting the tile. # # This dataset is stored in the West Europe Azure region, so this notebook will run most efficiently on Azure compute located in the same region. If you are using this data for environmental science applications, consider applying for an [AI for Earth grant](http://aka.ms/ai4egrants) to support your compute requirements. # # This dataset is documented at [aka.ms/ai4edata-nasadem](http://aka.ms/ai4edata-nasa). 
# ### Imports and environment # + import os import math import requests import rasterio as rio from rasterio import plot from matplotlib import pyplot as plt nasadem_account_name = 'nasademeuwest' nasadem_container_name = 'nasadem-cog' nasadem_account_url = 'https://' + nasadem_account_name + '.blob.core.windows.net' nasadem_blob_root = nasadem_account_url + '/' + nasadem_container_name + '/v001/' # A full list of files is available at: # # https://nasademeuwest.blob.core.windows.net/nasadem-cog/v001/index/nasadem_cog_list.txt nasadem_file_index_url = nasadem_blob_root + 'index/nasadem_cog_list.txt' nasadem_content_extension = '.tif' nasadem_file_prefix = 'NASADEM_HGT_' # This will contain a list of all available .tif files nasadem_file_list = None # - # ### Read the file list f = requests.get(nasadem_file_index_url) nasadem_file_list = f.text nasadem_file_list = nasadem_file_list.split('\n') print('Read list of {} available files'.format(len(nasadem_file_list))) # ### Functions def lat_lon_to_nasadem_tile(lat,lon): """ Get the NASADEM file name for a specified latitude and longitude """ # A tile name looks like: # # NASADEM_HGT_n00e016.tif # # The translation from lat/lon to that string is represented nicely at: # # https://dwtkns.com/srtm30m/ ns_token = 'n' if lat >=0 else 's' ew_token = 'e' if lon >=0 else 'w' lat_index = abs(math.floor(lat)) lon_index = abs(math.floor(lon)) lat_string = ns_token + '{:02d}'.format(lat_index) lon_string = ew_token + '{:03d}'.format(lon_index) filename = nasadem_file_prefix + lat_string + lon_string + \ nasadem_content_extension if filename not in nasadem_file_list: print('Lat/lon {},{} not available'.format(lat,lon)) filename = None return filename # ### Load a NASADEM tile for a known lat/lon # + # Interesting places for looking at SRTM data everest = [27.9881,86.9250] seattle = [47.6062,-122.3321] grand_canyon = [36.101690, -112.107676] mount_fuji = [35.3606, 138.7274] mont_blanc = [45.832778, 6.865000] invalid = [-15.1,41] 
tile_of_interest = mount_fuji tile_name = lat_lon_to_nasadem_tile(tile_of_interest[0],tile_of_interest[1]) url = nasadem_blob_root + tile_name print('Plotting tile: {}'.format(tile_name)) # - # ### Plot the tile # + src = rio.open(url) d = src.read(1) fig, ax = plt.subplots(figsize=(5, 5)) # create a hidden image so we can add a colorbar image_hidden = ax.imshow(d,cmap='viridis') image = rio.plot.show(d, transform=src.transform, ax=ax, cmap='viridis') fig.colorbar(image_hidden, ax=ax);
data/nasadem-cog.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import matplotlib.pyplot as plt # %matplotlib inline plt.rc('font', size=12) plt.rc('axes', labelsize=12) # - import gglngram as gn import sqlite3 as sq # http://storage.googleapis.com/books/ngrams/books/datasetsv2.html dbconnection = sq.connect('database.sqlite') cursor = dbconnection.cursor() # + getTopBulk = """ SELECT bulk, sum(match_count) AS total_match FROM ngram GROUP BY bulk ORDER BY total_match DESC LIMIT 20 """ getTopNgram = """ SELECT ngram, sum(match_count) AS total_match FROM ngram GROUP BY ngram ORDER BY total_match DESC LIMIT 20 """ # + cursor.execute( getTopNgram ) rows = cursor.fetchall() for row in rows: print(row) # - # ## Match per year # + query = """select year, sum(match_count), sum(volume_count) from ngram group by year ORDER BY year ASC""" cursor.execute( query ) countByYears = cursor.fetchall() years, TotalMatchesPerYear, TotalVolumesPerYear = zip( *countByYears ) print(len(years)) TotalMatchesPerYear = np.array( TotalMatchesPerYear ) # + plt.figure( figsize=(10, 5) ) plt.subplot(2, 1, 1) plt.semilogy( years, TotalMatchesPerYear ) plt.xlabel('years') plt.ylabel('number of matches') plt.subplot(2, 1, 2) plt.semilogy( years, TotalVolumesPerYear ) plt.xlabel('years') plt.ylabel('number of books') # - # ## Match per year for one Ngram def getCountPerYear( ngram, normed=False ): query = """ select year, match_count, volume_count from ngram where ngram is '%s' ORDER BY year ASC """ % ngram cursor.execute( query ) countByYears = cursor.fetchall() years, matchesPerYear, VolumesPerYear = zip( *countByYears ) years, matchesPerYear = np.array(years), np.array(matchesPerYear) if normed: matchesPerYear = matchesPerYear / TotalMatchesPerYear return years, matchesPerYear # + ngram = 'avenir_NOUN' years, 
matchesPerYear = getCountPerYear( ngram ) plt.figure( figsize=(10, 2) ) plt.semilogy( years, matchesPerYear ) plt.xlabel('years') plt.title(ngram); plt.ylabel('number of matches'); # -
explore_database_old.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda root] # language: python # name: conda-root-py # --- # + import numpy as np import random # %matplotlib inline import matplotlib.pyplot as plt def init_vel(N): ''' Function that calculates psuedo-random initial velocities for each specified particle in system. NOTE: does not account for extrenal forces/static system ''' # Initial array with zero vector velocity = np.array([0,0]) # Iterates through all particles for i in range(N): # Creates random numbers specified in Chapter 5.2.2 of MD Simulation xi = np.array([np.random.uniform(-1,1),np.random.uniform(-1,1)]) # Calculates each component of velocity per Chapter 5.2.2 of MD simulation vx = xi[0]/np.sqrt(xi[0]**2 + xi[1]**2) vy = xi[1]/np.sqrt(xi[0]**2 + xi[1]**2) # Creates velocity vector for Particle_i vel_i = np.array([vx,vy]) # Appends velocity vector to array velocity = np.vstack((velocity,vel_i)) # Removing initial zero vector velocity = velocity[1:] return velocity def init_vel_max(N): ''' Function that calculates psuedo-random initial velocities for each specified particle in system. 
NOTE: does not account for extrenal forces/static system ''' # Initial array with zero vector velocity = np.array([0,0,0]) # Iterates through all particles for i in range(N): # Creates random numbers specified in Chapter 5.2.2 of MD Simulation xi = np.array(np.random.randn(1,3)) # Calculates each component of velocity per Chapter 5.2.2 of MD simulation vx = xi[0][0]/np.sqrt(xi[0][0]**2 + xi[0][1]**2 + xi[0][2]**2) vy = xi[0][1]/np.sqrt(xi[0][0]**2 + xi[0][1]**2 + xi[0][2]**2) vz = xi[0][2]/np.sqrt(xi[0][0]**2 + xi[0][1]**2 + xi[0][2]**2) # Creates velocity vector for Particle_i vel_i = np.array([vx,vy,vz]) # Appends velocity vector to array velocity = np.vstack((velocity,vel_i)) # Removing initial zero vector velocity = velocity[1:] return velocity # Assigned number of particles N = 100 # Calling function to collect initial velocities velocity1 = init_vel(N) #velocity2 = init_vel_max(N) #print(velocity1) #interim = np.array([]) #for i in range(len(velocity1)): # mag = np.sqrt(velocity1[i][0]**2 + velocity1[i][1]**2 + velocity1[i][2]**2) # interim = np.append(interim,mag) #fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(8, 4)) #ax0.hist(velocity1) #ax0.set_title('Random.Uniform') #ax1.hist(velocity2) #ax1.set_title('Random.Randn') # Blue is x component # Red is y component # Green is z component # - # Separates the velocity in to two arrays v_x = velocity1[:, 0] v_y = velocity1[:, 1] # + def position(N): ''' Initializes x-y position for defined number of particles. ''' # Creates array for both x and y positions x_t = np.array([]) y_t = np.array([]) # Creates a binary check-variable check = 1 # Collects 100 values while len(x_t)<N: # Picks a random number from 0 to 1 # Can be scaled for side length x = np.random.random() # Iterates through all current x positions for i in x_t: # If the random x is within the rejection radius... 
if (x > (i-0.01)) and (x < (i+0.01)): # ...binary changes check = 0 else: # Otherwise the value is unchanged check = 1 # If the binary has not changed... if check == 1: # ...then the random x is accepted x_t = np.append(x_t,x) # Resets binary variables check = 1 # Collects 100 values while len(y_t)<N: # Chooses a random value from 0 to 1 # Can be scaled for side length y = np.random.random() # Iterates throgh all previous y values for i in y_t: # If the random y is within the rejection radius... if (y > (i-0.01)) and (y < (i+0.01)): # ...binary changes check = 0 else: # Otherwise the value is unchanged check = 1 # If binary has not changed... if check == 1: # ...then the random y is accepted y_t = np.append(y_t,y) return x_t,y_t N = 100 x,y = position(N) #print(x,'\n',y) # - def radius(x,y,i): ''' calculates distance from each other particle i - particle in question ''' # Distance formula radii = np.array(np.sqrt((x-x[i])**2 + (y - y[i])**2)) return radii # + def x_Force(p_radii,x,i): ''' calculates total force in x direction on one particle i - particle in question x - x values for all particles radii - radial distance from particle in question of every other particle ''' sum_F_x = 0 # Iterates through every radius for value in p_radii: # Skips the particle itself if value == 0: continue # Adds the force for each particle in the system sum_F_x += x[i]*((48/value**14) - (24/value**8)) return sum_F_x def y_Force(p_radii,y,i): ''' calculates total force in y direction on one particle i - particle in question y - y values for all particles radii - radial distance from particle in question of every other particle ''' sum_F_y = 0 # Iterates through every radius for value in p_radii: # Skips the particle itself if value == 0: continue # Adds the force for each particle in the system sum_F_y += y[i]*((48/value**14) - (24/value**8)) return sum_F_y #F_x = np.array([]) #F_y = np.array([]) #for i in range(len(x)): # p_radii = radius(x,y,i) # F_x = 
np.append(F_x,x_Force(p_radii,x,i)) # F_y = np.append(F_y,y_Force(p_radii,y,i)) # + time_step = 1e-15 time = 0 #gtime = np.linspace(0,10,102) pos = np.array([x[0]]) # Initializing xyz file file = open("testfile.xyz","w") # Specified time for z in range(100): # Writes the beginning line for the file file.write('100\n') file.write('x, y, vx, vy, KE\n') # Resets Force array at beginning of loop F_x = np.array(x_Force(p_radii,x,i)) F_y = np.array(y_Force(p_radii,y,i)) # Calculates the Force for every particle # Difference equations that calculate position and velocity x = np.array(x + (v_x*time_step)) pos = np.append(pos,x[0]) v_x = np.array(v_x + (F_x*time_step)) y = np.array(y + (v_y*time_step)) v_y = np.array(v_y + (F_y*time_step)) # Saves Kinetic Energy for each particle Kinetic = (0.5*(v_x**2 + v_y**2)) time += time_step for i in range(len(x)): # Writes information to animation file file.write(str(x[i])) file.write(' ') file.write(str(y[i])) file.write(' ') file.write(str(v_x[i])) file.write(' ') file.write(str(v_y[i])) file.write(' ') file.write(str(Kinetic[i])) file.write('\n') # Collects the radial distance of every other particle from the specified one p_radii = radius(x,y,i) # Calculates x and y forces from fucntion above F_x = np.append(F_x,x_Force(p_radii,x,i)) F_y = np.append(F_y,y_Force(p_radii,y,i)) print('All Done!') # -
velocity_init.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: causalgene # language: python # name: causalgene # --- # ## Plotting Results experiment_name = ['l1000_AE','l1000_cond_VAE','l1000_VAE','l1000_env_prior_VAE'] import numpy as np from scipy.spatial.distance import cosine from scipy.linalg import svd, inv import pandas as pd import matplotlib.pyplot as plt import dill as pickle import os import pdb import torch import ai.causalcell from ai.causalcell.training import set_seed from ai.causalcell.utils import configuration os.chdir(os.path.join(os.path.dirname(ai.__file__), "..")) print("Working in", os.getcwd()) def load_all_losses(res, name='recon_loss'): all_train_loss = [] for epoch in range(len(res['losses']['train'])): train_loss = np.mean([res['losses']['train'][epoch][name] ]) all_train_loss.append(train_loss) all_valid_loss = [] for epoch in range(len(res['losses']['valid'])): valid_loss = np.mean([res['losses']['valid'][epoch][name] ]) all_valid_loss.append(valid_loss) return all_train_loss, all_valid_loss def epoch_length(i): return results[i]['n_samples_in_split']['train'] def get_tube(x_coord, valid_loss1, valid_loss2, valid_loss3): min_length = min(len(valid_loss1), len(valid_loss2), len(valid_loss3)) concat_lists = np.array([valid_loss1[:min_length], valid_loss2[:min_length], valid_loss3[:min_length]]) st_dev_list = np.std(concat_lists, 0) mean_list = np.mean(concat_lists, 0) return x_coord[:min_length], mean_list, st_dev_list result_dir = os.path.join(os.getcwd(), "results", experiment_name[1]) results = [] for exp_id in range(1,4): with open(os.path.join(result_dir,'results_' + str(exp_id) + '.pkl'), 'rb') as f: results.append(pickle.load(f)) # ### Reconstruction Loss all_train_loss, all_valid_loss = load_all_losses(results[1]) plt.plot(all_train_loss, label="train") plt.plot(all_valid_loss, label="valid") plt.title("reconstruction 
loss") plt.legend() plt.show() # ### Reconstruction Loss log scale plt.yscale("log") plt.plot(all_train_loss, label="train") plt.plot(all_valid_loss, label="valid") plt.title("reconstruction loss log scale") plt.legend() plt.show() # ### Reconstruction Loss with std deviation # + plt.figure(figsize=(6,4), dpi=200) for exp in experiment_name: results = [] all_exp_losses = [] result_dir = os.path.join(os.getcwd(), "results", exp) for exp_id in range(1,4): with open(os.path.join(result_dir,'results_' + str(exp_id) + '.pkl'), 'rb') as f: results.append(pickle.load(f)) for exp_id in range(3): all_exp_losses.append(load_all_losses(results[exp_id])) exp_id =0 valid_loss1 = all_exp_losses[exp_id][1] valid_loss2 = all_exp_losses[exp_id+1][1] valid_loss3 = all_exp_losses[exp_id+2][1] x_coord = [epoch_length(exp_id)*i for i in range(len(valid_loss1))] x_coord_tube, mean_list, st_dev_list = get_tube(x_coord, valid_loss1, valid_loss2, valid_loss3) plt.fill_between(x_coord_tube, mean_list - st_dev_list, mean_list + st_dev_list, alpha=.2) label = list(results[exp_id]['config']['model'].keys())[0] \ + " with " + str(results[exp_id]['n_envs_in_split']['train']) + " envs" plt.plot(x_coord_tube, mean_list, label=label) plt.title("reconstruction losses") #plt.yscale("log") #plt.xlim((0,3000000)) plt.legend() plt.show() # -
notebooks/ResultExploration/Result_plot.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): the wildcard import supplies pd, np, plt, Keras layers (Input,
# Dense, Dropout, Conv1D, ...), Model, train_test_split, EarlyStopping and
# plot_history. An explicit import list would make dependencies visible, but it
# is kept to preserve the notebook's behavior.
from softcomputing_packages_bu import *

# Load Dataset, Fillna, Categorical
# Replace +/-inf with NaN first so the subsequent fillna(0) zeroes them as well.
dataset = pd.read_csv('./columnsmerge.csv', encoding='euc-kr')
dataset = dataset.replace([np.inf, -np.inf], np.nan)
dataset = dataset.fillna(0)
print(dataset.shape)
print(dataset)

# To Categorical
# One-hot encode the district code (상권_코드), then drop the raw column.
dataset_code = dataset['상권_코드']
dataset_code_cat = pd.get_dummies(dataset_code)
print(dataset_code_cat.shape)
print(dataset.shape)
dataset = pd.concat([dataset, dataset_code_cat], axis=1)
dataset = dataset.drop(columns=['상권_코드'])
print(dataset.shape)

# One-hot encode the service/business-type code (서비스_업종_코드) the same way.
dataset_service_code = dataset['서비스_업종_코드']
dataset_service_code_cat = pd.get_dummies(dataset_service_code)
print(dataset_service_code_cat.shape)
# BUG FIX: the original concatenated the raw column (dataset_service_code)
# instead of its dummies (dataset_service_code_cat); the drop on the next line
# then removed every column named 서비스_업종_코드 — both copies — so the one-hot
# encoding was silently lost. Concatenate the dummies instead.
dataset = pd.concat([dataset, dataset_service_code_cat], axis=1)
dataset = dataset.drop(columns=['서비스_업종_코드'])
print(dataset.shape)

# X, Y, Definition
# Target is monthly sales (당월_매출_금액); exclude the target, the quarter/year
# codes, and the derived sales-per-store column from the feature matrix.
Y = dataset['당월_매출_금액'].values
X = dataset.drop(columns=['기준_분기_코드', '기준_년_코드', '당월_매출_금액', '매출/점포']).values
print(X.shape, Y.shape)

# Y Scaling
# Min-max scale the target into [0, 1]. Vectorized form; numerically identical
# to the original element-wise Python loop, just without the O(n) appends.
Y_max, Y_min = np.max(Y), np.min(Y)
Y = (Y - Y_min) / (Y_max - Y_min)

# Split Tr, Te
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.7, shuffle=True)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# Build Model (MLP baseline)
m_input = Input(shape=(X.shape[1],))
H = Dense(800, activation='tanh')(m_input)
H = Dense(512, activation='tanh')(H)
H = Dropout(0.5)(H)
H = Dense(128, activation='tanh')(H)
H = Dense(64, activation='tanh')(H)
m_output = Dense(1, activation='sigmoid')(H)
model_mlp = Model(m_input, m_output)
# NOTE(review): binary_crossentropy on a min-max-scaled *regression* target is
# unusual — it works only because the target lies in [0, 1]; MSE/MAE is the
# conventional loss for this task. Confirm the choice is intentional.
model_mlp.compile(loss='binary_crossentropy', optimizer='adam')
model_mlp.summary()

# Train, Validate Model
es = EarlyStopping(monitor='val_loss', mode='min', patience=80)
model_mlp.fit(X_train, Y_train,
              validation_data=(X_test, Y_test),
              shuffle=True,
              verbose=2,
              epochs=1000,
              batch_size=4000,
              callbacks=[es])

# Learning Curve
plot_history(model_mlp)

# Reconstructed: overlay a slice of true vs. predicted targets.
vis_range = 500
vis_start = 2000
Y_test_hat = model_mlp.predict(X_test, batch_size=4000)
plt.plot(Y_test[vis_start:vis_start+vis_range])
plt.plot(Y_test_hat[vis_start:vis_start+vis_range])
plt.show()

# Build Model (1-D CNN variant: treat the feature vector as a length-N signal)
c_input = Input(shape=(X.shape[1],))
H = Reshape((X.shape[1], 1))(c_input)
H = Conv1D(filters=20, kernel_size=(2), activation='tanh', padding='same')(H)
H = MaxPool1D(2)(H)
H = Dropout(0.5)(H)
H = Conv1D(filters=20, kernel_size=(2), activation='tanh', padding='same')(H)
H = MaxPool1D(2)(H)
H = Dropout(0.5)(H)
H = Conv1D(filters=20, kernel_size=(2), activation='tanh', padding='same')(H)
H = MaxPool1D(2)(H)
H = Dropout(0.5)(H)
H = Flatten()(H)
H = Dense(800, activation='tanh')(H)
H = Dense(512, activation='tanh')(H)
H = Dropout(0.5)(H)
H = Dense(128, activation='tanh')(H)
H = Dense(64, activation='tanh')(H)
c_output = Dense(1, activation='sigmoid')(H)
model_cnn = Model(c_input, c_output)
model_cnn.compile(loss='binary_crossentropy', optimizer='adam')
model_cnn.summary()

# Train, Validate Model
es = EarlyStopping(monitor='val_loss', mode='min', patience=80)
model_cnn.fit(X_train, Y_train,
              validation_data=(X_test, Y_test),
              shuffle=True,
              verbose=2,
              epochs=1000,
              batch_size=4000,
              callbacks=[es])

# Learning Curve
plot_history(model_cnn)

# Reconstructed
vis_range = 100
vis_start = 2000
Y_test_hat = model_cnn.predict(X_test, batch_size=4000)
plt.plot(Y_test[vis_start:vis_start+vis_range])
plt.plot(Y_test_hat[vis_start:vis_start+vis_range])
plt.show()
dayoung_trial1/00_benchmark_v1_columnsmerge.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [default] # language: python # name: python3 # --- # + # %matplotlib notebook import numpy as np import matplotlib.pyplot as plt # + A = 10 f1 = 100 f2 = 1000 f3 = 10000 t = np.arange(f1)/1000 x1 = A*np.cos(2*np.pi*f1*t) x2 = A*np.cos(2*np.pi*f2*t) x3 = A*np.cos(2*np.pi*f3*t) # - #plt.plot(t,x1) plt.plot(t,x2) #plt.plot(x3)
Untitled Folder/.ipynb_checkpoints/cosenos&white_noise-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # Amazon SageMaker Feature Store: How to securely store an image dataset in your Feature Store with a KMS key? # This notebook demonstrates how to securely store a dataset of images into your Feature Store using a KMS key. This is demonstrated using the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). # # The example in this notebook starts by retrieving the dataset from an Amazon S3 bucket (you can substitute your own S3 bucket storing your image dataset), and then prepare your dataset for ingestion to an online or offline feature store. We use a [Key Management Service (KMS)](https://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html) key for server-side encryption to ensure that your data is securely stored in your feature store. Finally, we query the ingested dataset from your feature store and then demonstrate how to retrieve your image dataset. # # This notebook uses KMS key for server side encryption for your Feature Store. For more information on server-side encryption, see [Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html). # # # If you would like to encrypt your data on the client side prior to ingestion, see [Amazon SageMaker Feature Store: Client-side Encryption using AWS Encryption SDK](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_client_side_encryption.html) for a demonstration. # # ## Overview # 1. Set up # 2. Load in your image data set # 3. Create Feature Groups and ingest your encrypted data into them # 4. 
# Query your data in your feature store using Amazon Athena
# 5. Plot your image data set
#
# ## Prerequisites
# This notebook uses the Python SDK library for Feature Store, and the `Python 3 (Data Science)` kernel. To encrypt your data with KMS key for server side encryption, you will need to have an active KMS key. If you do not have a KMS key, then you can create one by following the [KMS Policy Template](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html#KMS-Policy-Template) steps, or you can visit the [KMS section in the console](https://console.aws.amazon.com/kms/home) and follow the button prompts for creating a KMS key. This notebook is compatible with SageMaker Studio, Jupyter, and JupyterLab.
#
# ## Library Dependencies:
# * `sagemaker>=2.0.0`
# * `numpy`
# * `pandas`
# * `boto3`
#
# ## Data
# This notebook uses the [MNIST dataset](http://yann.lecun.com/exdb/mnist/).

# +
from time import gmtime, strftime
from sagemaker.feature_store.feature_group import FeatureGroup

import sagemaker
import boto3
import pandas as pd
import numpy as np
import pickle
import gzip
import time
import ast
import matplotlib.pyplot as plt
import os.path
# -

# ## Set up

sagemaker_session = sagemaker.Session()
s3_bucket_name = sagemaker_session.default_bucket()  # This is the bucket for your offline store.
public_s3_bucket_name = "sagemaker-sample-files"  # This is the name of the public S3 bucket.
prefix = 'sagemaker-featurestore-demo'
role = sagemaker.get_execution_role()
region = sagemaker_session.boto_region_name

# ## Download MNIST
# We are using the MNIST data set. It is stored on a publicly available S3 bucket. Below is a method to download a file to your current working directory. We use it to download the MNIST data set from our public S3 bucket that already has the data.

# +
def download_file_from_s3(bucket, path, filename):
    """
    Download filename to your current directory.

    Parameters:
        bucket: S3 bucket name
        path: path to file (S3 object key)
        filename: the name of the file you are downloading

    Returns:
        None
    """
    # Skip the download when the file already exists locally, which makes this
    # cell idempotent across notebook re-runs.
    if not os.path.exists(filename):
        s3 = boto3.client('s3', region_name='us-east-1')
        s3.download_file(Bucket=bucket, Key=path, Filename=filename)


download_file_from_s3(public_s3_bucket_name,
                      path='datasets/image/MNIST/mnist.pkl.gz',
                      filename='mnist.pkl.gz')
# -

# ## Additional - Helper Method
# Below is a method that you can use to get images from your S3 bucket into a `numpy` array. Specifically, if you have `jpg` or `jpeg` images in a S3 bucket that you want to load directly into a `numpy` array, then you can provide the bucket name, `s3_bucket_name`, and prefix path, `prefix_path` to `load_images_into_array` which does just this. Note: This is an additional method that you can use, but we do not use it in this notebook.

# +
def load_images_into_array(s3_bucket_name, prefix_path):
    """
    Return a numpy array of images.

    Parameters:
        s3_bucket_name: S3 bucket name
        prefix_path: path to images in your S3 bucket

    Returns:
        Numpy array.
    """
    # BUG FIX: `Image` (Pillow) was referenced below but never imported anywhere
    # in this notebook, so calling this helper raised NameError. Import it
    # locally since only this (optional) helper needs Pillow.
    from PIL import Image

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(s3_bucket_name)

    def s3_get_image_paths(bucket, prefix_path, img_exts=['jpg', 'jpeg']):
        """
        Return a list of paths of images.

        Parameters:
            bucket: S3 bucket name
            prefix_path: path to images in your S3 bucket
            img_exts: image extensions

        Returns:
            A list of paths to images.
        """
        img_path_lst = []
        for obj_summary in bucket.objects.filter(Prefix=prefix_path):
            if obj_summary.key.endswith(tuple(img_exts)):
                img_path_lst.append(obj_summary.key)
        return img_path_lst

    img_path_lst = s3_get_image_paths(bucket, prefix_path)

    lst = []
    for img_path in img_path_lst:
        # `s3_object` rather than `object` to avoid shadowing the builtin.
        s3_object = bucket.Object(img_path)
        response = s3_object.get()
        file_stream = response['Body']
        lst.append(np.array(Image.open(file_stream)))
    return np.array(lst)

# Below demonstrates how to use this method.
#img_lst = load_images_into_array(s3_bucket_name, prefix_path=image_path) # - # ## Unzip and load in dataset with gzip.open('mnist.pkl.gz', 'rb') as f: train_set, validation_set, test_set = pickle.load(f, encoding='latin1') train_x, train_y = train_set # Reshape the image so it can be plotted train_x = train_x.reshape(train_x.shape[0], 28, 28) # In the following example, we plot a single image. plt.imshow(train_x[0]) plt.show() # ## Create a data frame of our images. # We represent the image as a flattened array and also store the original shape of the image in our data frame. Both will be in our data frame that will be ingested into your feature store. # # **Important:** At this time, Feature store only supports flattened images with maximum length 350k. # + def create_data_frame(img_lst, col_names=["img", "shape"]): """ Return a Pandas data frame where each row corresponds to an image represented as an array, the original shape of that image and an id. Parameters: img_lst: a list of images. col_names: names of the columns in your data frame Returns: Pandas data frame. """ img_col = [] img_shape_col = [] ids = [] for index, img in enumerate(img_lst): img_flat = img.reshape(-1) img_as_str = str(np.array2string(img_flat, precision=2, separator=',', suppress_small=True)).encode('utf-8') img_shape = list(img.shape) img_col.append(img_as_str) img_shape_col.append(img_shape) ids.append(index) return pd.DataFrame({"id": ids, col_names[0]: img_col, col_names[1]:img_shape_col}) df = create_data_frame(train_x[:5]) # - df.head() df.dtypes def cast_object_to_string(data_frame): """ Cast all columns of data_frame of type object to type string and return it. Parameters: data_frame: A pandas Dataframe Returns: Data frame """ for label in data_frame.columns: if data_frame.dtypes[label] == object: data_frame[label] = data_frame[label].astype("str").astype("string") return data_frame # Cast columns of df of type object to string. 
df = cast_object_to_string(df)
df.head()

# ## Create your Feature Group and Ingest your data into it
#
# Below we start by appending the `EventTime` feature to your data to timestamp entries, then we load the feature definition, and instantiate the Feature Group object. Then lastly we ingest the data into your feature store.

# Unique name per run so repeated executions don't collide.
feature_group_name = 'mnist-feature-group-' + strftime('%d-%H-%M-%S', gmtime())

# Instantiate a `FeatureGroup` object for your data.
feature_group = FeatureGroup(name=feature_group_name, sagemaker_session=sagemaker_session)

record_identifier_feature_name = "id"

# Append the `EventTime` feature to your data frame. This parameter is required, and time stamps each data point.
current_time_sec = int(round(time.time()))

event_time_feature_name = "EventTime"
# append EventTime feature — same timestamp for every record of this batch.
df[event_time_feature_name] = pd.Series([current_time_sec]*len(df), dtype="float64")

# Load Feature Definition's of your data into your feature group.
feature_group.load_feature_definitions(data_frame=df)

# Create your feature group.
#
# **Important**: You will need to substitute your KMS Key ARN for `kms_key` for server side encryption (SSE). The cell below demonstrates how to enable SSE for an offline store. If you choose to use an online store, you will need to assign `enable_online_store` to `True`. To enable SSE for an online store you will need to assign `online_store_kms_key_id` to your KMS key.

# NOTE(review): `kms_key` is deliberately left undefined by this sample — this
# cell raises NameError until you assign your own KMS key ARN to `kms_key`.
feature_group.create(
    s3_uri=f"s3://{s3_bucket_name}/{prefix}",
    record_identifier_name=record_identifier_feature_name,
    event_time_feature_name="EventTime",
    role_arn=role,
    enable_online_store=False,
    offline_store_kms_key_id = kms_key # Substitute kms_key with your kms key.
)

feature_group.describe()

# Continually check your offline store until your data is available in it.

# +
def check_feature_group_status(feature_group):
    """
    Print when the feature group has been successfully created

    Parameters:
        feature_group: FeatureGroup

    Returns:
        None
    """
    status = feature_group.describe().get("FeatureGroupStatus")
    # Poll every 5 s while SageMaker reports the group is still being created.
    while status == "Creating":
        print("Waiting for Feature Group to be Created")
        time.sleep(5)
        status = feature_group.describe().get("FeatureGroupStatus")
    print(f"FeatureGroup {feature_group.name} successfully created.")

check_feature_group_status(feature_group)
# -

# Ingest your data into your feature group.

feature_group.ingest(data_frame=df, max_workers=5, wait=True)

time.sleep(30)

# +
s3_client = sagemaker_session.boto_session.client('s3', region_name=region)

feature_group_s3_uri = feature_group.describe().get("OfflineStoreConfig")\
    .get("S3StorageConfig").get("ResolvedOutputS3Uri")
feature_group_s3_prefix = feature_group_s3_uri.replace(f"s3://{s3_bucket_name}/", "")

offline_store_contents = None
# The offline store is written asynchronously; poll S3 every 60 s until the
# ingested objects appear under the resolved output prefix.
while offline_store_contents is None:
    objects_in_bucket = s3_client.list_objects(Bucket=s3_bucket_name,\
        Prefix=feature_group_s3_prefix)
    if ('Contents' in objects_in_bucket and len(objects_in_bucket['Contents']) > 1):
        offline_store_contents = objects_in_bucket['Contents']
    else:
        print('Waiting for data in offline store...\n')
        time.sleep(60)

print('Data available.')
# -

# ## Use Amazon Athena to Query your Encrypted Data in your Feature Store
# Using Amazon Athena, we query the image data set that we stored in our feature store to demonstrate how to extract your data set of images.

query = feature_group.athena_query()
table = query.table_name

query_table = 'SELECT * FROM "'+table+'"'
print('Running ' + query_table)

# Run the Athena query
query.run(query_string=query_table, output_location='s3://'+\
    s3_bucket_name+'/'+prefix+'/query_results/')
time.sleep(60)
dataset = query.as_dataframe()
print(dataset.dtypes)

# Below is the data queried from your feature store.
dataset def parse_show_image(df): """ Return a numpy array of your images that have been reshaped into it's corresponding shape. Parameters: df: dataframe of your data Returns: Numpy array """ import ast images = [] for index, entry in enumerate(np.array(df['img'])): entry = entry.strip('b') entry = np.array(ast.literal_eval(ast.literal_eval(entry))) shape = ast.literal_eval(df['shape'][index]) entry = entry.reshape(shape[0], shape[1]) images.append(entry) return np.array(images) images = parse_show_image(dataset) # Below shows the shape of your image data set. images.shape # Plot the images to demonstrate that you can view the images stored in your feature store. for img in images: plt.imshow(img) plt.show() # ## Clean up resources # Remove the Feature Group that was created. feature_group.delete() # ## Next steps # # In this notebook we covered how to securely store data sets of images in a feature store using KMS key. # # If you are interested in understanding more on how server-side encryption is done with Feature Store, see [Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_kms_key_encryption.html). # # If you are interested in understanding how to do client-side encryption to encrypt your image data set prior to storing it in your feature store, see [Amazon SageMaker Feature Store: Client-side Encryption using AWS Encryption SDK](https://sagemaker-examples.readthedocs.io/en/latest/sagemaker-featurestore/feature_store_client_side_encryption.html). For more information on the AWS Encryption library, see [AWS Encryption SDK library](https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/introduction.html). # # For detailed information about Feature Store, see the [Developer Guide](https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store.html).
sagemaker-featurestore/feature_store_securely_store_images.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''yolo'': conda)'
#     name: python3
# ---

import os

# NOTE(review): os.system('cp ...')/('rm -rf ...') are Unix commands, yet the
# paths are Windows drive paths — this only works in a Unix-like shell (e.g.
# Git Bash / WSL). Also note cp_base resolves to '.../rest-all/-novideo/'
# (slash before '-novideo') — confirm that is the intended directory name.
base = r'D:\WWF_Det\WWF_Data\Raw_Data/rest-all/'
cp_base = base + '-novideo/'
# Copy the whole tree, then strip every per-folder videos/ subdirectory.
os.system('cp -R ' + base + ' ' + cp_base)
folder_list = os.listdir(cp_base)
for folder in folder_list:
    video_folder = cp_base + folder + '/videos/'
    os.system('rm -rf ' + video_folder)

# +
# -*- coding: UTF-8 -*-
import os
import pandas as pd
import numpy as np

# + tags=[]
df = pd.read_csv('D:/WWF_Det/WWF_Det/Label_data_building/Url_transform/Raw_csv/rest-part1.csv')
df = df.drop(['异常信息', '数据集ID', '数据集名称'], axis=1)
df = df.reset_index(drop=True)  # same effect as reset_index().drop(['index'], axis=1)

# Hoisted out of the per-row loop: these lists are loop-invariant, and the
# original rebuilt them on every iteration.
# Pinyin class-folder names ...
eng_class = ['baichunlu', 'chihu', 'zanghu', 'maoniu', 'ma', 'mashe', 'yang', 'yanyang', 'person', 'xuebao', 'malu', 'lanmaji', 'gaoyuanshanchun', 'gaoyuantu',
             'lang', 'pao', 'shidiao', 'sheli', 'lv', 'chai', 'hanta', 'zongxiong', 'huangmomao',
             'anfuxueji', 'banchishanchun', 'banweizhenji', 'chihu+shidiao', 'danfuxueji', 'gaoyuanxuetu', 'gou', 'hongcuiya', 'hongsun', 'hongzuishanya', 'hongweiqu', 'huangyou', 'huwujiu',
             'kuang', 'laoshu', 'maque', 'niaolei', 'paolu', 'shanque', 'shiji', 'shitu', 'shu', 'shutu', 'wuya', 'xiangyou', 'xique', 'xuege', 'xueji', 'xuezhi', 'you',
             'zongbeidong']
# ... and their Chinese display names, aligned by position.
# NOTE(review): both 'hongcuiya' and 'hongzuishanya' map to '红嘴山鸦' — this
# looks like a data-entry duplicate; confirm against the label source.
chi_class = ['白唇鹿', '赤狐', '藏狐', '牦牛', '马', '马麝', '羊', '岩羊', '人', '雪豹', '马鹿', '蓝马鸡', '高原山鹑', '高原兔',
             '狼', '狍', '石貂', '猞猁', '藏野驴', '豺', '旱獭', '棕熊', '荒漠猫',
             '暗腹雪鸡', '斑翅山鹑', '斑尾榛鸡', '赤狐+石貂', '淡腹雪鸡', '高原雪兔', '狗', '红嘴山鸦', '红隼', '红嘴山鸦', '红尾雀', '黄鼬', '胡兀鹫', '鵟', '老鼠', '麻雀', '鸟类', '狍鹿',
             '山雀', '石鸡', '石兔', '鼠', '鼠兔', '乌鸦', '香鼬', '喜鹊', '雪鸽', '雪鸡', '血雉', '鼬', '棕背鸫']
# First-match translation table (each English key is unique, so the dict is
# equivalent to the original sequential zip scan).
class_name_map = dict(zip(eng_class, chi_class))

houzhui_list = ['.MOV', '.AVI', '.MP4']  # candidate video extensions (后缀)

# Accumulate plain records and build the frame once — the original grew
# df_store row-by-row via .loc, which is quadratic in the number of rows.
records = []
for index, row in df.iterrows():
    file_dir = row['文件路径'].replace('rest-p1-novideo', 'rest-p1', 1)
    cate_name = row['文件路径'].split('/')[1]
    video_dir = file_dir.replace('frames', 'videos', 1)
    file_path = file_dir + row['文件名称']
    # BUG FIX: the original never reset video_path between rows, so a row with
    # no matching video file either crashed (first row) or silently inherited
    # the previous row's video path. Start each row with a missing value.
    video_path = np.nan
    for ext in houzhui_list:
        # A frame file's matching video shares the first 5 filename characters.
        check_path = 'D:/WWF_Det/WWF_Data/Raw_Data/' + video_dir + row['文件名称'][:5] + ext
        #print(check_path)
        if os.path.exists(check_path):
            video_path = check_path.replace('D:/WWF_Det/WWF_Data/Raw_Data/', '', 1)
    # Translate the pinyin folder name to its Chinese label when known.
    cate_name = class_name_map.get(cate_name, cate_name)
    records.append({'tag': cate_name,
                    'Path': file_path,
                    'video_path': video_path,
                    'url': row['文件地址']})

df_store = pd.DataFrame(records, columns=['id', 'tag', 'Path', 'url', 'video_path', 'video_url'])
df_store = df_store.sort_values(by="Path", ascending=True)
# Sequential ids are assigned after sorting by path.
df_store['id'] = np.arange(1, len(df_store)+1)
df_store['video_url'] = 'None'
df_store.to_csv(r'D:\WWF_Det\WWF_Det\Label_data_building\Url_transform\Post_csv\rest-part1.csv', index=False, encoding="utf_8_sig")
# -

# Already sorted above; kept to mirror the original notebook's final cell.
df_store = df_store.sort_values(by="Path", ascending=True)
df_store
Label_data_building/Url_transform/transform_url_top14_part3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="T7kuFBTlIrB5"
import tensorflow as tf

# + id="_R6ewRrBIwCF"
class myCallback(tf.keras.callbacks.Callback):
    """Stop training as soon as training accuracy exceeds 99%."""

    def on_epoch_end(self, epoch, logs=None):
        """Keras hook called after each epoch with that epoch's metrics.

        BUG FIX: the original signature used a mutable default (``logs={}``)
        and compared ``logs.get('accuracy') > 0.99`` directly, which raises
        TypeError (None > float) whenever the 'accuracy' key is absent.
        Defaulting the lookup to 0 keeps the comparison safe.
        """
        logs = logs or {}
        if logs.get('accuracy', 0) > 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

# + id="i2xqZkhBI3jH" outputId="0a368745-18bf-4a35-9dbb-62f353f8d817" colab={"base_uri": "https://localhost:8080/"}
mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel intensities from [0, 255] into [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0

# + id="0JlYFrBEJZUo" outputId="9a0cd6dd-0d2a-4e50-bbc7-288c3be7d445" colab={"base_uri": "https://localhost:8080/"}
x_train[0]

# + id="SYxYJtKII7fk"
callbacks = myCallback()

# + colab={"base_uri": "https://localhost:8080/"} id="rEHcB3kqyHZ6" outputId="1187b2cb-14bd-4075-b8b9-184ec5af6a1d"
# Simple dense classifier: flatten 28x28 images, one hidden layer, softmax out.
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# The callback halts training early once 99% training accuracy is reached.
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])

# + id="uI3ycwSxJIrO" outputId="b2a4c9b5-560b-4e15-b05c-284df68d675a" colab={"base_uri": "https://localhost:8080/"}
model.evaluate(x_test, y_test)

# + id="rZPbLhMOLPEx"

# + id="zX4Kg8DUTKWO"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Write_SomeThink.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/student/W1D3_Tutorial6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] colab_type="text" id="fDXAYdIHuyCV" # # # Neuromatch Academy: Week 1, Day 3, Tutorial 6 # # Model Selection: Bias-variance trade-off # # + [markdown] colab_type="text" id="YfiTGyEaaLrP" # #Tutorial Objectives # # This is Tutorial 6 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of linear models by generalizing to multiple linear regression (Tutorial 4). We then move on to polynomial regression (Tutorial 5). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 6) and two common methods for model selection, AIC and Cross Validation (Tutorial 7). # # # In this tutorial, we will learn about the bias-variance tradeoff and see it in action using polynomial regression models. 
# # Tutorial objectives:
#
#   * Understand difference between test and train data
# * Compare train and test error for models of varying complexity
# * Understand how bias-variance tradeoff relates to what model we choose

# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 518} colab_type="code" id="ThcPwgvzvV-6" outputId="4ebaf563-470b-4b64-8e90-03778fa1600d"
#@title Video Bias Variance Tradeoff
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="c6EjIrhkUpc", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video

# + [markdown] colab_type="text" id="Jj5ChX3NGWfN"
# # Setup

# + cellView="form" colab={} colab_type="code" id="3Yuh005kCT0v"
#@title Imports
import numpy as np
import matplotlib.pyplot as plt

# + cellView="form" colab={} colab_type="code" id="2s0veBvFX8fz"
#@title Figure Settings
# %matplotlib inline
fig_w, fig_h = (8, 6)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
# %config InlineBackend.figure_format = 'retina'

# + cellView="form" colab={} colab_type="code" id="s4IASbPMGbPX"
#@title Helper functions

def ordinary_least_squares(x, y):
    """Ordinary least squares estimator for linear regression.

    Args:
        x (ndarray): design matrix of shape (n_samples, n_regressors)
        y (ndarray): vector of measurements of shape (n_samples)

    Returns:
        ndarray: estimated parameter values of shape (n_regressors)
    """
    # Normal equations: theta = (X^T X)^{-1} X^T y.
    # NOTE(review): np.linalg.lstsq is numerically preferable to an explicit
    # inverse, but the explicit form is kept here for pedagogical clarity.
    return np.linalg.inv(x.T @ x) @ x.T @ y


def make_design_matrix(x, order):
    """Create the design matrix of inputs for use in polynomial regression

    Args:
        x (ndarray): An array of shape (samples,) that contains the input values.
        order (scalar): The order of the polynomial we want to fit

    Returns:
        numpy array: The design matrix containing x raised to different powers
    """
    # Broadcast to shape (n x 1) if shape (n, ) so this function generalizes to multiple inputs
    if x.ndim == 1:
        x = x[:, None]

    # if x has more than one feature, we don't want multiple columns of ones so we assign
    # x^0 here
    design_matrix = np.ones((x.shape[0], 1))

    # Loop through rest of degrees and stack columns
    for degree in range(1, order + 1):
        design_matrix = np.hstack((design_matrix, x**degree))

    return design_matrix


def solve_poly_reg(x, y, max_order):
    """Fit a polynomial regression model for each order 0 through max_order.

    Args:
        x (ndarray): An array of shape (samples, ) that contains the input values
        y (ndarray): An array of shape (samples, ) that contains the output values
        max_order (scalar): The order of the polynomial we want to fit

    Returns:
        dict: maps each polynomial order (0..max_order) to its fitted weight
        vector. (The original docstring described a 2-D array, but this
        function has always returned a dict keyed by order.)
    """
    # Create a dictionary with polynomial order as keys, and np array of theta
    # (weights) as the values
    theta_hat = {}

    # Loop over polynomial orders from 0 through max_order
    for order in range(max_order + 1):
        X = make_design_matrix(x, order)
        this_theta = ordinary_least_squares(X, y)
        theta_hat[order] = this_theta

    return theta_hat


# + [markdown] colab_type="text" id="XDrVC2QUGjYm"
# # Train vs test data
#
# The data used for the fitting procedure for a given model is the **training data**. In tutorial 5, we computed MSE on the training data of our polynomial regression models and compared training MSE across models. An additional important type of data is **test data**. This is held-out data that is not used (in any way) during the fitting procedure. When fitting models, we often want to consider both the train error (the quality of prediction on the training data) and the test error (the quality of prediction on the test data) as we will see in the next section.
#

# + [markdown] colab_type="text" id="PViOfczipLwN"
# # Bias-variance tradeoff
#
# Finding a good model can be difficult. One of the most important concepts to keep in mind when modeling is the **bias-variance tradeoff**.
#
# **Bias** is the difference between the prediction of the model and the corresponding true output variables you are trying to predict. Models with high bias will not fit the training data well since the predictions are quite different from the true data. These high bias models are overly simplified - they do not have enough parameters and complexity to accurately capture the patterns in the data and are thus **underfitting**.
#
#
# **Variance** refers to the variability of model predictions for a given input. Essentially, do the model predictions change a lot with changes in the exact training data used? Models with high variance are highly dependent on the exact training data used - they will not generalize well to test data. These high variance models are **overfitting** to the data.
#
# In essence:
#
# * High bias, low variance models have high train and test error.
# * Low bias, high variance models have low train error, high test error.
# * Low bias, low variance models have low train and test error.
#
#
# As we can see from this list, we ideally want low bias and low variance models! These goals can be in conflict though - models with enough complexity to have low bias also tend to overfit and depend on the training data more. We need to decide on the correct tradeoff.
#
# In this section, we will see the bias-variance tradeoff in action with polynomial regression models of different orders.
#

# + [markdown] colab_type="text" id="VeNPDxXU1rC4"
# ### Data generation & model fitting
#
# We first generate some noisy data using a similar process as in Tutorial 5.
#
# We will generate our training data the exact same way as in Tutorial 5. However, now we will also generate test data.
We want to see how our model generalizes beyond the range of values see in the training phase. To accomplish this, we will generate x from a wider range of values ([-3, 3]). We then plot the train and test data together. # # We will then fit polynomial regression models of orders 0-5 just as we did in Tutorial 5. # + colab={"base_uri": "https://localhost:8080/", "height": 403} colab_type="code" id="NQ5S5rdacdJ8" outputId="2d57baa3-27d4-49cc-c80f-e4f7091379d0" ### Generate training data np.random.seed(0) n_samples = 50 x_train = np.random.uniform(-2, 2.5, n_samples) # sample from a uniform distribution over [-2, 2.5) noise = np.random.randn(n_samples) # sample from a standard normal distribution y_train = x_train**2 - x_train - 2 + noise ### Generate testing data n_samples = 20 x_test = np.random.uniform(-3, 3, n_samples) # sample from a uniform distribution over [-2, 2.5) noise = np.random.randn(n_samples) # sample from a standard normal distribution y_test = x_test**2 - x_test - 2 + noise ## Plot both train and test data fig, ax = plt.subplots() plt.title('Training & Test Data') plt.plot(x_train, y_train, '.', label='Training') plt.plot(x_test, y_test, 'g+', label='Test') plt.legend() plt.xlabel('x') plt.ylabel('y'); # + [markdown] colab_type="text" id="460SfMjNv29X" # ## Run this cell to get estimated parameters # + colab={} colab_type="code" id="RA8j7rA2KUMr" # Let's fit some models! max_order = 5 theta_hat = solve_poly_reg(x_train, y_train, max_order) # + [markdown] colab_type="text" id="d_VBZm0G3_7E" # #### Exercise: Compute and compare train vs test error # # We will use MSE as our error metric again. Compute MSE on training data ($x_{train},y_{train}$) and test data ($x_{test}, y_{test}$ for each polynomial regression model (orders 0-5). Hint: we computed MSE on train data in Tutorial 5 so port those concepts/code here # # *Please think about after completing exercise before reading the following text! Do you think the order 0 model has high or low bias? 
High or low variance? How about the order 5 model?* # + colab={} colab_type="code" id="jP8QQ4HaNlP-" def compute_mse(x_train,x_test,y_train,y_test,theta_hat,max_order): """Compute MSE on training data and test data. Args: x_train(ndarray):An array of shape (samples, ) that contains the training set input values. x_test(ndarray): An array of shape (samples,) that contains the test set values. y_train(ndarray): An array of shape (samples, ) that contains the output values. y_test(ndarray): An array of shape (samples, ) that contains the output testing set. theta_hat(numpy array): (input_features, max_order+1) Each column contains the fitted weights for that order of polynomial regression max_order (scalar): The order of the polynomial we want to fit Returns: mse_train: MSE error on training data for each order mse_test: MSE error on test data for each order """ ####################################################### ## TODO for students: calculate mse error for both sets ## Hint: look back at tutorial 5 where we calculated MSE ####################################################### #uncomment below and fill in with your code mse_train = np.zeros((max_order+1)) #for order in range(0, max_order+1): #X_design_train = YOUR CODE HERE #y_hat = YOUR CODE HERE #residuals = YOUR CODE HERE #mse_train[order] = YOUR CODE HERE mse_test = np.zeros((max_order+1)) #for order in range(0, max_order+1): #X_design_test =YOUR CODE HERE #y_hat = YOUR CODE HERE #residuals = YOUR CODE HERE #mse_test[order] = YOUR CODE HERE # comment this out when you've filled in the function raise NotImplementedError("Student excercise: calculate mse for train and test set") return mse_train, mse_test # Uncomment once exercise complete #mse_train, mse_test = compute_mse(x_train,x_test,y_train,y_test,theta_hat,max_order) #width = .35 #plt.figure() #plt.bar(np.arange(max_order+1) - width/2, mse_train, width, label="train MSE") #plt.bar(np.arange(max_order+1) + width/2, mse_test , width, label="test MSE") 
#plt.legend() #plt.xlabel('polynomial order') #plt.ylabel('MSE') #plt.title('comparing polynomial fits'); # + [markdown] cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 412} colab_type="text" id="43rGelCd49hQ" outputId="126f1ff7-52f6-4dd4-a3e4-eb8319204eb6" # [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D3_ModelFitting/solutions/W1D3_Tutorial6_Solution_b22019c5.py) # # *Example output:* # # <img alt='Solution hint' align='left' width=506 height=406 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D3_ModelFitting/static/W1D3_Tutorial6_Solution_b22019c5_0.png> # # # + [markdown] colab_type="text" id="EYAfAFFg5Oyd" # As we can see from the plot above, more complex models (higher order polynomials) have lower MSE for training data. The overly simplified models (orders 0 and 1) have high MSE on the training data. As we add complexity to the model, we go from high bias to low bias. # # The MSE on test data follows a different pattern. The best test MSE is for an order 2 model. Both simple models and more complex models have higher test MSE. # # So to recap: # # Order 0 model: High bias, low variance # # Order 5 model: Low bias, high variance # # Order 2 model: Just right, low bias, low variance # # + [markdown] colab_type="text" id="K8k32Ks3N38G" # # Summary # # Training data is the data used for fitting, test data is held-out data. # # We need to strike the right balance between bias and variance. # # Too complex models have low bias and high variance. Too simple models have high bias and low variance. We want to find a model with optimal model complexity that has both low bias and low variance
tutorials/W1D3_ModelFitting/student/W1D3_Tutorial6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow as tf
import numpy as np

# Memory-mapped arrays: images and integer class labels (2 classes, matching
# the (512, 2) output layer below). NOTE(review): shapes assumed to be
# (N, 100, 100, 3) and (N,) from the placeholders — confirm against the files.
data=np.load("C:\\MLDatabases\\google-images-download-master\\downloads\\100x100_resized_images_as_numpy\\x_non_alpha_composed.npy",mmap_mode='r')
labels=np.load("C:\\MLDatabases\\google-images-download-master\\downloads\\100x100_resized_images_as_numpy\\y_non_alpha_composite1.npy",mmap_mode='r')


def create_weights(shape, name, stddev=0.003):
    """Create a trainable float32 variable initialised from a truncated normal.

    Args:
        shape: tuple of ints — shape of the variable.
        name: str — name of the variable in the graph.
        stddev: float — stddev of the truncated-normal initialiser.

    Returns:
        A tf.Variable with the requested shape and name.
    """
    return tf.Variable(
        initial_value=tf.truncated_normal(shape=shape, mean=0, stddev=stddev,
                                          dtype="float32", seed=10),
        dtype="float32", name=name)


# Fixed seeds for reproducible initialisation and shuffling.
tf.set_random_seed(10)
np.random.seed(10)

# +
tf.reset_default_graph()


def get_graph():
    """Build the 4-conv-layer CNN classifier graph.

    Architecture: four stride-2 SAME convolutions (spatial size
    100 -> 50 -> 25 -> 13 -> 7), each followed by ReLU and batch norm, then a
    512-unit fully connected layer with dropout and a 2-way logits layer.

    Returns:
        (optimizer, loss, accuracy, x, y, lr, dr): the train op, scalar loss,
        scalar accuracy, and the image / label / learning-rate /
        dropout-keep-prob placeholders.
    """
    strides = (1, 2, 2, 1)  # stride 2 halves the spatial dimensions per conv
    with tf.device("/device:CPU:0"):
        x = tf.placeholder(tf.float32, shape=(None, 100, 100, 3), name="x")
        y = tf.placeholder(tf.int64, shape=(None,), name="y")
        lr = tf.placeholder(tf.float32, shape=(), name="lr")
        dr = tf.placeholder(tf.float32, shape=(), name="dr")  # dropout keep_prob

        conv_w1 = create_weights((10, 10, 3, 2), "conv_w1")
        conv_w2 = create_weights((6, 6, 2, 4), "conv_w2")
        conv_w3 = create_weights((3, 3, 4, 8), "conv_w3")
        conv_w4 = create_weights((2, 2, 8, 8), "conv_w4")

        w_fc1 = create_weights((7 * 7 * 8, 512), "w_fc1")
        fc1_bias = create_weights((512,), "fc1_bias")
        w_fc2 = create_weights((512, 2), "w_fc2")
        fc2_bias = create_weights((2,), "fc2_bias")

        # NOTE(review): batch_normalization is hard-coded to training=True, so
        # the test-set evaluation below also uses batch statistics — consider
        # feeding an is-training flag instead.
        conv1 = tf.nn.conv2d(input=x, filter=conv_w1, strides=strides, padding="SAME", name="conv1")         # 100 -> 50
        conv1_bn = tf.layers.batch_normalization(tf.nn.relu(conv1, name="relu_conv1"), training=True)

        conv2 = tf.nn.conv2d(input=conv1_bn, filter=conv_w2, strides=strides, padding="SAME", name="conv2")  # 50 -> 25
        conv2_bn = tf.layers.batch_normalization(tf.nn.relu(conv2, name="relu_conv2"), training=True)

        conv3 = tf.nn.conv2d(input=conv2_bn, filter=conv_w3, strides=strides, padding="SAME", name="conv3")  # 25 -> 13
        conv3_bn = tf.layers.batch_normalization(tf.nn.relu(conv3, name="relu_conv3"), training=True)

        conv4 = tf.nn.conv2d(input=conv3_bn, filter=conv_w4, strides=strides, padding="SAME", name="conv4")  # 13 -> 7
        conv4_bn = tf.layers.batch_normalization(tf.nn.relu(conv4, name="relu_conv4"), training=True)

        flat_feature = tf.reshape(tensor=conv4_bn, shape=[-1, 7 * 7 * 8], name="flat_feature")

        fc1 = tf.add(tf.matmul(flat_feature, w_fc1), fc1_bias, name="fc1")
        fc1_dropout = tf.nn.dropout(fc1, keep_prob=dr)
        logits = tf.add(tf.matmul(fc1_dropout, w_fc2), fc2_bias, name="fc2")

    with tf.device("/device:CPU:0"):
        softmaxed_logits = tf.nn.softmax(logits, name="softmaxed_logits")
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits),
            name="loss")
        # Run the batch-norm moving-average updates on every training step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(tf.argmax(softmaxed_logits, axis=1), y), dtype=tf.float32))
    return optimizer, loss, accuracy, x, y, lr, dr
# -

optimizer, loss, accuracy, x, y, lr, dr = get_graph()
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

# Shuffle the 2907 sample indices. The three shuffle passes are kept on purpose
# so the train/test split stays identical to the original run under seed 10
# (one pass would otherwise suffice).
indexes = np.arange(2907)
np.random.shuffle(indexes)
np.random.shuffle(indexes)
np.random.shuffle(indexes)

# +
# 2300 training samples, the remaining 607 held out for testing.
train = indexes[0:2300]
test = indexes[2300:None]

x_train = data[train]
y_train = labels[train]
x_test = data[test]
y_test = labels[test]
# -

EPOCHS = 50
BATCH_SIZE = 8

with tf.Session() as sess:
    sess.run(init_op)
    for e in range(EPOCHS):
        for s in range(0, 2300, BATCH_SIZE):
            x_batch = x_train[s:s + BATCH_SIZE]
            y_batch = y_train[s:s + BATCH_SIZE]
            # One optimisation step; the two original branches ran the same
            # sess.run and differed only in logging, so the call is hoisted.
            _, s_loss, s_accu = sess.run(
                [optimizer, loss, accuracy],
                feed_dict={x: x_batch, y: y_batch, lr: 0.0001, dr: 0.4})
            if s % 1000 == 0:
                print("Epoch", e, "Iteration", s, "Loss: ", s_loss, "Accuracy: ", s_accu)

        # Evaluate on the held-out set. keep_prob 1.0 disables dropout; lr is
        # fed but unused here because the optimizer op is not run.
        s_cost, test_accu = sess.run(
            [loss, accuracy], feed_dict={x: x_test, y: y_test, lr: 0.001, dr: 1.0})
        print("Epoch", e, "Loss: ", s_cost, "Accuracy: ", test_accu)

        # Checkpoint after every epoch (overwrites the previous checkpoint).
        model_file = "C:\\MLDatabases\\google-images-download-master\\downloads\\model"
        saver.save(sess, save_path=model_file)

# +
import tensorflow as tf

meta_path = 'C:\\MLDatabases\\google-images-download-master\\downloads\\model.meta'  # Your .meta file

# BUG FIX: convert_variables_to_constants expects graph *node* names, not
# tensor names — 'softmaxed_logits:0' (with the ':0' output-index suffix)
# makes the freeze step fail because no node of that name exists.
output_node_names = ['softmaxed_logits']  # Output nodes

with tf.Session() as sess:
    # Restore the graph structure from the .meta file.
    saver = tf.train.import_meta_graph(meta_path)

    # Load the checkpointed weights.
    saver.restore(sess, 'C:\\MLDatabases\\google-images-download-master\\downloads\\model')

    # Freeze: fold the variables into constants so the graph is self-contained.
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, output_node_names)

    # Serialise the frozen graph to a .pb file.
    with open('C:\\MLDatabases\\google-images-download-master\\downloads\\model_as_string.pb', 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())
Image classification using CNN/cnn_classifier_training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # # Introduction Notebook # # Estimated time needed: **10** minutes # # ## Objectives # # After completing this lab you will be able to: # # * Acquire data in various ways # * Obtain insights from data with Pandas library # # <h2>Table of Contents</h2> # # <div class="alert alert-block alert-info" style="margin-top: 20px"> # <ol> # <li><a href="#data_acquisition">Data Acquisition</a> # <li><a href="#basic_insight">Basic Insight of Dataset</a></li> # </ol> # # </div> # <hr> # # <h1 id="data_acquisition">Data Acquisition</h1> # <p> # There are various formats for a dataset: .csv, .json, .xlsx etc. The dataset can be stored in different places, on your local machine or sometimes online.<br> # # In this section, you will learn how to load a dataset into our Jupyter Notebook.<br> # # In our case, the Automobile Dataset is an online source, and it is in a CSV (comma separated value) format. Let's use this dataset as an example to practice data reading. 
# # <ul> # <li>Data source: <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank">https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data</a></li> # <li>Data type: csv</li> # </ul> # The Pandas Library is a useful tool that enables us to read various datasets into a dataframe; our Jupyter notebook platforms have a built-in <b>Pandas Library</b> so that all we need to do is import Pandas without installing. # </p> # # import pandas library import pandas as pd import numpy as np # <h2>Read Data</h2> # <p> # We use <code>pandas.read_csv()</code> function to read the csv file. In the brackets, we put the file path along with a quotation mark so that pandas will read the file into a dataframe from that address. The file path can be either an URL or your local file address.<br> # # Because the data does not include headers, we can add an argument <code>headers = None</code> inside the <code>read_csv()</code> method so that pandas will not automatically set the first row as a header.<br> # # You can also assign the dataset to any variable you create. # # </p> # # This dataset was hosted on IBM Cloud object. Click <a href="https://cocl.us/DA101EN_object_storage?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01">HERE</a> for free storage. 
# # + # Import pandas library import pandas as pd # Read the online file by the URL provides above, and assign it to variable "df" other_path = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/auto.csv" df = pd.read_csv(other_path, header=None) # - # After reading the dataset, we can use the <code>dataframe.head(n)</code> method to check the top n rows of the dataframe, where n is an integer. Contrary to <code>dataframe.head(n)</code>, <code>dataframe.tail(n)</code> will show you the bottom n rows of the dataframe. # # show the first 5 rows using dataframe.head() method print("The first 5 rows of the dataframe") df.head(5) # <div class="alert alert-danger alertdanger" style="margin-top: 20px"> # <h1> Question #1: </h1> # <b>Check the bottom 10 rows of data frame "df".</b> # </div> # # Write your code below and press Shift+Enter to execute df.tail(10) # <details><summary>Click here for the solution</summary> # # ```python # print("The last 10 rows of the dataframe\n") # df.tail(10) # ``` # # <h3>Add Headers</h3> # <p> # Take a look at our dataset. Pandas automatically set the header with an integer starting from 0. # </p> # <p> # To better describe our data, we can introduce a header. This information is available at: <a href="https://archive.ics.uci.edu/ml/datasets/Automobile?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank">https://archive.ics.uci.edu/ml/datasets/Automobile</a>. # </p> # <p> # Thus, we have to add headers manually. # </p> # <p> # First, we create a list "headers" that include all column names in order. # Then, we use <code>dataframe.columns = headers</code> to replace the headers with the list we created. 
# </p>
#

# create headers list
headers = ["symboling","normalized-losses","make","fuel-type","aspiration", "num-of-doors","body-style",
         "drive-wheels","engine-location","wheel-base", "length","width","height","curb-weight","engine-type",
         "num-of-cylinders", "engine-size","fuel-system","bore","stroke","compression-ratio","horsepower",
         "peak-rpm","city-mpg","highway-mpg","price"]
print("headers\n", headers)

# We replace headers and recheck our dataframe:
#

df.columns = headers
df.head(10)

# We need to replace the "?" symbol with NaN so the dropna() can remove the missing values:
#

# BUG FIX: the original replaced '?' with the *string* 'NaN', which dropna()
# cannot detect as missing — np.nan is the actual missing-value marker.
df1 = df.replace('?', np.nan)

# We can drop missing values along the column "price" as follows:
#

df = df1.dropna(subset=["price"], axis=0)
df.head(20)

# Now, we have successfully read the raw dataset and added the correct headers into the dataframe.
#

# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #2: </h1>
# <b>Find the name of the columns of the dataframe.</b>
# </div>
#

# Write your code below and press Shift+Enter to execute
df1.columns

# <details><summary>Click here for the solution</summary>
#
# ```python
# print(df.columns)
# ```
#

# <h2>Save Dataset</h2>
# <p>
# Correspondingly, Pandas enables us to save the dataset to csv. By using the <code>dataframe.to_csv()</code> method, you can add the file path and name along with quotation marks in the brackets.
# </p>
# <p>
# For example, if you would save the dataframe <b>df</b> as <b>automobile.csv</b> to your local machine, you may use the syntax below, where <code>index = False</code> means the row names will not be written.
# </p>
#

# + active=""
# df.to_csv("automobile.csv", index=False)
# -

# We can also read and save other file formats. We can use similar functions like **`pd.read_csv()`** and **`df.to_csv()`** for other data formats.
The functions are listed in the following table: # # <h2>Read/Save Other Data Formats</h2> # # | Data Formate | Read | Save | # | ------------ | :---------------: | --------------: | # | csv | `pd.read_csv()` | `df.to_csv()` | # | json | `pd.read_json()` | `df.to_json()` | # | excel | `pd.read_excel()` | `df.to_excel()` | # | hdf | `pd.read_hdf()` | `df.to_hdf()` | # | sql | `pd.read_sql()` | `df.to_sql()` | # | ... | ... | ... | # # <h1 id="basic_insight">Basic Insight of Dataset</h1> # <p> # After reading data into Pandas dataframe, it is time for us to explore the dataset.<br> # # There are several ways to obtain essential insights of the data to help us better understand our dataset. # # </p> # # <h2>Data Types</h2> # <p> # Data has a variety of types.<br> # # The main types stored in Pandas dataframes are <b>object</b>, <b>float</b>, <b>int</b>, <b>bool</b> and <b>datetime64</b>. In order to better learn about each attribute, it is always good for us to know the data type of each column. In Pandas: # # </p> # df.dtypes # A series with the data type of each column is returned. # # check the data type of data frame "df" by .dtypes print(df.dtypes) # <p> # As shown above, it is clear to see that the data type of "symboling" and "curb-weight" are <code>int64</code>, "normalized-losses" is <code>object</code>, and "wheel-base" is <code>float64</code>, etc. # </p> # <p> # These data types can be changed; we will learn how to accomplish this in a later module. # </p> # # <h2>Describe</h2> # If we would like to get a statistical summary of each column e.g. count, column mean value, column standard deviation, etc., we use the describe method: # # + active="" # dataframe.describe() # - # This method will provide various summary statistics, excluding <code>NaN</code> (Not a Number) values. 
# df.describe() # <p> # This shows the statistical summary of all numeric-typed (int, float) columns.<br> # # For example, the attribute "symboling" has 205 counts, the mean value of this column is 0.83, the standard deviation is 1.25, the minimum value is -2, 25th percentile is 0, 50th percentile is 1, 75th percentile is 2, and the maximum value is 3. <br> # # However, what if we would also like to check all the columns including those that are of type object? <br><br> # # You can add an argument <code>include = "all"</code> inside the bracket. Let's try it again. # # </p> # # describe all the columns in "df" df.describe(include = "all") # <p> # Now it provides the statistical summary of all the columns, including object-typed attributes.<br> # # We can now see how many unique values there, which one is the top value and the frequency of top value in the object-typed columns.<br> # # Some values in the table above show as "NaN". This is because those numbers are not available regarding a particular column type.<br> # # </p> # # <div class="alert alert-danger alertdanger" style="margin-top: 20px"> # <h1> Question #3: </h1> # # <p> # You can select the columns of a dataframe by indicating the name of each column. For example, you can select the three columns as follows: # </p> # <p> # <code>dataframe[[' column 1 ',column 2', 'column 3']]</code> # </p> # <p> # Where "column" is the name of the column, you can apply the method ".describe()" to get the statistics of those columns as follows: # </p> # <p> # <code>dataframe[[' column 1 ',column 2', 'column 3'] ].describe()</code> # </p> # # Apply the method to ".describe()" to the columns 'length' and 'compression-ratio'. 
# # </div> # # Write your code below and press Shift+Enter to execute df[['length', 'compression-ratio']].describe() # <details><summary>Click here for the solution</summary> # # ```python # df[['length', 'compression-ratio']].describe() # ``` # # </details> # # <h2>Info</h2> # Another method you can use to check your dataset is: # # + active="" # dataframe.info() # - # It provides a concise summary of your DataFrame. # # This method prints information about a DataFrame including the index dtype and columns, non-null values and memory usage. # # look at the info of "df" df.info() # <h1>Excellent! You have just completed the Introduction Notebook!</h1> # # ### Thank you for completing this lab! # # ## Author # # <a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank"><NAME></a> # # ### Other Contributors # # <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank"><NAME> PhD</a> # # <NAME> # # <NAME> # # <NAME> # # Parizad # # <NAME> # # <a href="https://www.linkedin.com/in/fiorellawever/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank"><NAME></a> # # <a href="https://www.linkedin.com/in/yi-leng-yao-84451275/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDA0101ENSkillsNetwork20235326-2021-01-01" target="_blank" ><NAME></a>. 
# # ## Change Log # # | Date (YYYY-MM-DD) | Version | Changed By | Change Description | # | ----------------- | ------- | ---------- | ---------------------------------------- | # | 2020-10-30 | 2.3 | Lakshmi | Changed URL of the csv | # | 2020-09-22 | 2.2 | Nayef | Added replace() method to remove '?' | # | 2020-09-09 | 2.1 | Lakshmi | Made changes in info method of dataframe | # | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab | # # <hr> # # ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/> #
DataAnalysisWeek1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from xlwings import Workbook, Range wb = Workbook() Range('A1').value = 1000 Range('A1').value Range('Sheet1', 'A1').value from numpy.random import randn Range('A2').value = randn(4, 4) Range('A2', asarray=True).table.value from pandas import DataFrame data = Range('A2').table.value df = DataFrame(data[1:], columns=data[0]) df Range('A10').value = df Range('A17', index=False, header=False).value = df
xlwings.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Q1:
# Think of at least three kinds of your favorite pizza. Store these pizza names in a list, and then use a for loop to print the name of each pizza.

# +
pizza_list = ["Chicken Pizza", "Beaf Pizza", "Mutton Pizza"]

## for loop
for x in pizza_list:
    print(x + " is my favorite")
# -

# # Q2
# Start with your last question , Modify your for loop to print a sentence using the name of the pizza
# instead of printing just the name of the pizza. For each pizza you should
# have one line of output containing a simple statement like I like pepperoni
# pizza.

for y in pizza_list:
    print(f"I like {y}")

# # Q3:
# Use a for loop to print the numbers from 1 to 20,
# inclusive.

# range(1, 21) yields 1..20 directly — no need to mutate the loop variable,
# and print() already ends with a newline.
for num in range(1, 21):
    print(num)

# # Q4:
# Use the third argument of the range() function to make a list
# of the odd numbers from 1 to 20. Use a for loop to print each number.

# Stepping by 2 from 1 gives the odd numbers 1, 3, ..., 19.
for i in range(1, 20, 2):
    print(i)

# # Q5:
# Make a list of the multiples of 3 from 3 to 30. Use a for loop to
# print the numbers in your list.

# +
# range with step 3 builds the multiples directly — no modulo filter needed.
multiple_of_three = list(range(3, 31, 3))

for j in multiple_of_three:
    print(j)
# -

# # Q6:
# A number raised to the third power is called a cube. For example,
# the cube of 2 is written as 2**3 in Python. Make a list of the first 10 cubes (that
# is, the cube of each integer from 1 through 10), and use a for loop to print out
# the value of each cube

# range(1, 11) iterates 1..10 without mutating the loop variable.
for i in range(1, 11):
    print(f"The cube of {i} is {i**3}")

# # Q7:
# ###### Make a python program that contains your nine favourite dishes in a list called foods.
#
# ###### Print the message, The first three items in the list are:.
# ###### Then use a slice to print the first three items from that program’s list.
#
# ###### Print the message, Three items from the middle of the list are:
# ###### Use a slice to print three items from the middle of the list.
#
# ###### Print the message, The last three items in the list are:
# ###### Use a slice to print the last three items in the list.

# +
foods = ["Biryani", "Pulaow", "Zarda", "Qorma", "Karahi", "Nihari", "Rasmalai", "Faloodah", "Icecream"]

# Plain slice syntax is the idiomatic spelling of slice(a, b); the printed
# lists are identical to the originals.
print(f'The First Three Items Are from Rice {foods[0:3]}')
print(f"The middle three items are spicy {foods[3:6]}")
print(f"The last three items are sweets {foods[6:9]}")
# -

# # Q8:
# ### Start with your program from your last Question8.
# ###### Make a copy of the list of foods, and call it friend_foods.
# ###### Then, do the following:
# ###### Add a new dish to the original list.
# ###### Add a different dish to the list friend_foodss.
# ###### Prove that you have two separate lists.
# ###### Print the message, My favorite pizzas are: and then use a for loop to print the first list.
# ###### Print the message,
# ###### My friend’s favorite foods are:, and then use a for loop to print the second list.
#
# ##### NOTE: Make sure each new dish is stored in the appropriate list.

# +
# .copy() creates an independent list, so the two appends below diverge.
friend_foods = foods.copy()

foods.append("<NAME>")
friend_foods.append("Qulfi")

# Different appended dishes => the sets differ, proving the lists are separate.
if set(friend_foods) == set(foods):
    print("Both sets are same")
else:
    print("Both sets are different")

print(f"My favorite dishes are {foods}")
print(f"My friend favorite dishes are {friend_foods}")
# -

# # Q9:
# Take a user input from console line.Store it in a variable called Alien_color.
#
# If the alien’s color is red, print a statement that the player just earned 5 points for shooting the alien.
#
# If the alien’s color isn’t green, print a statement that the player just earned 10 points.
#
# If the alien's color isn't red or green , print a statement: Alien is no more.....
def alien_shot_message(alien_color):
    """Return the Q9 scoring message for the given alien colour."""
    if alien_color == 'red':
        return 'The player just earned 5 points for shooting the alien.'
    elif alien_color == 'green':
        return "The player just earned 10 points."
    else:
        return "Alien is no more....."


# The original repeated the same prompt/branch cell three times verbatim; a
# loop over one helper keeps the behaviour (three prompts, identical messages).
for _ in range(3):
    user_input = input("Enter red or green : ")
    print(alien_shot_message(user_input))

# # Q10:
# Write an if-elif-else chain that determines a person’s
# stage of life. Set a value for the variable age, and then:
#
# • If the person is less than 2 years old, print a message that the person is a baby.
#
# • If the person is at least 2 years old but less than 4, print a message that the person is a toddler.
#
# • If the person is at least 4 years old but less than 13, print a message that the person is a kid.
#
# • If the person is at least 13 years old but less than 20, print a message that the person is a teenager.
#
# • If the person is at least 20 years old but less than 65, print a message that the person is an adult.
#
# • If the person is age 65 or older, print a message that the person is an elder.
# +
def life_stage(age):
    """Return the Q10 life-stage phrase ('a baby', ..., 'an elder') for age."""
    if age >= 65:
        return 'an elder'
    elif age >= 20:
        return 'an adult'
    elif age >= 13:
        return 'a teenager'
    elif age >= 4:
        return 'a kid'
    elif age >= 2:
        return 'a toddler'
    else:
        return 'a baby'


# Same six example ages (and identical printed lines) as the original six
# copy-pasted cells.
for age in (25, 77, 17, 9, 3, 1.5):
    print(f'the person is {life_stage(age)}.')
# -

# # Q11:
# Do the following to create a program that simulates how websites ensure that everyone has a unique username.
#
# • Make a list of five or more usernames called current_users.
#
# • Make another list of five usernames called new_users.
# Make sure one or two of the new usernames are also in the current_users list.
#
# • Loop through the new_users list to see if each new username has already been used.
# If it has, print a message that the person will need to enter a new username.
# If a username has not been used, print a message saying that the username is available.
#
# • Make sure your comparison is case insensitive. If 'John' has been used, 'JOHN' should not be accepted.

# +
def username_message(new_user, current_users):
    """Return the availability message for new_user.

    The comparison is case-insensitive, as the exercise requires: if 'john'
    is taken then 'JOHN' must be rejected too (the original compared
    case-sensitively).
    """
    taken = {user.lower() for user in current_users}
    if new_user.lower() in taken:
        return f'the person {new_user} need to enter another username'
    # Typo fix in the user-facing message: 'availabe' -> 'available'.
    return f'The {new_user} is available'


current_users = ['umaima', 'dua', 'sada', 'himna', 'areeba']
new_users = ['shakeel', 'shahid', 'shabbir', 'sada', 'umaima']
## umaima and sada are common in both lists

for new_user in new_users:
    print(username_message(new_user, current_users))
# -

# # Q12:
# Use a dictionary to store information about a person you know.
# Store their first name, last name, age, and the city in which they live.
# You should have keys such as first_name, last_name, age, and city.
# Print each piece of information stored in your dictionary

# +
info = {'first_name' : 'Shakeel', 'last_name' : 'Haider' , 'age' : '25' , 'city' : 'karachi' }

print(info['first_name'])
print(info['last_name'])
print(info['age'])
print(info['city'])
# -

# # Q13:
# Starts with your last question 12 , loop through the dictionary’s keys and values.
# When you’re sure that your loop works, add five more Python terms to your
# dictionary . When you run your program again, these new words and meanings
# should automatically be included in the output.
# +
# The three basic ways to walk a dictionary: keys, values, key/value pairs.
print('Iterating over keys')
for key_name in info.keys():
    print(key_name)

print('Iterating over values')
for stored_value in info.values():
    print(stored_value)

print('iterating over key value pair')
for key, value in info.items():
    print(f'{key} : {value}')
# -

# Extend the dictionary with four more entries; dict insertion order is kept,
# so they appear at the end of the next loop's output.
info.update({
    'qualification': 'BS Software Engineering',
    'experience': 'Fresh',
    'focus_area': 'Machine Learning',
    'career': 'Zero',
})

# +
print('iterating over key value pair again')
for key, value in info.items():
    print(f'{key} : {value}')
# -

# # Q14:
# Make a dictionary containing three major rivers and the country
# each river runs through. One key-value pair might be 'nile': 'egypt'.
#
#
# • Use a loop to print a sentence about each river, such as The Nile runs
# through Egypt.
#
# NOTE: use upper case through keys and values.

# +
major_rivers = {'nile' : 'egypt', 'yangtze' : 'china', 'mississippi' : 'usa'}

for river, country in major_rivers.items():
    print(f"The {river.upper()} runs through {country.upper()}")
# -

# # Q15:
# Make several dictionaries, where the name of each dictionary is the
# name of a pet. In each dictionary, include the kind of animal and the owner’s
# name. Store these dictionaries in a list called pets. Next, loop through your list
# and as you do print everything you know about each pet.

# +
dog = {'name' : 'dog', 'kind' : 'friendly', 'owner' : 'zakir bhai'}
cat = {'name' : 'cat','kind' : 'friendly', 'owner' : 'shakeel'}
panther = {'name' : 'panther','kind' : 'non-friendly', 'owner' : 'shumail'}
lion = {'name' : 'lion','kind' : 'non-friendly', 'owner' : 'shujaat'}
parrot = {'name' : 'parrot','kind' : 'bird' , 'owner' : 'suleman'}

pets = [dog, cat, panther, lion, parrot]

# Print every attribute of every pet, with a blank line between pets.
for pet in pets:
    for attribute, description in pet.items():
        print(f" {attribute.title()} : {description.title()}")
    print('', end='\n')
# -
Assignments/Practise Assignment 3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- print(40* "*") print("Bem vindo ao jogo da adivinhação") print(40* "*") num_secreto = 42 chute = int(input("Digite o seu número: ")) print("Você digitou", chute) # + if num_secreto == chute: print("Você acertou! :)") else: print("Você errou! :(") print("Fim de Jogo") # - type(chute) # O input sempre devolve um string, então devo converter para inteiro minha_idd = 29 dela_idd = 30 if minha_idd == dela_idd: print("idades iguais") else: print("idd diferentes") idd1 = "10" idd2 = "20" print(idd1 + idd2) #str e int eu não consigo somar nome = "Rafa" sobren = "Borto" print(nome, sobren, sep = "_", end = "A")
PY_ 01_Intro_parte_1/Jogo_adivinhacao.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import sys

# Make the project-local dataset helpers importable.
module_path = '/home/diplomski-rad/consensus-net/src/python/dataset/'
if module_path not in sys.path:
    print('Adding dataset module.')
    sys.path.append(module_path)

import dataset
# -

# Load the data, reshaped for a 2D convolution; 10% held out for validation.
X, y, X_train, X_validate, y_train, y_validate = dataset.read_dataset_and_reshape_for_conv(
    './pysam-all-dataset-n3-X.npy',
    './pysam-all-dataset-n3-y.npy',
    0.1)

from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D


# +
def build_and_train(conv_filters, batch_size=10000, epochs=200):
    """Build, compile and fit a small Conv2D softmax classifier.

    The four original cells were identical except for the convolution stack,
    so the duplication is factored into this helper.

    Parameters
    ----------
    conv_filters : list of int
        One Conv2D layer (kernel 3, 'same' padding, ReLU) is stacked per
        entry, with that many filters.
    batch_size, epochs : int
        Passed straight to ``model.fit``.

    Returns
    -------
    The fitted ``keras.models.Model``.
    """
    input_layer = Input(shape=(7, 1, 4))
    layer = input_layer
    for filters in conv_filters:
        layer = Conv2D(filters=filters,
                       kernel_size=3,
                       padding='same',
                       activation='relu')(layer)
    flatten = Flatten()(layer)
    predictions = Dense(4, activation='softmax')(flatten)

    model = Model(input_layer, predictions)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())

    model.fit(X_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_validate, y_validate))
    return model
# -

# Same four experiments as before, in the same order:
# one conv layer with 10 then 20 filters, then two-layer 10->5 and 10->10.
build_and_train([10])

build_and_train([20])

build_and_train([10, 5])

build_and_train([10, 10])
experiments/karla/diplomski-rad/blade/pb/datasets/n3-all/convnet-all-n3-test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # Analysis # * As expected, the weather becomes significantly warmer as one approaches the equator (0 Deg. Latitude). More interestingly, however, is the fact that the southern hemisphere tends to be warmer this time of year than the northern hemisphere. This may be due to the tilt of the earth. # * There is no strong relationship between latitude and cloudiness. However, it is interesting to see that a strong band of cities sits at 0, 80, and 100% cloudiness. # * There is no strong relationship between latitude and wind speed. However, in northern hemispheres there is a flurry of cities with over 20 mph of wind. # # + # Dependencies and Setup import matplotlib.pyplot as plt import pandas as pd import numpy as np import requests import time from pprint import pprint # Import API key from api_keys import api_key # Incorporated citipy to determine city based on latitude and longitude from citipy import citipy # Output File (CSV) output_data_file = "output_data/cities.csv" # Range of latitudes and longitudes lat_range = (-90, 90) lng_range = (-180, 180) # - # ## Generate Cities List # + # List for holding lat_lngs and cities lat_lngs = [] cities = [] # Create a set of random lat and lng combinations lats = np.random.uniform(low=-90.000, high=90.000, size=1500) lngs = np.random.uniform(low=-180.000, high=180.000, size=1500) lat_lngs = zip(lats, lngs) # Identify nearest city for each lat, lng combination for lat_lng in lat_lngs: city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name # If the city is unique, then add it to a our cities list if city not in cities: cities.append(city) # Print the city count to confirm sufficient count len(cities) # - base_url = 'http://api.openweathermap.org/data/2.5/weather?' 
units = 'imperial'

# +
latitudes = []
temperatures = []
humidities = []
cloudiness = []
wind_speeds = []

for i, city in enumerate(cities):
    q_url = f'{base_url}appid={api_key}&q={city}&units={units}'
    try:
        weather = requests.get(q_url).json()
        # BUG FIX: read every field into locals *before* appending. The
        # original appended field-by-field, so a KeyError on a later field
        # (e.g. 'wind') left the earlier lists one element longer and
        # silently misaligned the resulting columns.
        lat = weather['coord']['lat']
        humidity = weather['main']['humidity']
        clouds = weather['clouds']['all']
        wind = weather['wind']['speed']
        temp = weather['main']['temp']
    except (KeyError, IndexError):
        print('City not found... skipping')
    else:
        latitudes.append(lat)
        humidities.append(humidity)
        cloudiness.append(clouds)
        wind_speeds.append(wind)
        temperatures.append(temp)
        print(f'Processed Record {i} | {city}')
# -

# Sanity check: all parallel lists must have the same length.
print([len(latitudes), len(humidities), len(cloudiness), len(wind_speeds)])

# +
# Build the frame directly from a dict (the original passed humidities as the
# *index* and then reset/renamed it — same columns, far less obvious).
weather_df = pd.DataFrame({'Humidity': humidities, 'Latitude': latitudes})
# -

weather_df['Temperature'] = temperatures
weather_df['Wind Speed'] = wind_speeds
weather_df['Cloudiness'] = cloudiness

weather_df.to_csv('Weather Data.csv')
weather_df.head()

# Latitude vs temperature scatter plot.
weather_df.plot(kind='scatter', x='Latitude', y='Temperature', alpha=.7, edgecolor='black', s=75, figsize=(15,10))
plt.grid()
plt.title('City Latitude vs Temperature (F) (11/05/2018)', fontsize=20)
plt.xlim(-60,80)
plt.ylim(-30,100)
plt.savefig('Graphs/City Latitude vs Temp')

# Latitude vs humidity scatter plot.
weather_df.plot(kind='scatter', x='Latitude', y='Humidity', alpha=.7, edgecolor='black', s=75, figsize=(15,10))
plt.grid()
plt.title('City Latitude vs Humidity (11/05/2018)', fontsize=20)
plt.xlim(-60,80)
plt.savefig('Graphs/City Latitude vs Humidity')

# Latitude vs cloudiness scatter plot.
weather_df.plot(kind='scatter', x='Latitude', y='Cloudiness', alpha=.7, edgecolor='black', s=75, figsize=(15,10))
plt.grid()
plt.title('City Latitude vs Cloudiness (11/05/2018)', fontsize=20)
plt.xlim(-60,80)
plt.savefig('Graphs/City Latitude vs Cloudiness')

# Latitude vs wind speed scatter plot.
weather_df.plot(kind='scatter', x='Latitude', y='Wind Speed', alpha=.7, edgecolor='black', s=75, figsize=(15,10))
plt.grid()
plt.title('City Latitude vs Wind Speed (mph) (11/05/2018)', fontsize=20)
plt.xlim(-60,80)
plt.savefig('Graphs/City Latitude vs Wind Speed')
Homework 6/Homework 6 - Will Doucet.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # TSG036 - Controller logs # ======================== # # Get the last ‘n’ hours of controller logs. # # Steps # ----- # # ### Parameters # + tags=["parameters"] since_hours = 2 since_seconds = since_hours * 3600 # seconds in hour coalesce_duplicates = True # - # ### Instantiate Kubernetes client # + tags=["hide_input"] # Instantiate the Python Kubernetes client into 'api' variable import os try: from kubernetes import client, config from kubernetes.stream import stream if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ: config.load_incluster_config() else: config.load_kube_config() api = client.CoreV1Api() print('Kubernetes client instantiated') except ImportError: from IPython.display import Markdown display(Markdown(f'SUGGEST: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.')) raise # - # ### Get the namespace for the big data cluster # # Get the namespace of the big data cluster from the Kuberenetes API. # # NOTE: If there is more than one big data cluster in the target # Kubernetes cluster, then set \[0\] to the correct value for the big data # cluster. 
# + tags=["hide_input"]
# Place Kubernetes namespace name for BDC into 'namespace' variable
try:
    namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
    from IPython.display import Markdown
    display(Markdown(f'SUGGEST: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
    display(Markdown(f'SUGGEST: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
    display(Markdown(f'SUGGEST: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
    raise

print('The kubernetes namespace for your big data cluster is: ' + namespace)
# -

# ### Get controller logs

# +
container = "controller"

pod_list = api.list_namespaced_pod(namespace, label_selector="app=controller")

entries_for_analysis = []

for pod in pod_list.items:
    print (f"Logs for controller pod: {pod.metadata.name}")
    try:
        logs = api.read_namespaced_pod_log(pod.metadata.name, namespace, container=container, since_seconds=since_seconds)
    except Exception as err:
        # Best effort: report the pod whose logs could not be read and move on.
        print(f"ERROR: {err}")
    else:
        if coalesce_duplicates:
            # Collapse consecutive lines that are identical after their
            # timestamp prefix, printing a "(xN)" count instead.
            previous_line = ""
            duplicates = 1
            for line in logs.split('\n'):
                if line[27:] != previous_line[27:]:
                    if duplicates != 1:
                        print(f"\t{previous_line} (x{duplicates})")
                    print(f"\t{line}")
                    duplicates = 1
                else:
                    duplicates = duplicates + 1
                    continue
                # Collect WARN/ERROR lines (level field follows the timestamp)
                # for the analysis cell below.
                if line[25:34] == "| ERROR |" or line[25:33] == "| WARN |":
                    entries_for_analysis.append(line)
                previous_line = line
        else:
            print(logs)

print (f"There were {len(entries_for_analysis)} warnings and errors found.")
# -

# ### Analyze log entries and suggest relevant Troubleshooting Guides

# + tags=["hide_input"]
# Analyze log entries and suggest further relevant troubleshooting guides
from IPython.display import Markdown

# Each entry: [log text to match, notebook path, human-readable title].
tsgs = [
    ["""doc is missing key: /data""",
     """../repair/tsg038-doc-is-missing-key-error.ipynb""",
     """TSG038 - BDC
create failures due to - doc is missing key"""],
    ["""Failed when starting controller service. System.TimeoutException: Operation timed out after 10 minutes""",
     """../repair/tsg057-failed-when-starting-controller.ipynb""",
     """TSG057 - Failed when starting controller service. System.TimeoutException"""]]

suggestions = 0

for entry in entries_for_analysis:
    print (entry)
    for tsg in tsgs:
        if entry.find(tsg[0]) != -1:
            # BUG FIX: the original wrote the link target as the literal text
            # "tsg[1]"; it must be interpolated so the Markdown link points at
            # the troubleshooting notebook.
            display(Markdown(f'SUGGEST: Use [{tsg[2]}]({tsg[1]}) to resolve this issue.'))
            suggestions = suggestions + 1
    print("")

print(f"{len(entries_for_analysis)} log entries analyzed. {suggestions} further troubleshooting suggestions made inline.")
# -

print('Notebook execution complete.')

# Related
# -------
#
# - [TSG027 - Observe cluster
#   deployment](../diagnose/tsg027-observe-bdc-create.ipynb)
Big-Data-Clusters/GDR1/public/content/log-analyzers/tsg036-get-controller-logs.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + slideshow={"slide_type": "-"} from __future__ import print_function, division import numpy as np import uproot import uproot_methods import awkward # histogram creation and manipulation from fnal_column_analysis_tools import hist # nbagg is the interactive matplotlib backend # %matplotlib nbagg import matplotlib.pyplot as plt # histogram display (kept separate from hist to remove matplotlib dependancy where unnecessary) from fnal_column_analysis_tools.hist import plot # - # let's borrow some example data from uproot # See https://mybinder.org/v2/gh/scikit-hep/uproot/master?filepath=binder%2Ftutorial.ipynb # !curl -O http://scikit-hep.org/uproot/examples/HZZ.root fin = uproot.open("HZZ.root") tree = fin["events"] # + # let's build the lepton arrays back into objects # in the future, some of this verbosity can be reduced arrays = {k.replace('Electron_', ''): v for k,v in tree.arrays("Electron_*", namedecode='ascii').items()} p4 = uproot_methods.TLorentzVectorArray.from_cartesian( arrays.pop('Px'), arrays.pop('Py'), arrays.pop('Pz'), arrays.pop('E'), ) electrons = awkward.JaggedArray.zip(p4=p4, **arrays) arrays = {k.replace('Muon_', ''): v for k,v in tree.arrays("Muon_*", namedecode='ascii').items()} p4 = uproot_methods.TLorentzVectorArray.from_cartesian( arrays.pop('Px'), arrays.pop('Py'), arrays.pop('Pz'), arrays.pop('E'), ) muons = awkward.JaggedArray.zip(p4=p4, **arrays) print("Avg. electrons/event:", electrons.counts.sum()/tree.numentries) print("Avg. 
muons/event:", muons.counts.sum()/tree.numentries) # - # Show electrons in the first event with some electrons[electrons.counts > 0][0].tolist() # + # Two types of axes exist presently: bins and categories lepton_kinematics = hist.Hist("Events", hist.Cat("flavor", "Lepton flavor"), hist.Bin("pt", "$p_{T}$", 20, 0, 100), hist.Bin("eta", "$\eta$", [-2.5, -1.4, 0, 1.4, 2.5]), ) # Pass keyword arguments to fill, all arrays must be flat numpy arrays # User is responsible for ensuring all arrays have same jagged structure! lepton_kinematics.fill(flavor="electron", pt=electrons['p4'].pt.flatten(), eta=electrons['p4'].eta.flatten()) lepton_kinematics.fill(flavor="muon", pt=muons['p4'].pt.flatten(), eta=muons['p4'].eta.flatten()) # + # looking at lepton pt for all eta lepton_pt = lepton_kinematics.project("eta", overflow='under') fig, ax, primitives = plot.plot1d(lepton_pt, overlay="flavor", stack=True, fill_opts={'alpha': .5, 'edgecolor': (0,0,0,0.3)}) # all matplotlib primitives are returned, in case one wants to tweak them # e.g. maybe you really miss '90s graphics... primitives['legend'].shadow = True # - # Clearly the yields are much different, are the shapes similar? lepton_pt.label = "Density" fig, ax, primitives = plot.plot1d(lepton_pt, overlay="flavor", density=True) # ...somewhat, maybe electrons are a bit softer # Let's stack them, after defining some nice styling stack_fill_opts = {'alpha': 0.8, 'edgecolor':(0,0,0,.5)} stack_error_opts = {'label':'Stat. 
Unc.', 'hatch':'///', 'facecolor':'none', 'edgecolor':(0,0,0,.5), 'linewidth': 0} # maybe we want to compare different eta regions # plotgrid accepts row and column axes, and creates a grid of 1d plots as appropriate fig, ax = plot.plotgrid(lepton_kinematics, row="eta", overlay="flavor", stack=True, fill_opts=stack_fill_opts, error_opts=stack_error_opts, ) # + # ok lets make some Z candidates ee_cands = electrons.choose(2) mm_cands = muons.choose(2) # filter opposite-sign good_ee = ee_cands.i0['Charge'] + ee_cands.i1['Charge'] == 0 ee_p4 = ee_cands.i0['p4'] + ee_cands.i1['p4'] good_mm = mm_cands.i0['Charge']*mm_cands.i1['Charge'] == -1 mm_p4 = mm_cands.i0['p4'] + mm_cands.i1['p4'] # - # we can also use matplotlib builtin functions for histogramming, if preferred fig, ax = plt.subplots(1,1) _ = plt.hist(ee_p4.mass[good_ee].flatten(), bins=40) # + # for many plot styles, one can and should write the matplotlib routine themselves # let's make a scatter plot of the leading and subleading Z candidate (by delta-mass) # but first we have to calculate the right combinations (which is a bit difficult in pure columnar) # CAUTION: composing combinatorics does not check for duplicates! 
wrong = mm_cands.choose(2) print(np.unique(wrong.counts)) zz_4e = electrons.choose(4) zz_4m = muons.choose(4) print(np.unique(zz_4m.counts)) # for the ee+mm channel, composing is not an issue as the pairs are already mutually exclusive zz_2e2m = ee_cands.cross(mm_cands) # + ZMASS = 91.1876 def massmetric(cands, i, j): z1mass = (cands['%d' % i]['p4'] + cands['%d' % j]['p4']).mass k, l = set(range(4)) - {i, j} z2mass = (cands['%d' % k]['p4'] + cands['%d' % l]['p4']).mass deltam = np.abs(z1mass - ZMASS) deltaq = np.abs(cands['%d' % i]['Charge'] + cands['%d' % j]['Charge']) # inflate deltam to absurd number if charge sum is nonzero return z1mass, z2mass, deltam + 1e10*deltaq def bestcombination(zzcands): good_charge = sum(zzcands[str(i)]['Charge'] for i in range(4)) == 0 good_event = good_charge.sum() == 1 # this downselection keeps all events where exactly one candidate satisfies the requirement # but does not reduce the number of events, i.e. len(zz_4m) stays the same zzcands = zzcands[good_charge*good_event][:,:1] if zzcands.counts.sum() == 0: # empty array (because a bug in concatenate makes it fail on empty arrays) empty = awkward.JaggedArray.fromcounts(np.zeros(len(zzcands), dtype='i'), []) return empty, empty # now we have to check the permutations of leptons for closest mass to Z boson # only 4 of these 6 permutations are valid charge pairs, but its easier # to compare them all, and assign a large delta mass rather than figure out which # are valid beforehand z1mass = [] z2mass = [] iperm = [] for i,j in [(0,1), (0,2), (0,3), (1,2), (1,3), (2,3)]: z1, z2, idx = massmetric(zzcands, i, j) z1mass.append(z1) z2mass.append(z2) iperm.append(idx) z1mass = awkward.JaggedArray.concatenate(z1mass, axis=1) z2mass = awkward.JaggedArray.concatenate(z2mass, axis=1) iperm = awkward.JaggedArray.concatenate(iperm, axis=1) z1mass = z1mass[iperm.argmin()] z2mass = z2mass[iperm.argmin()] return z1mass, z2mass z1_4m, z2_4m = bestcombination(zz_4m) z1_4e, z2_4e = 
bestcombination(zz_4e) # for 2e2m its a bit simpler good_charge = (zz_2e2m.i0['Charge'] + zz_2e2m.i1['Charge'] == 0) & (zz_2e2m.i2['Charge'] + zz_2e2m.i3['Charge'] == 0) good_event = good_charge.sum() == 1 zz_2e2m = zz_2e2m[good_event*good_charge][:,:1] za_2e2m, zb_2e2m, deltam_a = massmetric(zz_2e2m, 0, 1) _, _, deltam_b = massmetric(zz_2e2m, 2, 3) # this is a good place for awkward.where, but its not available yet z_2e2m = awkward.JaggedArray.concatenate([za_2e2m, zb_2e2m], axis=1) deltam = awkward.JaggedArray.concatenate([deltam_a, deltam_b], axis=1) z1_2e2m = z_2e2m[deltam.argmin()] z2_2e2m = z_2e2m[deltam.argmax()] # see if any events had candidates in multiple categories # this is extremely rare, but if it happens we would have again to choose a preferred category print("Max candidates/event:", np.max(z1_4e.counts + z1_4m.counts + z1_2e2m.counts)) # + fig, ax = plt.subplots() common = {'linestyle': 'none', 'alpha': 0.7} ax.plot(z2_4e.flatten(), z1_4e.flatten(), marker='d', label=r'$ZZ^{*}\to 4e$', **common) ax.plot(z2_4m.flatten(), z1_4m.flatten(), marker='o', label=r'$ZZ^{*}\to 4\mu$', **common) ax.plot(z2_2e2m.flatten(), z1_2e2m.flatten(), marker='s', label=r'$ZZ^{*}\to 2e2\mu$', **common) ax.legend(title='Channel') ax.set_xlim(0, 120) ax.set_ylim(60, 120) ax.set_xlabel('$Z_2$ mass [GeV]') ax.set_ylabel('$Z_1$ mass [GeV]') # + # What a curious sample, it looks like more of a ZZ background sample # Let's check the 4l mass # for some reason, sum(...) 
doesn't work, try another route from functools import reduce from operator import add hmass = hist.Hist("Events / 20 GeV", hist.Cat("channel", "Channel"), hist.Bin("mass", r"$m_{ZZ^{*}}$ [GeV]", 15, 0, 300) ) hmass.fill(channel="4e", mass=reduce(add, (zz_4e[str(i)]['p4'] for i in range(4))).mass.flatten()) hmass.fill(channel="4m", mass=reduce(add, (zz_4m[str(i)]['p4'] for i in range(4))).mass.flatten()) hmass.fill(channel="2e2m", mass=reduce(add, (zz_2e2m[str(i)]['p4'] for i in range(4))).mass.flatten()) fig, ax, _ = plot.plot1d(hmass, overlay='channel') # -
binder/plotting-demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Unsupervised Learning: Clustering

# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time

from sklearn.metrics.pairwise import euclidean_distances
from sklearn.datasets import make_blobs
# -

# ### Creemos una nube de puntos aleatoria

points = make_blobs(n_samples=20, n_features=2, random_state=42, cluster_std=1.5)
df_points = pd.DataFrame(points[0], columns = ["x","y"])
df_points["number"] = np.arange(0,len(df_points),1)
df_points["label"] = [0]*len(df_points)  # 0 means "not assigned to a cluster yet"
df_points.head()

# ### Grafiquemos los puntos

fig = plt.figure(figsize=(12, 9))
x = df_points["x"]
y = df_points["y"]
c = df_points["label"]
plt.scatter(x=x, y=y, s=100, c=c)
plt.xlabel("x")
plt.ylabel("y")

# ### Calculemos matriz de distancia

# +
X = df_points[["x","y"]]
d = euclidean_distances(X, X)
np.fill_diagonal(d, np.Inf)  # a point is never its own nearest neighbour
d = pd.DataFrame(d)
d
# -

idxmin_row = d.min().idxmin()
idxmin_col = d.iloc[idxmin_row,:].idxmin()
d

# +
# fig = plt.figure(figsize=(12, 9))

# Number of distinct point pairs; each iteration consumes one pair.
iterations = int((d.shape[0]**2-d.shape[0])/2)

for i in range(1, iterations):
    ### Find minimum distance and mark the pair as visited.
    idxmin_row = d.min().idxmin()
    idxmin_col = d.iloc[idxmin_row,:].idxmin()
    d.iloc[idxmin_row, idxmin_col] = d.iloc[idxmin_col, idxmin_row] = np.Inf

    row_label = df_points.loc[idxmin_row, "label"]
    col_label = df_points.loc[idxmin_col, "label"]

    if row_label == 0 and col_label == 0:
        ### Neither point is clustered yet: open a new cluster labelled i.
        df_points.loc[idxmin_row, "label"] = i
        df_points.loc[idxmin_col, "label"] = i
    elif row_label != 0 and col_label == 0:
        ### BUG FIX: propagate the row's existing cluster *label*; the
        ### original assigned the row's index instead, despite its own
        ### "Re-label with pre-existing label" comment.
        df_points.loc[idxmin_col, "label"] = row_label
    elif row_label == 0 and col_label != 0:
        ### Same fix, mirrored: reuse the column point's existing label.
        df_points.loc[idxmin_row, "label"] = col_label
    else:
        ### Both already clustered: merge the two clusters into one.
        ### BUG FIX: the original compared indices instead of labels AND
        ### discarded the result of .replace() (which returns a new Series),
        ### so the merge never happened.
        label_min = min(row_label, col_label)
        label_max = max(row_label, col_label)
        df_points["label"] = df_points["label"].replace(to_replace=label_max, value=label_min)

    # x = df_points["x"]
    # y = df_points["y"]
    # c = df_points["label"]
    # cmap = plt.cm.Spectral
    # plt.scatter(x=x, y=y, s=100, c=c)
    # plt.show()
    # display(df_points)
    # time.sleep(3)  # only needed to pace the (disabled) animation above;
    #                # left enabled it added ~9 idle minutes per run
# -

d

df_points
ejemplos_de_clase/15_unsupervised_learning.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tuorial on json validation and json schema inspection via `omdata` package import pprint as pp import json # #### Pointers to sample json and schema files schema_file = './data/samples/omsurvey.schema' survey_file = './data/samples/omsurvey.json' with open(schema_file) as schema_fp: om_schema = json.load(schema_fp) # + #pp.pprint(om_schema,indent=0) # - with open(survey_file) as survey_fp: om_example = json.load(survey_fp) # + #pp.pprint(om_example,indent=0) # - # ## Validation from omdata import check_json, check_file help(check_json) check_json(om_example, om_schema) help(check_file) check_file(survey_file, schema_file) # ## Inspecting a given json schema from omdata import Schema, OMSchemaKeyError, OMSchemaUnknown from omdata import get_field_names # ### Retrieving paths to data fields get_field_names(om_schema) # ### Using the omdata.Schema object help(Schema) S = Schema() S.load(om_schema) S.load_from_file(schema_file) # **Note that** the schema to be inspected can be loaded either from a schema object a python `dict` or directly from a file. # ### Checking top level fields S.get_fields(main_only = True) # ### Lsting all data fields including the nested ones # Note that a field followed by an asterix `*` denotes that there might be a multiple of them. In other words the field itself is a Python `list` object. S.get_fields() # ### Checking required fields S.get_required_fields() # ### Listing data field descriptions # in this particular case where the JSON schema is designed for an OpenMaker survey, the description matches the corresponding questionaire items. In other words the source of data where they are populted from. S.get_questionaire_matches()
tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %load_ext autoreload # %autoreload 2 import sys import time import os sys.path.insert(0, '../') # import embedding methods from embedding.bernoulli import Bernoulli from embedding.kl import KL from embedding.matrix_factorization import MatrixFactorization # import evaluation methods from evaluation import evaluate_link_prediction from evaluation import evaluate_node_classification from evaluation import evaluate_node_clustering # # import utils from utils import graph_util from utils import plot_util # visualization # %matplotlib inline import seaborn as sns sns.set_style('whitegrid') import matplotlib.pyplot as plt import warnings warnings.filterwarnings('ignore') # - # ## Experiment 1 # + # Experiment 1 exp = { "max_epochs": 10000, "learning_rate": 1e-2, #Adam "weight_decay": 1e-7, "link_prediction":True, "link_pred_num_rounds": 10, "link_pred_train_ratio": 0.8, "link_pred_eval_every_n_steps": 50, "link_pred_edge_emb_method": "average", "node_classification":False, "node_class_num_rounds": 10, "node_class_train_ratio": 0.2, "node_class_eval_every_n_steps": 50, "node_clustering": False, "node_clustering_num_rounds": 10, "node_clustering_eval_epochs": 50, } datasets = ["cora"] similarities= ['adjacency','laplacian','transition', 'sym_normalized_laplacian','NetMF','ppr','sum_power_tran'] for sim in similarities: model_1 = MatrixFactorization(embedding_dimension=1, similarity_measure=sim) model_2 = MatrixFactorization(embedding_dimension=2, similarity_measure=sim) model_3 = MatrixFactorization(embedding_dimension=3, similarity_measure=sim) model_4 = MatrixFactorization(embedding_dimension=4, similarity_measure=sim) model_8 = MatrixFactorization(embedding_dimension=8, similarity_measure=sim) model_12 = MatrixFactorization(embedding_dimension=12, 
similarity_measure=sim) model_16 = MatrixFactorization(embedding_dimension=16, similarity_measure=sim) model_24 = MatrixFactorization(embedding_dimension=24, similarity_measure=sim) model_32 = MatrixFactorization(embedding_dimension=32, similarity_measure=sim) model_64 = MatrixFactorization(embedding_dimension=64, similarity_measure=sim) model_128 = MatrixFactorization(embedding_dimension=128, similarity_measure=sim) embedding_methods = [model_1,model_2,model_3, model_4,model_8,model_12, model_16, model_24, model_32, model_64,model_128] # setup folders to store experiment setup summary and results result_folder = plot_util.setup_folders_and_summary_files(exp, datasets, embedding_methods) print(f'The results of the current experiment are stored at experiments/{result_folder}') for dataset in datasets: print(f'##### {dataset} #####\n\n') # load dataset A, y = graph_util.load_dataset(dataset) for model in embedding_methods: print(model.get_method_summary()) start = time.time() # link prediction if(exp["link_prediction"]): link_prediction_folder = result_folder + "/link_prediction" evaluate_link_prediction.expLP(A,dataset,model,exp["link_pred_num_rounds"], link_prediction_folder, train_ratio=exp["link_pred_train_ratio"], edge_emb_method=exp["link_pred_edge_emb_method"],train_epochs=exp["max_epochs"], eval_epochs=exp["link_pred_eval_every_n_steps"], undirected=True) # node classification if(exp["node_classification"]): node_classification_folder = result_folder + "/node_classification" evaluate_node_classification.expNC(A,y,dataset,model,exp["node_class_num_rounds"], node_classification_folder, train_ratio=exp["node_class_train_ratio"], train_epochs=exp["max_epochs"],eval_epochs=exp["node_class_eval_every_n_steps"],undirected=True) # node clustering if(exp["node_clustering"]): node_clustering_folder = result_folder + "/node_clustering" evaluate_node_clustering.exp_Node_Clustering(A,y,dataset,model,exp["node_clustering_num_rounds"], node_clustering_folder, 
train_epochs=exp["max_epochs"], eval_epochs=exp["node_clustering_eval_epochs"],undirected=True) end = time.time() print(f'Model evaluation took: {end-start} seconds') # -
experiments/MF_Dimensionality_Experiment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 4 # # Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment. # # This assignment requires that you to find **at least** two datasets on the web which are related, and that you visualize these datasets to answer a question with the broad topic of **sports or athletics** (see below) for the region of **Farmington, Michigan, United States**, or **United States** more broadly. # # You can merge these datasets with data from different regions if you like! For instance, you might want to compare **Farmington, Michigan, United States** to Ann Arbor, USA. In that case at least one source file must be about **Farmington, Michigan, United States**. # # You are welcome to choose datasets at your discretion, but keep in mind **they will be shared with your peers**, so choose appropriate datasets. Sensitive, confidential, illicit, and proprietary materials are not good choices for datasets for this assignment. You are welcome to upload datasets of your own as well, and link to them using a third party repository such as github, bitbucket, pastebin, etc. Please be aware of the Coursera terms of service with respect to intellectual property. # # Also, you are welcome to preserve data in its original language, but for the purposes of grading you should provide english translations. You are welcome to provide multiple visuals in different languages if you would like! 
#
# As this assignment is for the whole course, you must incorporate principles discussed in the first week, such as having a high data-ink ratio (Tufte) and aligning with Cairo’s principles of truth, beauty, function, and insight.
#
# Here are the assignment instructions:
#
#  * State the region and the domain category that your data sets are about (e.g., **Farmington, Michigan, United States** and **sports or athletics**).
#  * You must state a question about the domain category and region that you identified as being interesting.
#  * You must provide at least two links to available datasets. These could be links to files such as CSV or Excel files, or links to websites which might have data in tabular form, such as Wikipedia pages.
#  * You must upload an image which addresses the research question you stated. In addition to addressing the question, this visual should follow Cairo's principles of truthfulness, functionality, beauty, and insightfulness.
#  * You must contribute a short (1-2 paragraph) written justification of how your visualization addresses your stated research question.
#
# What do we mean by **sports or athletics**?  For this category we are interested in sporting events or athletics broadly, please feel free to creatively interpret the category when building your research question!
#
# ## Tips
# * Wikipedia is an excellent source of data, and I strongly encourage you to explore it for new data sources.
# * Many governments run open data initiatives at the city, region, and country levels, and these are wonderful resources for localized data sources.
# * Several international agencies, such as the [United Nations](http://data.un.org/), the [World Bank](http://data.worldbank.org/), the [Global Open Data Index](http://index.okfn.org/place/) are other great places to look for data.
# * This assignment requires you to convert and clean datafiles. Check out the discussion forums for tips on how to do this from various sources, and share your successes with your fellow students!
#
# ## Example
# Looking for an example? Here's what our course assistant put together for the **Ann Arbor, MI, USA** area using **sports and athletics** as the topic. [Example Solution File](./readonly/Assignment4_example.pdf)
#
# ### Links:
# - [Detroit Lions Revenue Link, Statistica](https://www.statista.com/statistics/195260/revenue-of-the-detroit-lions-since-2006/)
# - [Detroit Lions - Wins/Losses](https://www.pro-football-reference.com/teams/det/index.htm)
# - [Detroit Lions Revenue (Tabular)](https://rigrrncl.labs.coursera.org/files/DetroitLionRevenue.xlsx)

# +
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns

# Load revenue and win/loss record spreadsheets, then join them on 'Year'
# so each row holds that season's revenue and record.
raw_rev = pd.read_excel('DetroitLionRevenue.xlsx')
raw_rec = pd.read_excel('DetroitLions_Record.xlsx')
df = raw_rev.merge(raw_rec, how ='inner', left_on='Year', right_on='Year')

# Win percentage: wins over total decided games (W + L), in percent.
df['Pct_win'] = df['W']/(df['W']+df['L'])*100
df.set_index(['Year'], inplace=True)
df

# +
# Dual-axis chart: revenue as bars (left axis) vs. win percentage as a line
# (right axis), one tick per season.

# create bar plot of revenue
fig = plt.figure(figsize=(14,8))
ax1 = plt.gcf().gca()
color = 'tab:blue'
#ax1.set_xlabel(df.index, fontsize=16, color = color)
ax1.set_ylabel('Team Revenue', fontsize=16, color=color)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14, color=color)
ax1.bar(df.index, df['Revenue'], color=color, alpha=0.4)
ax1.set_xticks(df.index);
#ax1.rc('xtick', labelsize=10)

# line graph on a twin y-axis sharing the same x (years)
ax2 = ax1.twinx()
color = 'tab:red'
ax2.set_ylabel('Percent of Games Won', fontsize=16, color = color)
ax2.plot(df.index, df['Pct_win'], color=color)
ax2.tick_params(axis='y', labelcolor=color)
plt.yticks(fontsize=14)

plt.title('Detroit Lions Revenue vs Percentage Wins', fontsize=28)
fig.tight_layout()
# -
Python/Applied_Visualizations_Assignment+4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="LnVNtG0Rz7B8" # # Sequential rumour stance prediction # # Rumours tend to change over time; people focus on different aspects of a story.<br> # We could view this process as distributions in word or sentence embedding space that evolve over time. However, the evolution is erratic in that it is relatively constant for short periods (within rumours) and can be volatile for longer periods (between rumours). # # Here we will tackle this problem using a semi-supervised variant of a [Subspace Aligned Classifier](https://www.cv-foundation.org/openaccess/content_iccv_2013/papers/Fernando_Unsupervised_Visual_Domain_2013_ICCV_paper.pdf). # # In this experiment, we arrange the rumours in time and align all previous rumours to the current rumour. In this way, we build up a large data set of class-aligned embedding distributions. 
#
#

# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="YQ8Sg9SJ5aaG" outputId="b4e0ce81-6e8e-4758-c47d-588452c12be7"
# !pip install sklearn imblearn scipy pandas

# + colab={} colab_type="code" id="_6Tpnq0Dz7CC"
# Silence all warnings by replacing warnings.warn with a no-op.
def warn(*args, **kwargs):
    pass
import warnings
warnings.warn = warn

import numpy as np
import pandas as pd
import pickle as pc
import dateutil
import numpy.random as rnd

from scipy.linalg import eig, eigh, svd
from scipy.spatial.distance import pdist, cdist, squareform

from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score
from sklearn.preprocessing import label_binarize

from imblearn.over_sampling import RandomOverSampler, SMOTE
from imblearn.pipeline import make_pipeline

from collections import Counter
# NOTE(review): IPython.core.debugger.Tracer is deprecated and unused below.
from IPython.core.debugger import Tracer

# + colab={} colab_type="code" id="DoEzb3aIz7CO"
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

# Set font size
fS = 20

# + colab={} colab_type="code" id="rMdAZe0Wz7CX"
def is_pos_def(A):
    """Check for positive definiteness (all eigenvalues have positive real part)."""
    return np.all(np.real(np.linalg.eigvals(A)) > 0)


def reg_cov(X):
    """
    Regularize covariance matrix until non-singular.

    Computes the covariance of the rows of X and, while it is not positive
    definite, adds an increasing ridge (reg * I) to the diagonal.

    Parameters
    ----------
    X : array
        data matrix (N samples x D features).

    Returns
    -------
    SX : array
        regularized covariance matrix (D x D).
    """
    # Number of data points
    N = X.shape[0]

    # Compute mean of data
    muX = np.mean(X, axis=0, keepdims=1)

    # Compute covariance matrix without regularization
    SX = (X - muX).T @ (X - muX) / N

    # Initialize regularization parameter
    reg = 1e-8

    # Keep going until non-singular
    while not is_pos_def(SX):

        # Compute covariance matrix with regularization
        SX = (X - muX).T @ (X - muX) / N + reg*np.eye(X.shape[1])

        # Increment reg
        reg *= 10

    # Report regularization
    if reg > 1e-5:
        print('Final regularization parameter = {}'.format(reg))

    return SX


def align_data(X, Z, CX, CZ, V):
    """
    Align data to components and transform source.

    Parameters
    ----------
    X : array
        source data set (N samples x D features)
    Z : array
        target data set (M samples x D features)
    CX : array
        source principal components (D features x d subspaces)
    CZ : array
        target principal component (D features x d subspaces)
    V : array
        transformation matrix (d subspaces x d subspaces)

    Returns
    -------
    X : array
        transformed source data (N samples x d subspaces)
    Z : array
        projected target data (M samples x d subspaces)
    """
    # Map source data onto source principal components
    XC = X @ CX

    # Align projected source data to target components
    XV = XC @ V

    # Map target data onto target principal components
    ZC = Z @ CZ

    return XV, ZC


def subspace_alignment(X, Z, subspace_dim=1):
    """
    Compute subspace and alignment matrix.

    Parameters
    ----------
    X : array
        source data set (N samples x D features)
    Z : array
        target data set (M samples x D features)
    subspace_dim : int
        Dimensionality of subspace to retain (def: 1)

    Returns
    -------
    V : array
        transformation matrix (D features x D features)
    CX : array
        source principal component coefficients
    CZ : array
        target principal component coefficients
    """
    # Data shapes
    N, DX = X.shape
    M, DZ = Z.shape

    # Check for sufficient samples
    if (N < subspace_dim) or (M < subspace_dim):
        raise ValueError('Too few samples for subspace dimensionality.')

    # Assert equivalent dimensionalities
    if not DX == DZ:
        raise ValueError('Dimensionalities of X and Z should be equal.')

    # Compute covariance matrices
    SX = np.cov(X.T)
    SZ = np.cov(Z.T)

    # Eigendecomposition for d largest eigenvectors
    # NOTE(review): the `eigvals` keyword was removed in SciPy 1.9
    # (replaced by `subset_by_index`); this code requires an older SciPy.
    valX, vecX = eigh(SX, eigvals=(DX - subspace_dim, DX-1))
    valZ, vecZ = eigh(SZ, eigvals=(DZ - subspace_dim, DZ-1))

    # Sort eigenvectors x descending eigenvalues
    CX = vecX[:, np.argsort(np.real(valX))[::-1]]
    CZ = vecZ[:, np.argsort(np.real(valZ))[::-1]]

    # Optimal linear transformation matrix
    V = CX.T @ CZ

    # Return transformation matrix and principal component coefficients
    return V, CX, CZ


def align_classes(X, Y, Z, u, CX, CZ, V):
    """
    Project each class separately.

    Parameters
    ----------
    X : array
        source data set (N samples x D features)
    Y : array
        source labels (N samples x 1)
    Z : array
        target data set (M samples x D features)
    u : array
        target labels (m samples x 2)
    CX : array
        source principal components (K classes x D features x d subspaces)
    CZ : array
        target principal components (K classes x D features x d subspaces)
    V : array
        transformation matrix (K classes x d subspaces x d subspaces)

    Returns
    -------
    X : array
        transformed X (N samples x d features)
    Z : array
        transformed Z (M samples x d features)
    """
    # Number of source samples
    N = X.shape[0]

    # Number of classes
    K = len(np.unique(Y))

    # Subspace dimensionality
    d = V.shape[1]

    # Preallocate
    XV = np.zeros((N, d))

    for k in range(K):

        # Project the k-th class
        XV[Y == k, :] = X[Y == k, :] @ CX[k] @ V[k]

        # Indices of all target samples with label k
        uk = u[u[:, 1] == k, 0]

        # Mean of labeled target samples
        muZk = np.mean(Z[uk, :], axis=0, keepdims=1)

        # Remove mean after projection
        XV[Y == k, :] -= np.mean(XV[Y == k, :], axis=0, keepdims=1)

        # Center the projected class on mean of labeled target samples
        XV[Y == k, :] += muZk @ CZ

    # Project target data onto components
    Z = Z @ CZ

    return XV, Z


def semi_subspace_alignment(X, Y, Z, u, subspace_dim=1):
    """
    Compute subspace and alignment matrix, for each class.

    Parameters
    ----------
    X : array
        source data set (N samples x D features)
    Y : array
        source labels (N samples x 1)
    Z : array
        target data set (M samples x D features)
    u : array
        target labels, first column is index in Z, second column is label
        (m samples x 2)
    subspace_dim : int
        Dimensionality of subspace to retain (def: 1)

    Returns
    -------
    V : array
        transformation matrix (K, D features x D features)
    CX : array
        source principal component coefficients
    CZ : array
        target principal component coefficients
    """
    # Data shapes
    N, DX = X.shape
    M, DZ = Z.shape

    # Check for sufficient samples
    if (N < subspace_dim) or (M < subspace_dim):
        raise ValueError('Too few samples for subspace dimensionality.')

    # Assert equivalent dimensionalities
    if not DX == DZ:
        raise ValueError('Dimensionalities of X and Z should be equal.')

    # Number of classes
    K = len(np.unique(Y))

    for k in range(K):

        # Check number of samples per class
        Nk = np.sum(Y == k)

        # Check if subspace dim is too large
        if (Nk < subspace_dim):

            # Reduce subspace dim
            subspace_dim = min(subspace_dim, Nk)

            # Report
            print('Reducing subspace dim to {}'.format(subspace_dim))

    # Total covariance matrix of target data
    SZ = reg_cov(Z)

    # Eigendecomposition for first d eigenvectors
    valZ, vecZ = eigh(SZ, eigvals=(DZ - subspace_dim, DZ-1))

    # Sort eigenvectors x descending eigenvalues
    CZ = vecZ[:, np.argsort(np.real(valZ))[::-1]]

    # Use k-nn to label target samples (1-NN from the m labeled targets)
    kNN = KNeighborsClassifier(n_neighbors=1)
    U = kNN.fit(Z[u[:, 0], :], u[:, 1]).predict(Z)

    # Preallocate
    CX = np.zeros((K, DX, subspace_dim))
    V = np.zeros((K, subspace_dim, subspace_dim))

    # For each class, align components
    for k in range(K):

        # Take means
        muXk = np.mean(X[Y == k, :], axis=0, keepdims=1)
        muZk = np.mean(Z[U == k, :], axis=0, keepdims=1)

        # Compute covariance matrix of current class
        SXk = reg_cov(X[Y == k, :])
        SZk = reg_cov(Z[U == k, :])

        # Eigendecomposition for first d eigenvectors
        valX, vecX = eigh(SXk, eigvals=(DX - subspace_dim, DX-1))
        valZ, vecZ = eigh(SZk, eigvals=(DZ - subspace_dim, DZ-1))

        # Sort based on descending eigenvalues
        CX[k] = vecX[:, np.argsort(np.real(valX))[::-1]]
        vecZ = vecZ[:, np.argsort(np.real(valZ))[::-1]]

        # Aligned source components
        V[k] = CX[k].T @ vecZ

    # Return transformation matrix and principal component coefficients
    return V, CX, CZ

# + colab={} colab_type="code" id="kqS-YG8Qz7Ch"
# Classifier parameters
classifier = 'svm'
kernel = 'rbf'
degree = 3
l2 = 1.0

# Set subspace dimensionality
subspace_dim = 10

# Number of target samples to be labeled
nT = 5

# + colab={"base_uri": "https://localhost:8080/", "height": 219} colab_type="code" id="NDJl5GWtz7DK" outputId="a80a970c-91c4-4cdd-e32d-75c695e91717"
# Load tweets dataframe
tweets = pd.read_json('RumEval19.json')

# Load tweets in embedding space
X = np.load('rumeval19.npy')

# Impute NaN's with 0's
X[np.isnan(X)] = 0

# Reduce dimensionality
X = PCA(n_components=subspace_dim).fit_transform(X)

# Sort tweets by time
tweets = tweets.sort_values(by=['datetime'])
dates_ = tweets['datetime'].values
dates = np.unique(tweets['datetime'].values)
sortix = np.argsort(dates_)

# Subselect labels and map to numerical
labels_ = tweets['label'].values.tolist()
labels = np.unique(labels_)
Y = np.array([np.argwhere(label == labels)[0][0] for label in labels_])
K = len(np.unique(Y))

# Rumours in order of first appearance in time
rumours_ = tweets['rumour'].values[sortix]
indexes = np.unique(rumours_, return_index=True)[1]
all_rumours = [rumours_[index] for index in sorted(indexes)]

# Remove rumours with too few samples
cutoff = 50
rumours = []
for rumour in all_rumours:

    # Number of samples for current rumour
    num_rumour = np.sum(rumours_ == rumour)

    if num_rumour >= cutoff:
        rumours.append(rumour)

print('{} rumours discarded for having less than {} samples.\n'.format(len(all_rumours) - len(rumours), cutoff))
print('Remaining rumours:')
[print(rumour) for rumour in rumours]

# Sort embedding
X = X[sortix, :]

nR = len(rumours)

# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" id="2n_Zc3s9GnnY" outputId="e03fc3c7-e8f4-4792-d55e-5178979c818f"
nT

# + colab={"base_uri": "https://localhost:8080/", "height": 5363} colab_type="code" id="NrL1hIHzz7Dm" outputId="6043dec2-a2f6-491c-bfd7-1b6939ce8f8e"
# Number of repetitions of the experiment
nI = 10

# Preallocate performance array
allp_perf = np.zeros((nR-1, nI))
prev_perf = np.zeros((nR-1, nI))
same_perf = np.zeros((nR-1, nI))
unsp_allp_perf = np.zeros((nR-1, nI))
semi_allp_perf = np.zeros((nR-1, nI))
unsp_prev_perf = np.zeros((nR-1, nI))
semi_prev_perf = np.zeros((nR-1, nI))

sub_dim_r = np.zeros((nR-1, nI))

mappings = []

# Start iterating
for n in range(nI):
    print('At repetition {}/{}'.format(n, nI))

    # Start looping over rumours
    for r in range(1, len(rumours)):
        print('At rumour ' + rumours[r])

        # Create training data from all past rumours
        allp_index = tweets['rumour'].isin(rumours[slice(0,r)]).values

        # Create training data from previous rumour
        prev_index = (tweets['rumour'] == rumours[r-1]).values.tolist()

        # Find all tweets from current rumour
        curr_index = (tweets['rumour'] == rumours[r]).values.tolist()

        # Split out all past data
        allp_X = X[allp_index, :]
        allp_Y = Y[allp_index]

        # Split out training data
        prev_X = X[prev_index, :]
        prev_Y = Y[prev_index]

        # Split out test data
        curr_X = X[curr_index, :]
        curr_Y = Y[curr_index]

        # Set up over-sampling using specified classifier
        allp_X, allp_Y = RandomOverSampler().fit_resample(allp_X, allp_Y)
        prev_X, prev_Y = RandomOverSampler().fit_resample(prev_X, prev_Y)

        # Check label proportions
        print('Labels of all previous rumours, after resampling: \t %s' % Counter(allp_Y))
        print('Labels of previous rumour, after resampling: \t %s' % Counter(prev_Y))
        print('Labels of current rumour, without resampling: \t %s' % Counter(curr_Y))

        # Sample size
        NA = allp_X.shape[0]
        NP = prev_X.shape[0]
        NC = curr_X.shape[0]

        # Subspace dimensionality for current rumour
        if n == 0:
            sub_dim_r[r-1] = min(subspace_dim, min(NC, min(NA, NP)))

        # Random selection of target labels per class for semi-supervision
        # NOTE(review): nT is reduced globally here and never restored, so a
        # single small class lowers nT for all later rumours and repetitions.
        u0 = np.zeros((0,1))
        u1 = np.zeros((0,1))
        for k in range(K):

            # Check for enough target samples per class
            if np.sum(curr_Y == k) < nT:

                # Update number of target samples per class
                nT = np.sum(curr_Y == k)

                print('Reducing number of labeled target samples to {}'.format(nT))

            # Make probabilities of sampling k-th class
            pYk = (curr_Y==k)/np.sum(curr_Y==k)

            # Randomly sample
            ix = rnd.choice(range(NC), size=nT, replace=False, p=pYk)
            u0 = np.vstack((u0, ix[:,np.newaxis]))
            u1 = np.vstack((u1, k*np.ones((nT,1))))

        u = np.concatenate((u0, u1), axis=1).astype('uint8')

        # Split out data from current rumour in training and testing
        trn_curr_X = curr_X[u[:,0],:]
        trn_curr_Y = u[:, 1]
        tst_curr_X = curr_X[np.setdiff1d(np.arange(NC), u[:,0]), :]
        tst_curr_Y = curr_Y[np.setdiff1d(np.arange(NC), u[:,0])]

        # Find unsupervised alignment of all previous
        unsp_allp_V, unsp_allp_CX, unsp_allp_CZ = subspace_alignment(allp_X, tst_curr_X, subspace_dim=min(subspace_dim, min(NA, tst_curr_X.shape[0])))

        # Find unsupervised alignment of previous
        unsp_prev_V, unsp_prev_CX, unsp_prev_CZ = subspace_alignment(prev_X, tst_curr_X, subspace_dim=min(subspace_dim, min(NP, tst_curr_X.shape[0])))

        # Find semi-supervised alignment of all previous
        semi_allp_V, semi_allp_CX, semi_allp_CZ = semi_subspace_alignment(allp_X, allp_Y, curr_X, u, subspace_dim=min(subspace_dim, min(NA, NC-nT)))

        # Find semi-supervised alignment of previous
        semi_prev_V, semi_prev_CX, semi_prev_CZ = semi_subspace_alignment(prev_X, prev_Y, curr_X, u, subspace_dim=min(subspace_dim, min(NP, NC-nT)))

        # Align all previous rumours in unsupervised manner
        unsp_allp_XV, unsp_allp_tst_curr_XC = align_data(allp_X, tst_curr_X, unsp_allp_CX, unsp_allp_CZ, unsp_allp_V)

        # Align previous rumour in unsupervised manner
        unsp_prev_XV, unsp_prev_tst_curr_XC = align_data(prev_X, tst_curr_X, unsp_prev_CX, unsp_prev_CZ, unsp_prev_V)

        # Align all previous rumours in semi-supervised manner
        semi_allp_XV, semi_allp_tst_curr_XC = align_classes(allp_X, allp_Y, curr_X, u, semi_allp_CX, semi_allp_CZ, semi_allp_V)

        # Align previous rumour in semi-supervised manner
        semi_prev_XV, semi_prev_tst_curr_XC = align_classes(prev_X, prev_Y, curr_X, u, semi_prev_CX, semi_prev_CZ, semi_prev_V)

        # Concatenate data from current rumour to the all_previous rumour sets
        allp_X = np.vstack((allp_X, trn_curr_X))
        unsp_allp_XV = np.vstack((unsp_allp_XV, trn_curr_X))
        semi_allp_XV = np.vstack((semi_allp_XV, trn_curr_X))
        allp_Y = np.concatenate((allp_Y, trn_curr_Y), axis=0)

        # Remove current rumour's training samples from test set
        semi_allp_tst_curr_XC = semi_allp_tst_curr_XC[np.setdiff1d(np.arange(NC), u[:,0]), :]
        semi_prev_tst_curr_XC = semi_prev_tst_curr_XC[np.setdiff1d(np.arange(NC), u[:,0]), :]

        # Initialize support vector machines
        allp_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        prev_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        same_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        unsp_allp_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        semi_allp_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        unsp_prev_clf = SVC(kernel=kernel, gamma='auto', C=l2)
        semi_prev_clf = SVC(kernel=kernel, gamma='auto', C=l2)

        # Train classifiers
        allp_clf.fit(allp_X, allp_Y)
        prev_clf.fit(prev_X, prev_Y)
        same_clf.fit(trn_curr_X, trn_curr_Y)
        unsp_allp_clf.fit(unsp_allp_XV, allp_Y)
        semi_allp_clf.fit(semi_allp_XV, allp_Y)
        unsp_prev_clf.fit(unsp_prev_XV, prev_Y)
        semi_prev_clf.fit(semi_prev_XV, prev_Y)

        # Make predictions on test set
        allp_preds = label_binarize(allp_clf.predict(tst_curr_X), classes=np.arange(K))
        prev_preds = label_binarize(prev_clf.predict(tst_curr_X), classes=np.arange(K))
        same_preds = label_binarize(same_clf.predict(tst_curr_X), classes=np.arange(K))
        unsp_allp_preds = label_binarize(unsp_allp_clf.predict(unsp_allp_tst_curr_XC), classes=np.arange(K))
        semi_allp_preds = label_binarize(semi_allp_clf.predict(semi_allp_tst_curr_XC), classes=np.arange(K))
        unsp_prev_preds = label_binarize(unsp_prev_clf.predict(unsp_prev_tst_curr_XC), classes=np.arange(K))
        semi_prev_preds = label_binarize(semi_prev_clf.predict(semi_prev_tst_curr_XC), classes=np.arange(K))

        # Cast true labels to label matrix as well
        tst_curr_Y = label_binarize(tst_curr_Y, classes=np.arange(K))

        # Test on data from current day and store
        allp_perf[r-1,n] = f1_score(tst_curr_Y, allp_preds, labels=np.arange(K), average='micro')
        prev_perf[r-1,n] = f1_score(tst_curr_Y, prev_preds, labels=np.arange(K), average='micro')
        same_perf[r-1,n] = f1_score(tst_curr_Y, same_preds, labels=np.arange(K), average='micro')
        unsp_allp_perf[r-1,n] = f1_score(tst_curr_Y, unsp_allp_preds, labels=np.arange(K), average='micro')
        semi_allp_perf[r-1,n] = f1_score(tst_curr_Y, semi_allp_preds, labels=np.arange(K), average='micro')
        unsp_prev_perf[r-1,n] = f1_score(tst_curr_Y, unsp_prev_preds, labels=np.arange(K), average='micro')
        semi_prev_perf[r-1, n] = f1_score(tst_curr_Y, semi_prev_preds, labels=np.arange(K), average='micro')

#         # Align classes without over-sampler
#         tmp_XV, tmp_XC = align_classes(X[trn_index, :], Y[trn_index], tst_X, u, CX, CZ, V)

#         # Store aligned rumour data
#         X[trn_index, :] = tmp_XV
#         X[tst_index, :] = tmp_XC

# Compact to DataFrame
# BUGFIX: the dict keys previously did not match the `columns` list
# ('allp' vs 'f1_all', etc.), so every column except 'rumours' was NaN in the
# saved JSON. Keys now match the intended f1_* column names.
rum_results = pd.DataFrame({'rumours': rumours[1:],
                            'f1_all': np.mean(allp_perf, axis=1),
                            'f1_previous': np.mean(prev_perf, axis=1),
                            'f1_same': np.mean(same_perf, axis=1),
                            'f1_all_unsup-align': np.mean(unsp_allp_perf, axis=1),
                            'f1_all_semisup-align': np.mean(semi_allp_perf, axis=1),
                            'f1_previous_unsup-align': np.mean(unsp_prev_perf, axis=1),
                            'f1_previous_semisup-align': np.mean(semi_prev_perf, axis=1),
                            }, columns=['rumours', 'f1_all', 'f1_previous', 'f1_same', 'f1_all_unsup-align', 'f1_all_semisup-align', 'f1_previous_unsup-align', 'f1_previous_semisup-align'])

rum_results.to_json('resultsF1_rumEval19_subalign_dim' + str(subspace_dim) + '_nT' + str(nT) + '_nI' + str(nI) + '.json')

# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="L38TZVZUJOtE" outputId="600edc4a-91b8-4e06-99bf-48b72b504637"
# Rebuild the results table with short column names for plotting.
rum_results = pd.DataFrame({'rumours': rumours[1:],
                            'allp': np.mean(allp_perf, axis=1),
                            'prev': np.mean(prev_perf, axis=1),
                            'same': np.mean(same_perf, axis=1),
                            'allp_unsp': np.mean(unsp_allp_perf, axis=1),
                            'allp_semi': np.mean(semi_allp_perf, axis=1),
                            'prev_unsp': np.mean(unsp_prev_perf, axis=1),
                            'prev_semi': np.mean(semi_prev_perf, axis=1),
                            }, columns=['rumours', 'allp', 'same', 'prev', 'allp_unsp', 'allp_semi', 'prev_unsp', 'prev_semi'])
rum_results

# + colab={"base_uri": "https://localhost:8080/", "height": 736} colab_type="code" id="T1iaT0dMz7EB" outputId="378a0a2e-4db7-4235-c44c-61446d3cb758"
# Visualize performance in bar graph

# Initialize figure
fig, ax = plt.subplots(figsize=(20, 10))

# Bar plot
rum_results.plot.bar(ax=ax, x='rumours', y=['allp', 'same', 'prev', 'allp_unsp', 'allp_semi', 'prev_unsp', 'prev_semi'], rot=30, fontsize=fS, legend=True);

# Set axes properties
ax.set_ylim([0, 1]);
ax.legend(fontsize=fS, loc=2);
ax.set_xlabel('Rumours', fontsize=fS);
ax.set_ylabel('F1-score', fontsize=fS);

# NOTE(review): `padding` is not a documented savefig argument (`pad_inches`
# is); it is left as-is because some matplotlib versions silently accept it.
fig.savefig('resultsF1_rumEval19_subalign_dim' + str(subspace_dim) + '_nT' + str(nT) + '_nI' + str(nI) + '.png', bbox_inches='tight', padding=None)

# + colab={"base_uri": "https://localhost:8080/", "height": 269} colab_type="code" id="sKSBDhxhXBfb" outputId="a4584b7c-1590-4137-c965-733ff6abf23b"
rum_results

# + colab={} colab_type="code" id="xTHAwfuMJDqD"
exp-rumEval19/exp-rumEval19-subalign.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Load the breast-cancer dataset bundled with scikit-learn.
cancer = datasets.load_breast_cancer()
x = cancer.data
y = cancer.target

# Hold out 30% of the samples for evaluation; fix random_state (the seed)
# so the split is reproducible.
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=1)

# Fit a logistic-regression classifier on the training split.
model = LogisticRegression(max_iter=2000)
model.fit(x_train, y_train)

# Evaluate on the held-out split.
y_pred = model.predict(x_test)
accuracy = model.score(x_test, y_test)

# Learned coefficients
print(model.coef_)
# Predicted labels for the test split
print(y_pred)
# Mean accuracy on the test split
print(accuracy)
mod10.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="KpHD9psW7jxO" # # # + id="ldykR0fMftHt" #Download Blender from Repository # !wget https://download.blender.org/release/Blender2.91/blender-2.91.0-linux64.tar.xz # + id="rQ-CKpS5hBFo" #Install Blender # !tar xf blender-2.91.0-linux64.tar.xz # + id="Cbk0J6TYwvnQ" #Download some required libraries # !apt install libboost-all-dev # !apt install libgl1-mesa-dev # + id="h_E1kl9W4iV8" #Connect Google Drive from google.colab import drive drive.mount('/gdrive') # + id="AYLt4dk18KPS" filename = '/gdrive/MyDrive/BlenderRenders/bmw27_gpu.blend' # + id="_p77a65khTRB" #Run Blender # !sudo ./blender-2.91.0-linux64/blender -b $filename -noaudio -P '/gdrive/MyDrive/BlenderRenders/GPU.py' -E 'CYCLES' -f 1 -F 'PNG' # #!sudo ./blender-2.91.0-linux64/blender -b $filename -noaudio -- --cycles-device CUDA -E 'CYCLES' -f 1 -F 'PNG'
Blender.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RachidaTOp/github-slideshow/blob/master/Titanic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="p1d1Tpalt-qT"
import pandas as pd
# BUGFIX: numpy was never imported, but survie() below calls np.array,
# which raised a NameError at the final cell.
import numpy as np

# + id="rHQH0cSPuZCU"
import seaborn as sns

# + id="7zmtwjrTufHY" outputId="260cacf8-02f6-4020-98dc-90d9c9914453" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Load the Titanic passenger dataset bundled with seaborn.
titanic = sns.load_dataset('titanic')
titanic.shape
titanic.head()

# + id="AKIsJw7Zuiru" outputId="440468de-0338-4a08-b522-6e7c21a1f45f" colab={"base_uri": "https://localhost:8080/", "height": 204}
# Keep four columns, drop rows with missing values and encode sex numerically
# (male -> 0, female -> 1).
titanic = titanic[['survived', 'pclass', 'sex', 'age']]
titanic.dropna(axis=0, inplace=True)
titanic['sex'].replace(['male', 'female'], [0, 1], inplace=True)
titanic.head()

# + id="SW6TYusfuoTp"
from sklearn.neighbors import KNeighborsClassifier

# + id="YR3nxBDnuryB"
# Target is survival; the remaining columns are the features.
y = titanic['survived']
X = titanic.drop('survived', axis=1)

# + id="cvzsCdg0uu7-" outputId="9d6855e1-c208-4f7d-d53e-5e49cb6d1c9f" colab={"base_uri": "https://localhost:8080/"}
# Fit a k-nearest-neighbours classifier and report training-set accuracy
# (note: evaluated on the same data it was fit on).
model = KNeighborsClassifier()
model.fit(X, y)
model.score(X, y)

# + id="vDOHtCBnuy1O" outputId="614158fa-403b-4801-c4d2-d4798c254521" colab={"base_uri": "https://localhost:8080/"}
model.predict(X)

# + id="_k6VUGy_u4pW"
def survie(model, pclass=3, sex=1, age=24):
    """Print the model's survival prediction and class probabilities for one
    passenger described by (pclass, sex, age)."""
    x = np.array([pclass, sex, age]).reshape(1, 3)
    print(model.predict(x))
    print(model.predict_proba(x))

# + id="91ysB9zju5gr" outputId="b60fc416-3802-4048-c706-06dca2f4fedd" colab={"base_uri": "https://localhost:8080/", "height": 265}
survie(model)
Titanic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# BUGFIX: numpy was used below (np.random.poisson, np.log, ...) but never
# imported, raising a NameError.
import numpy as np
import pandas as pd
import scipy.optimize as sc
import scipy.sparse as sp

# Simulate 10000 Poisson(100) counts and store them as a sparse column vector.
data = np.random.poisson(100, (10000,1))
data = sp.csr_matrix(data)

# +
def poiss_loglike(x, data, a):
    """
    Scaled negative Poisson log-likelihood (up to the constant log(k!) term).

    Parameters
    ----------
    x : float or 1-element array
        Poisson rate parameter being optimized.
    data : array or scipy.sparse matrix
        observed counts.
    a : float
        scale factor applied to the objective.

    Returns
    -------
    float or array
        -(sum(data)*log(x) - n*x) * a, equal to -sum(data*log(x) - x) * a.
    """
    print(x)
    # BUGFIX: the original `-np.sum(data*np.log(x)-x)*a` raised
    # NotImplementedError for sparse `data`, because scipy.sparse does not
    # support subtracting a nonzero scalar from a sparse matrix. The form
    # below is algebraically identical and works for sparse and dense input.
    return -(data.sum()*np.log(x) - x*np.prod(data.shape))*a

# Maximize the likelihood (minimize its negative) via Nelder-Mead.
params = sc.fmin(poiss_loglike, 0, args=(data ,1))
# -

print(params)
pysal/contrib/spint/notebooks/sparse_scipy_optim.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Conv2DTranspose, BatchNormalization, Flatten, Reshape, LeakyReLU, Activation
from keras.activations import sigmoid
import tensorflow as tf
from utilities import show_graph
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from tensorflow.examples.tutorials.mnist import input_data
# -

mnist = input_data.read_data_sets('./data', one_hot=False)

# MNIST digits: 28x28 pixels, single channel.
image_size = (28, 28, 1)

# tf.reset_default_graph()

def discriminator(print_summary=False):
    """Build the discriminator: two strided convolutions followed by a
    sigmoid real/fake output."""
    with tf.variable_scope('discriminator'):
        discriminator = Sequential()
        discriminator.add(Conv2D(32, (5, 5), strides=2, activation=LeakyReLU(), padding='same', input_shape=image_size))
        discriminator.add(Conv2D(64, (5, 5), strides=2, padding='same', input_shape=image_size))
        discriminator.add(BatchNormalization())
        discriminator.add(LeakyReLU())
        discriminator.add(Flatten())
        discriminator.add(Dense(1, activation='sigmoid'))

    if print_summary:
        print(discriminator.summary())
    return discriminator

def generator(print_summary=False):
    """Build the generator: dense projection of a 100-dim noise vector to a
    7x7 feature map, then transposed convolutions up to a 28x28 image."""
    z_dim = 100
    depth = 128
    with tf.variable_scope('generator'):
        generator = Sequential()
        generator.add(Dense(7*7*depth, input_dim=z_dim))
        generator.add(Reshape((7,7,depth)))
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())

        depth = int(depth/2)
        generator.add(Conv2DTranspose(depth, 5, padding='same', strides=2))
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())

        depth = int(depth/2)
        generator.add(Conv2DTranspose(depth, 5, padding='same', strides=1))
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())

        depth = int(depth/2)
        generator.add(Conv2DTranspose(depth, 5, padding='same', strides=1))
        generator.add(BatchNormalization())
        generator.add(LeakyReLU())

#         depth = int(depth/2)
        generator.add(Conv2DTranspose(1, 5, padding='same', strides=2))
        generator.add(BatchNormalization())
        generator.add(Activation(sigmoid))

    if print_summary:
        print(generator.summary())
    return generator

def adversarial_model(generator, discriminator, print_summary=False):
    """Stack generator and (frozen) discriminator for generator training."""
    adversarial_model = Sequential()
    adversarial_model.add(generator)
    discriminator.trainable = False
    adversarial_model.add(discriminator)

    if print_summary:
        adversarial_model.summary()
    return adversarial_model

def mnist_gen(batch_size):
    """Yield endless batches of MNIST images reshaped to image_size."""
    while 1:
        data = mnist.train.next_batch(batch_size)
        yield data[0].reshape((-1,)+image_size)

# +
batch_size = 128
z_dim = 100
mnist_data = mnist_gen(batch_size)

d = discriminator()
g = generator()
d_on_g = adversarial_model(g, d)

# d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
# g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)

g.compile(loss='binary_crossentropy', optimizer="adam")
d_on_g.compile(loss='binary_crossentropy', optimizer="adam")
d.trainable = True
d.compile(loss='binary_crossentropy', optimizer="adam")

# for epoch in range(100):
#     print("Epoch is", epoch)
#     print("Number of batches", int(X_train.shape[0]/BATCH_SIZE))
#     for index in range(int(X_train.shape[0]/BATCH_SIZE)):

# One discriminator step: real batch labeled 1, generated batch labeled 0.
image_batch = next(mnist_data)
noise = np.random.randn(batch_size, z_dim)
# noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
# image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
generated_images = g.predict(noise, verbose=0)

# if index % 20 == 0:
#     image = combine_images(generated_images)
#     image = image*127.5+127.5
#     Image.fromarray(image.astype(np.uint8)).save(
#         str(epoch)+"_"+str(index)+".png")

X = np.concatenate((image_batch, generated_images))
y = [1] * batch_size + [0] * batch_size
d_loss = d.train_on_batch(X, y)
# -

image_batch.shape

g

d

type(d)

# BUGFIX: these cells previously called .compile / .trainable on the factory
# *functions* `generator`, `discriminator`, `adversarial_model` (which shadow
# the models), raising AttributeError. They now operate on the built model
# instances g, d and d_on_g.
g.compile(loss='binary_crossentropy', optimizer='adam')
d_on_g.compile(loss='binary_crossentropy', optimizer='adam')
d.trainable = True
d.compile(loss='binary_crossentropy', optimizer='adam')

show_graph(tf.get_default_graph().as_graph_def())

# +
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

SVG(model_to_dot(d).create(prog='dot', format='svg'))
# -



# +
batch_size = 128
mnist_data = mnist_gen(batch_size)
# for i in range(1000):
# Another discriminator step, this time with explicit float labels.
z = np.random.randn(batch_size, z_dim)
gen_im = g.predict(z)
mnist_batch = next(mnist_data)
x = np.vstack([mnist_batch, gen_im])
y = np.ones((2*batch_size, 1), dtype='float32')
y[batch_size:] = 0

d.trainable = True
d.train_on_batch(x, y)

# y = np.ones((batch_size, 1))
# d_on_g.train_on_batch(z, y)
# -

g.trainable
Lesson 21 - Generative Adversarial Networks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Netflix Stock Price Prediction # + import pandas as pd import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import warnings warnings.simplefilter("ignore") # - df = pd.read_csv('NFLX_data.csv') df.sort_values('Date',inplace=True) df.head() df.info() # No missing values found. df.plot(x='Date',y='Close',figsize=(16,8)) close = df[['Close']] from sklearn.preprocessing import MinMaxScaler mm = MinMaxScaler(feature_range=(-1, 1)) close['Close'] = mm.fit_transform(close['Close'].values.reshape(-1,1)) close.head(3) raw = close.as_matrix() print('Shape: ',raw.shape) print('') print(raw[:5]) lookback = 30 data = [] for index in range(len(raw) - lookback): data.append(raw[index: index + lookback]) data = np.array(data) print(data.shape) test_size = int(np.round(0.2*data.shape[0])) train_size = data.shape[0] - (test_size) # + x_train = data[:train_size,:-1,:] y_train = data[:train_size,-1,:] x_test = data[train_size:,:-1] y_test = data[train_size:,-1,:] print(x_train.shape) print(y_train.shape) print(x_test.shape) print(y_test.shape) # - # make training and test sets in torch x_train = torch.from_numpy(x_train).type(torch.Tensor) x_test = torch.from_numpy(x_test).type(torch.Tensor) y_train = torch.from_numpy(y_train).type(torch.Tensor) y_test = torch.from_numpy(y_test).type(torch.Tensor) # + n_steps = lookback - 1 batch_size = 1000 epochs = 120 train = torch.utils.data.TensorDataset(x_train,y_train) test = torch.utils.data.TensorDataset(x_test,y_test) train_loader = torch.utils.data.DataLoader(dataset=train, batch_size=batch_size, shuffle=False) test_loader = torch.utils.data.DataLoader(dataset=test, batch_size=batch_size, shuffle=False) # + input_dim = 1 hidden_dim = 36 num_layers = 2 output_dim = 1 class 
LSTM(nn.Module): def __init__(self, input_dim, hidden_dim, num_layers, output_dim): super(LSTM, self).__init__() self.hidden_dim = hidden_dim self.num_layers = num_layers self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True) self.fc = nn.Linear(hidden_dim, output_dim) def forward(self, x): h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_() c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_dim).requires_grad_() out, (hn, cn) = self.lstm(x, (h0.detach(), c0.detach())) out = self.fc(out[:, -1, :]) return out # + model = LSTM(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, num_layers=num_layers) loss_fn = torch.nn.MSELoss(size_average=True) optimiser = torch.optim.Adam(model.parameters(), lr=0.007) print(model) print(len(list(model.parameters()))) for i in range(len(list(model.parameters()))): print(list(model.parameters())[i].size()) # - lis = np.zeros(epochs) # Number of steps to unroll seq_dim =lookback-1 for t in range(epochs): y_train_pred = model(x_train) loss = loss_fn(y_train_pred, y_train) if t % 10 == 0 and t !=0: print("Epoch ", t, "MSE: ", loss.item()) lis[t] = loss.item() optimiser.zero_grad() loss.backward() optimiser.step() prd = mm.inverse_transform(y_train_pred.detach().numpy()) org = mm.inverse_transform(y_train.detach().numpy()) # + plt.plot(prd, label="Preds") plt.plot(org, label="Data") plt.legend() plt.show() plt.plot(lis, label="Training loss") plt.legend() plt.show() # - np.shape(y_train_pred) # + import math from sklearn.metrics import mean_squared_error from math import sqrt # make predictions y_test_pred = model(x_test) # invert predictions y_train_pred = mm.inverse_transform(y_train_pred.detach().numpy()) y_train = mm.inverse_transform(y_train.detach().numpy()) y_test_pred = mm.inverse_transform(y_test_pred.detach().numpy()) y_test = mm.inverse_transform(y_test.detach().numpy()) # calculate root mean squared error trainScore = math.sqrt(mean_squared_error(y_train[:,0], 
y_train_pred[:,0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(y_test[:,0], y_test_pred[:,0])) print('Test Score: %.2f RMSE' % (testScore)) # + # shift train predictions for plotting trainPredictPlot = np.empty_like(close) trainPredictPlot[:, :] = np.nan trainPredictPlot[lookback:len(y_train_pred)+lookback, :] = y_train_pred # shift test predictions for plotting testPredictPlot = np.empty_like(close) testPredictPlot[:, :] = np.nan testPredictPlot[len(y_train_pred)+lookback-1:len(close)-1, :] = y_test_pred # plot baseline and predictions plt.figure(figsize=(15,8)) plt.plot(mm.inverse_transform(close),label='Actual Values') plt.plot(trainPredictPlot,label='Training Predictions') plt.plot(testPredictPlot,label='Test Predictions') plt.legend() plt.show() # -
Netflix Stock Price Prediction/Netflix Stock Price Prediction using Pytorch and RNN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 2 Problem 3 # + ############################################################################################################################# ############################################################################################################################### # - import numpy as np DS1_train = np.genfromtxt('DS1_train.txt') DS1_test = np.genfromtxt('DS1_test.txt') # + #DS1_train = np.genfromtxt('F:\\Winter 2018\\Applied ML\\winter 2018\\Assignments\\assignment 2\\hwk2_datasets_corrected\\DS1_train.txt') #DS1_test = np.genfromtxt('F:\\Winter 2018\\Applied ML\\winter 2018\\Assignments\\assignment 2\\hwk2_datasets_corrected\\DS1_test.txt') # - print(DS1_train.shape) print(DS1_test.shape) # + t_train = DS1_train[:,-1] t_train = np.reshape(t_train,(-1,1)) t_test = DS1_test[:,-1] t_test = np.reshape(t_test,(-1,1)) # + X_train = DS1_train[:,0:-1] #print(X_train.shape) X_test = DS1_test[:,0:-1] #print(X_train.shape) # + N = DS1_train.shape[0] N1 = np.int(t_train.sum()) N2 = DS1_train.shape[0] - N1 print('No of training example',N) print('No of positive example',N1) print('No of negative example',N2) # - Nofeatures = X_train.shape[1] print('No of features',Nofeatures) def EucledDistacne(X_train,X_test): import numpy as np d = np.reshape(np.sqrt(np.sum((X_train-X_test)**2,1)),(-1,1)) return d def KnnClassifier(X_train,X_test,K): noTestData = X_test.shape[0] pred = np.zeros((noTestData,1)) for n in range (0,noTestData): d = EucledDistacne(X_train,X_test[n,:]) neighbour = np.argsort(d,axis=0)[0:K] pred[n,:] = t_train[neighbour].mean(axis=0) if pred[n,:] > 0.5: pred[n,:] = 1 else: pred[n,:] = 0 return pred def EvaluateClassifier(pred,t_test): True_Positive = np.sum(t_test*pred) False_Positive = (pred>t_test).sum() False_Negative = 
(pred<t_test).sum() True_Negative = (pred.shape[0] - pred.sum()) - False_Negative accuracy = (True_Positive + True_Negative)/ t_test.shape[0] precision = True_Positive/(True_Positive+False_Positive) recall = True_Positive/(True_Positive+True_Negative) F_measure = 2*(precision*recall)/(precision+recall) return True_Positive, False_Positive, False_Negative, True_Negative, accuracy, precision, recall, F_measure appended_data = [] import pandas as pd K = 100 for k in range(2,K): pred = KnnClassifier(X_train,X_test,k) A = EvaluateClassifier(pred,t_test) A = np.reshape(A,(1,-1)) data = pd.DataFrame(A) appended_data.append(data) appended_data = pd.concat(appended_data, axis=0) columns = ['True_Positive', 'False_Positive', 'False_Negative', 'True_Negative', 'accuracy', 'precision', 'recall', 'F_measure'] appended_data.columns = columns appended_data.index= np.arange(2,K) import matplotlib.pyplot as plt plt.plot(appended_data.accuracy) plt.xlabel('Value of K') plt.ylabel('Accuracy') #plt.savefig('F:/Winter 2018/Applied ML/winter 2018/Assignments/assignment 2/fig11.png') print ("for K =" , appended_data.accuracy.idxmax() ,"provides max accuracy") print ("for K =" , appended_data.precision.idxmax() ,"provides max precision") print ("for K =" , appended_data.recall.idxmax() ,"provides max recall") print ("for K =" , appended_data.F_measure.idxmax() ,"provides max Fmeasure")
assignment 2/submission file/260800927/Advance ML assignment 2 problem 3 .ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Code for getting live streaming data from trading view by sending heartbeat every 10 seconds # # credits: [rahulmr](https://github.com/rahulmr) # # + import websocket import time import threading import json SOCKET = "wss://data.tradingview.com/socket.io/websocket" headers = { "Accept-Encoding": "gzip, deflate, br", # "Accept-Language": "en-US,en;q=0.9", # "Cache-Control": "no-cache", # "Connection": "Upgrade", "Host": "data.tradingview.com", "Origin": "https://www.tradingview.com", # "Pragma": "no-cache", # "Sec-WebSocket-Extensions": "permessage-deflate; client_max_window_bits", # "Sec-WebSocket-Key": "<KEY> # "Sec-WebSocket-Version": 13, # "Upgrade": "websocket", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36" } """ ~m~55~m~{"m":"chart_create_session","p":["cs_zEcm9GqyQdK0",""]} '~m~52~m~{"m":"quote_create_session","p":["qs_dl0OygXkO4uu"]}' ~m~98~m~{"m":"quote_add_symbols","p":["qs_dl0OygXkO4uu","BINANCE:BTCUSDT",{"flags":["force_permission"]}]} ~m~68~m~{"m":"quote_fast_symbols","p":["qs_dl0OygXkO4uu","BINANCE:BTCUSDT"]} ~m~6~m~~h~157 ~m~411~m~{"m":"quote_fast_symbols","p":["qs_x72fChUYomPp","MCX:GOLDGUINEAN2020","NSE:NIFTY","MCX:GOLDPETALN2020","NSE:BAJFINANCE","MCX:SILVERMQ2020","NSE:SBIN","NSE:BANKNIFTY","NSE:RBLBANK","NSE:INDIAVIX","NSE:INDUSINDBK","NSE:CIPLA","NSE:SUNTV","MCX:NATURALGAS1!","MCX:SILVERMIC1!","MCX:CRUDEOIL1!","MCX:GOLDM1!","NSE:BANKNIFTY1!","SGX:IN1!","OANDA:USDINR","NSE:DABUR","NSE:BERGEPAINT","NASDAQ:TSLA","BINANCE:BTCUSDT"]} ~m~68~m~{"m":"quote_fast_symbols","p":["qs_dl0OygXkO4uu","BINANCE:BTCUSDT"]} ~m~98~m~{"m":"quote_add_symbols","p":["qs_x72fChUYomPp","BINANCE:BTCUSDT",{"flags":["force_permission"]}]} """ def 
on_open(ws): print('opened connection') # def run(*args): # for i in range(30): # time.sleep(1) # ws.send("Hello %d" % i) # time.sleep(1) # ws.close() # print("thread terminating...") # threading.start_new_thread(run, ()) time.sleep(2) # ws.send('~m~524~m~{"m":"set_auth_token","p":["<PASSWORD> <KEY>"]}') ws.send('~m~54~m~{"m":"set_auth_token","p":["unauthorized_user_token"]}') ws.send('~m~55~m~{"m":"chart_create_session","p":["cs_zEcm9GqyQdK0",""]}') ws.send('~m~52~m~{"m":"quote_create_session","p":["qs_x72fChUYomPp"]}') ws.send('~m~344~m~{"m":"quote_set_fields","p":["qs_x72fChUYomPp","ch","chp","current_session","description","local_description","language","exchange","fractional","is_tradable","lp","lp_time","minmov","minmove2","original_name","pricescale","pro_name","short_name","type","update_mode","volume","currency_code","logoid","currency-logoid","base-currency-logoid"]}') ws.send('~m~98~m~{"m":"quote_add_symbols","p":["qs_x72fChUYomPp","BINANCE:BTCUSDT",{"flags":["force_permission"]}]}') ws.send('~m~98~m~{"m":"quote_add_symbols","p":["qs_x72fChUYomPp","BINANCE:BNBUSDT",{"flags":["force_permission"]}]}') ws.send('~m~91~m~{"m":"quote_add_symbols","p":["qs_x72fChUYomPp","NSE:SBIN",{"flags":["force_permission"]}]}') ws.send('~m~98~m~{"m":"quote_fast_symbols","p":["qs_x72fChUYomPp","BINANCE:BTCUSDT","BINANCE:BNBUSDT", "NSE:SBIN"]}') def on_close(ws): print('closed connection') def on_message(ws, message): # p = message.split('~', -1)[4] # data = json.loads(p) # print(data) # print(f'received message :: {message}') if 'lp' in message: p = message.split('~', -1)[4] data = json.loads(p) # print(data) timestamp = time.strftime("%Y-%m-%d %H:%M:%S") symbol = data['p'][1]['n'] ltp = data['p'][1]['v']['lp'] volume = data['p'][1]['v']['volume'] if symbol.upper() == "BINANCE:BTCUSDT": print(f'tick :: {timestamp} :: {symbol} :: {ltp} :: {volume}') if __name__ == "__main__": websocket.enableTrace(False) ws = websocket.WebSocketApp( SOCKET, on_message=on_message, 
on_open=on_open, on_close=on_close) wst = threading.Thread(target=ws.run_forever) wst.daemon = True wst.start() conn_timeout = 60 while not ws.sock.connected and conn_timeout: time.sleep(1) conn_timeout -= 1 while ws.sock is not None: time.sleep(10) # - # ### CONS: # - Some dates are missing, maybe we can average the middle readings? e.g. # # ``` # tick :: 2021-10-23 09:30:52 :: BINANCE:BTCUSDT :: 61392.43 :: 17356.0457 # tick :: 2021-10-23 09:30:57 :: BINANCE:BTCUSDT :: 61392.44 :: 17356.42096 # ``` # # - Same timestamp different data e.g. # # ``` # tick :: 2021-10-23 09:30:13 :: BINANCE:BTCUSDT :: 61374.07 :: 17349.5467 # tick :: 2021-10-23 09:30:13 :: BINANCE:BTCUSDT :: 61366.33 :: 17350.24384 # ```
notebooks/livestream by heartbeat.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Interactive data visualizations # # Jupyter Notebook has support for many kinds of interactive outputs, including # the ipywidgets ecosystem as well as many interactive visualization libraries. # These are supported in Jupyter Book, with the right configuration. # This page has a few common examples. # # First off, we'll download a little bit of data # and show its structure: import plotly.express as px data = px.data.iris() data.head() # ## Altair # # Interactive outputs will work under the assumption that the outputs they produce have # self-contained HTML that works without requiring any external dependencies to load. # See the [`Altair` installation instructions](https://altair-viz.github.io/getting_started/installation.html#installation) # to get set up with Altair. Below is some example output. import altair as alt alt.Chart(data=data).mark_point().encode( x="sepal_width", y="sepal_length", color="species", size='sepal_length' ) # ## Plotly # # Plotly is another interactive plotting library that provides a high-level API for # visualization. See the [Plotly JupyterLab documentation](https://plotly.com/python/getting-started/#JupyterLab-Support-(Python-3.5+)) # to get started with Plotly in the notebook. # # ```{margin} # Plotly uses [renderers to output different kinds of information](https://plotly.com/python/renderers/) # when you display a plot. Experiment with renderers to get the output you want. # ``` # # Below is some example output. 
# # :::{important} # For these plots to show, it may be necessary to load `require.js`, in your `_config.yml`: # # ```yaml # sphinx: # config: # html_js_files: # - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js # ``` # ::: # # + import plotly.io as pio import plotly.express as px import plotly.offline as py df = px.data.iris() fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species", size="sepal_length") fig # - # ## Bokeh # # Bokeh provides several options for interactive visualizations, and is part of the PyViz ecosystem. See # [the Bokeh with Jupyter documentation](https://docs.bokeh.org/en/latest/docs/user_guide/jupyter.html#userguide-jupyter) to # get started. # # Below is some example output. First we'll initialized Bokeh with `output_notebook()`. # This needs to be in a separate cell to give the JavaScript time to load. from bokeh.plotting import figure, show, output_notebook output_notebook() # Now we'll make our plot. p = figure() p.circle(data["sepal_width"], data["sepal_length"], fill_color=data["species"], size=data["sepal_length"]) show(p) # ## ipywidgets # # You may also run code for Jupyter Widgets in your document, and the interactive HTML # outputs will embed themselves in your site. See [the ipywidgets documentation](https://ipywidgets.readthedocs.io/en/latest/user_install.html) # for how to get set up in your own environment. # # ```{admonition} Widgets often need a kernel # Note that `ipywidgets` tend to behave differently from other interactive visualization libraries. They # interact both with Javascript, and with Python. Some functionality in `ipywidgets` may not # work in default Jupyter Book pages (because no Python kernel is running). You may be able to # get around this with [tools for remote kernels, like thebe](https://thebelab.readthedocs.org). # ``` # # Here are some simple widget elements rendered below. 
import ipywidgets as widgets widgets.IntSlider( value=7, min=0, max=10, step=1, description='Test:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='d' ) tab_contents = ['P0', 'P1', 'P2', 'P3', 'P4'] children = [widgets.Text(description=name) for name in tab_contents] tab = widgets.Tab() tab.children = children for ii in range(len(children)): tab.set_title(ii, f"tab_{ii}") tab # You can find [a list of existing Jupyter Widgets](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20List.html) # in the jupyter-widgets documentation.
docs/interactive/interactive.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Random Forest from the scratch (using dataset Adult from UCI) # # A modifield version of a modifield version of: # Decision Tree from the Scratch, <NAME> (Computational Engineer | Data Scientist). # *Source:* https://medium.com/@rakendd/decision-tree-from-scratch-9e23bcfb4928. # # This example is a basic refined from *exemplo1*. # # Done: # # 1) Using Bagging, get a group of $N_B$ random samples ($x_i, i = 1,... ,N_B$) with replacement for each three, for all $M$ trees. # # 2) Each tree with a maximum limit of $s_{MAX}$ splitlevels. Why there categorical features with more then two values, each level may have more then two nodes. *There is implemented the limit of splits trough each way, i.e., fallowing the same sequence till limit. # # 5) The ensembling model is based in voting, may possible to use both majority or soft. The schoice is made when using the predict funcion. *Rather that, it is use only soft voting. # # The final version, we will have: # # 3) Each tree receive $K = s_{MAX}$ random features from all $p$ features. # # 4) There are two alternatives for splitting with numeric features: using entropy criteria and random splitting between max/min values. For categorical features, all values receive a node. 
import re import numpy as np import pandas as pd eps = np.finfo(float).eps from numpy import log2 as log from tabulate import tabulate as tb from anytree import Node, RenderTree from anytree import search as anys from anytree.exporter import DotExporter from IPython.display import Image # ##### Load dataset: # + features = ["Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital Status", "Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss", "Hours per week", "Country", "Target"] train_data = pd.read_csv( #"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", "adult.data", names=features, sep=r'\s*,\s*', engine='python', na_values="?").dropna() Target = 'Target' Labels = train_data.Target.unique() counts = train_data.Target.value_counts() print(counts) test_data = pd.read_csv( #"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", "adult.test_fix", names=features, sep=r'\s*,\s*', skiprows=[0], engine='python', na_values="?").dropna() Labels = test_data.Target.unique() counts = test_data.Target.value_counts() print(counts) # + def find_entropy(df): entropy = 0 values = df[Target].unique() for value in values: temp = df[Target].value_counts()[value]/len(df[Target]) entropy += -temp*np.log2(temp) return entropy def find_entropy_attribute(df,attribute): if not np.issubdtype(df[attribute].dtype, np.number): return find_entropy_attribute_not_number(df,attribute), None else: return find_entropy_attribute_number(df,attribute) def find_entropy_attribute_not_number(df,attribute): target_variables = df[Target].unique() #This gives all 'Yes' and 'No' variables = df[attribute].unique() #This gives different features in that attribute (like 'Hot','Cold' in Temperature) entropy2 = 0 for variable in variables: entropy = 0 for target_variable in target_variables: num = len(df[attribute][df[attribute]==variable][df[Target] ==target_variable]) den = len(df[attribute][df[attribute]==variable]) fraction 
= num/(den+eps) entropy += -fraction*log(fraction+eps) entropy2 += -(den/len(df))*entropy return abs(entropy2) def find_entropy_attribute_number(df,attribute): target_variables = df[Target].unique() #This gives all 'Yes' and 'No' variables = df[attribute].unique() #This gives different features in that attribute (like 'Hot','Cold' in Temperature) variables.sort() if len(variables)>2: variables = variables[1:-1] vk3 = variables[0] entropy3 = 0 else: vk3 = variables[0] entropy3 = np.Inf for vk in variables: entropy = 0 for target_variable in target_variables: num = len(df[attribute][df[attribute]<=vk][df[Target] ==target_variable]) den = len(df[attribute][df[attribute]<=vk]) fraction = num/(den+eps) entropy += -fraction*log(fraction+eps) for target_variable in target_variables: num = len(df[attribute][df[attribute]>vk][df[Target] ==target_variable]) den = len(df[attribute][df[attribute]>vk]) fraction = num/(den+eps) entropy += -fraction*log(fraction+eps) entropy2 = (den/len(df))*abs(entropy) #print(str(entropy2)+"|"+str(vk)) if entropy2>entropy3: entropy3 = entropy2 vk3 = vk return abs(entropy3),vk3 def find_winner(df): IG = [] vk = list() for key in df.columns.difference([Target]): temp,temp2 = find_entropy_attribute(df,key) vk.append(temp2) IG.append(find_entropy(df)-temp) return df.columns.difference([Target])[np.argmax(IG)], vk[np.argmax(IG)] def print_result_node(node,value,classe,prob): print(node +' : '+value+' : '+classe+' ('+str(prob)+')') def buildtree(df,tree=None, mytree=None, T_pro=0.9, T_pro_num=0.6,total_splits=10,splits=1): def ramificatree(Thd,ss): if (len(clValue)==1): tree[node][value] = {} tree[node][value]['Class'] = clValue[0] tree[node][value]['Prob'] = 1.0 #print_result_node(node,value,clValue[0],1) else: prob = counts.max() / counts.sum() if (prob>=Thd)or(splits>=total_splits): tree[node][value] = {} tree[node][value]['Class'] = clValue[counts.argmax()] tree[node][value]['Prob'] = prob 
#print_result_node(node,value,clValue[counts.argmax()],prob) else: ss += 1 tree[node][value] = buildtree(subtable,splits=ss) #print(node +' : '+value+' : *') #print(find_winner(df)) #formata_dados(dados) node,vk = find_winner(df) if tree is None: tree={} tree[node] = {} if vk is None: attValue = np.unique(df[node]) for value in attValue: subtable = df[df[node] == value].reset_index(drop=True) clValue,counts = np.unique(subtable[Target],return_counts=True) splits += 1 ramificatree(T_pro,ss=splits) else: if (len(df[node][df[node] <= vk].unique())>0) and (len(df[node][df[node] > vk].unique())>0): # >vk value = node+' >'+str(vk) subtable = df[df[node] > vk].rename(columns = {node:value}).reset_index(drop=True) clValue,counts = np.unique(subtable[Target],return_counts=True) if (len(subtable[value].unique())==1) and (len(clValue)>1): tree[node][value] = {} tree[node][value]['Class'] = clValue[counts.argmax()] prob = counts.max() / counts.sum() tree[node][value]['Prob'] = prob #print_result_node(node,value,clValue[counts.argmax()],prob) else: splits += 1 ramificatree(T_pro_num,ss=splits) clValue_antes = clValue[0] value_antes = value # <=vk value = node+' <='+str(vk) subtable = df[df[node] <= vk].rename(columns = {node:value}).reset_index(drop=True) clValue,counts = np.unique(subtable[Target],return_counts=True) if ((len(subtable[value].unique())==1) and (len(clValue)>1)): tree[node][value] = {} tree[node][value]['Class'] = clValue[counts.argmax()] prob = counts.max() / counts.sum() tree[node][value]['Prob'] = prob #print_result_node(node,value,clValue[counts.argmax()],prob) else: splits += 1 ramificatree(T_pro_num,ss=splits) else: df[node] = df[node].astype(str) buildtree(df) return tree # Only to see def print_tree(arg): for pre, fill, node in RenderTree(arg): print("%s%s" % (pre, node.name)) def converte_para_anytree(tree,node=None,mytree=None): if node is None: temp = list(tree.keys()) node = temp[0] mytree = {} mytree[node] = Node(node) 
converte_para_anytree(tree,node,mytree) else: tree = tree[node] if not isinstance(tree, str): childs = list(tree.keys()) for child in childs: if (list(tree[child])[0] == 'Class'): temp = mytree[node] mytree[child] = Node(child, parent=temp, target=tree[child]['Class'], prob=tree[child]['Prob']) else: temp = mytree[node] mytree[child] = Node(child, parent=temp) converte_para_anytree(tree,child,mytree) else: mytree[node] = 'Fim' return mytree #anys.findall_by_attr(mytree['Taste'], name="target", value='Yes') def mostra_tree(tree): mytree = converte_para_anytree(tree) temp = list(tree.keys()) root = temp[0] mytree[root] for pre, fill, node in RenderTree(mytree[root]): txt_node = str(node) m = re.search('prob\=\d+\.\d+', txt_node) if Labels[0] in txt_node: if not m is None: print("%s%s" % (pre, node.name+': '+Labels[0]+' ('+m.group()[5:]+')')) else: print("%s%s" % (pre, node.name+': '+Labels[0]+' (?)')) elif Labels[1] in txt_node: if not m is None: print("%s%s" % (pre, node.name+': '+Labels[1]+' ('+m.group()[5:]+')')) else: print("%s%s" % (pre, node.name+': '+Labels[1]+' (?)')) else: print("%s%s" % (pre, node.name)) def mostra_tree_graph(tree, largura=None, altura=None): mytree = converte_para_anytree(tree) temp = list(tree.keys()) root = temp[0] mytree[root] DotExporter(mytree[root]).to_picture("tree.png") return Image(filename='tree.png', width=largura, height=altura) def predict(inst,tree): for node in tree.keys(): if ('<=' in str(tree[node].keys())): childs = list(tree[node].keys()) if ('<=' in childs[1]): temp = childs[1] childs[1] = childs[0] childs[0] = temp vk = float(childs[1].split('>')[1]) if ('>' in node): valor = float(str(inst[node.split('>')[0][:-1]])) elif ('<=' in node): valor = float(str(inst[node.split('<')[0][:-1]])) else: valor = float(str(inst[node])) if (valor > vk): tree = tree[node][childs[1]] prediction = None prob = None if (list(tree)[0] != 'Class'): prediction,prob = predict(inst, tree) else: prediction = tree['Class'] prob = tree['Prob'] 
break; else: tree = tree[node][childs[0]] prediction = None prob = None if (list(tree)[0] != 'Class'): prediction,prob = predict(inst, tree) else: prediction = tree['Class'] prob = tree['Prob'] break; else: value = str(inst[node]) if value in tree[node].keys(): tree = tree[node][value] prediction = None prob = None if (list(tree)[0] != 'Class'): prediction,prob = predict(inst, tree) else: prediction = tree['Class'] prob = tree['Prob'] break; else: prediction = 'Not exists node: '+value prob = 0 return prediction, prob def predict_forest(arg,forest): prob_yes = 0 prob_no = 0 for tree in forest: result = predict(arg,tree) if (result[0] == arg.Target): prob_yes += result[1] else: prob_no += 1-result[1] return prob_yes, prob_no def test_step_prob(arg,tree): P = 0; S = 0 for i in range(0,len(arg)): S += (predict(arg.iloc[i],tree)[0] == arg.iloc[i].Target)*1 P += predict(arg.iloc[i],tree)[1] S = S / len(arg) P = P / len(arg) print(str(S)+' ('+str(P)+')') def test_step(arg,tree): NO = 0; YES = 0 for i in range(0,len(arg)): if (predict(arg.iloc[i],tree)[0] == arg.iloc[i].Target): YES += 1 else: NO += 1 YES = YES / len(arg) NO = NO / len(arg) #print("YES: "+str(YES)+'. NO: '+str(NO)+'.') return YES,NO def test_step_forest(arg,forest): NO = 0; YES = 0 for i in range(0,len(arg)): result = predict_forest(arg.loc[i],forest) if result[0]>result[1]: YES += 1 else: NO += 1 YES = YES / len(arg) NO = NO / len(arg) #print("YES: "+str(YES)+'. 
NO: '+str(NO)+'.') return YES,NO # Bagging functions: def formata_dados(dados): for chave in dados.keys(): if not np.issubdtype(dados[chave].dtype, np.number): dados[chave] = dados[chave].astype(str) elif (len(dados[chave].unique())<5): dados[chave] = dados[chave].astype(str) return dados def amostra_dados(dados,n_samples): dados2 = dados.loc[dados[Target]==Labels[0]].sample(int(n_samples/2)) dados2 = dados2.append(dados.loc[dados[Target]==Labels[1]].sample(int(n_samples/2)), ignore_index=True).reset_index(drop=True) return formata_dados(dados2) # - n_samples=40 forest = list() M = 250 for m in range(0,M): print(str(m+1)+'/'+str(M), end='\r') train_bag = amostra_dados(train_data,n_samples) forest.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8)) n_samples_test = 1000 test_bag = amostra_dados(test_data,n_samples_test) values_tree = np.empty((M,2)) m=0 for tree in forest: result = test_step(test_bag,tree) values_tree[m][0] = result[0] values_tree[m][1] = result[1] m+=1 values_forest = test_step_forest(test_bag,forest) mean_tree = round(values_tree[:,0].mean(),4) std_tree = round(values_tree[:,0].std(),4) print("\n") print(tb([['Trees', "{:.2f}".format(mean_tree)], ['Forest ', "{:.2f}".format(values_forest[0])]], headers=["Method", "Precision (%)"], tablefmt='orgtbl')) mean_tree = round(values_tree[:,0].mean(),4) std_tree = round(values_tree[:,0].std(),4) print("\n") print(tb([['Trees', "{:.2f}".format(mean_tree)], ['Forest ', "{:.2f}".format(values_forest[0])]], headers=["Method", "Precision (%)"], tablefmt='orgtbl')) size_tree = np.empty((M,1)) m=0 for tree in forest: size_tree[m] = len(str(tree)) m+=1 test_step(test_bag,forest[size_tree.argmin()]) mostra_tree_graph(forest[size_tree.argmin()]) mostra_tree(forest[size_tree.argmin()]) test_step(test_bag,forest[size_tree.argmax()]) mostra_tree_graph(forest[size_tree.argmax()]) mostra_tree(forest[size_tree.argmax()]) test_bag.dtypes
RFCPY/.ipynb_checkpoints/exemplo2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] deletable=true editable=true # repr or str are the functions to convert any data type to string # str(1) == repr() # + deletable=true editable=true for x in range(1,6): print(str(x).rjust(2), repr(x*x).rjust(4), end = ' ') print(str(x*x*x).rjust(5)) # + deletable=true editable=true list = [1,2,4,6] listr = repr(list) #lists = str(list) #cannot convert string to list or tuple obj = (1,3,6, [1,8]) repr(obj) # str(obj) #gives error.That is the fucking difference # + deletable=true editable=true list1 = ["Name", "age", "sex"] list2 = ["Jai", "12", "M"] for x,y in zip(list1, list2): #looping technique 1 print("{0} : {1}".format(x,y)) # + deletable=true editable=true for x in range(1,6): print("{0:3d} {1:3d} {2:4d}".format(x,x*x,x*x*x)) # + deletable=true editable=true str = "5.64" str.zfill(10) # + deletable=true editable=true print("They {} the {}".format("play", "football")) print("They {1} the {0}".format("play", "football")) print("They {do} the {game}".format(do = "play", game = "football")) str = "Jai" a= 5 b = 5.5 print("this is {0}th Example by {1} for {2}".format(a,str,b)) # + [markdown] deletable=true editable=true # ## File I/O # + [markdown] deletable=true editable=true # open() returns a file object, and is most commonly used with two arguments: open(filename, mode) # # The first argument is a string containing the filename. The second argument is another string containing a few characters describing the way in which the file will be used. mode can be 'r' when the file will only be read, 'w' for only writing (an existing file with the same name will be erased), and 'a' opens the file for appending; any data written to the file is automatically added to the end. 'r+' opens the file for both reading and writing. 
The mode argument is optional; 'r' will be assumed if it’s omitted. # # # Let The file "test.txt" be, # Hi there. # My name is <NAME> # Welcome to Jupyer Notebook # + deletable=true editable=true f = open("test.txt", "r") f.read() # + deletable=true editable=true #File pointer after reading is at end..so if we write f.read, irt display empty string f.read() # + deletable=true editable=true f = open("test.txt", "r") print(f.readline()) print(f.readline()) f.close() # + deletable=true editable=true f = open("test.txt", "r") for line in f: print(line, end = " ") f.close() # + deletable=true editable=true f = open("file.txt", "w") f.write("Overwrite the text with this") # return the number of character written f = open("file.txt", "r") f.read() f.close() # + deletable=true editable=true tuple_ = ("Hi there", 5, True) tupe_str = repr(tuple_) f = open("test.txt", "w") f.write(tupe_str) f = open("test.txt", "r") print(f.read()) f.close() # + deletable=true editable=true str = "jaisinghal" f = open("test.txt", "r+") #for read and write f.write(str) f.seek(2) f.read() # + deletable=true editable=true f = open("test.txt", "a+") #append and read f.write("sasasasa") f.seek(2) f.read()
python_basics/I-O and Output Formatting.ipynb
import os
import sys
import random

# TODO Make universal imports for all py files?
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

sys.modules['keras'] = keras

import numpy as np

from dice import dice_coef, dice_loss
from generator import DataGen
from visualize import display_slice_from_batch

# --- Reproducibility -------------------------------------------------------
seed = 2019
# BUG FIX: the original did `random.seed = seed`, which *replaces* the seeding
# function with an int instead of calling it; same for `tf.seed = seed`,
# which is a meaningless attribute assignment.
random.seed(seed)
# np.random.seed(seed)  # TODO make config[seed]; left disabled as in the original
if hasattr(tf.random, "set_seed"):
    tf.random.set_seed(seed)      # TF 2.x API
else:
    tf.set_random_seed(seed)      # TF 1.x API

# --- Configuration ---------------------------------------------------------
image_size = 48
patch_size = 48
percent_covered = 0
train_path = "dataset/genPSNR5_5000_48Data/train"
model_path = "oct13_largeframePSNR5_5000_48_nonzero_standardized_local_more-epochs.h5"
epochs = 70
patience = 4
batch_size = 16

train_ids = next(os.walk(train_path))[1]  # Returns all directories under train_path

# NOTE(review): comment says this needs to exceed batch_size, but 8 < 16 —
# confirm which is intended.
val_data_size = 8  # Needs to be greater than batch_size
valid_ids = train_ids[:val_data_size]
train_ids = train_ids[val_data_size:]

# --- Sanity-check one batch from the generator -----------------------------
gen = DataGen(train_ids, train_path, batch_size=batch_size, image_size=image_size,
              patch_size=patch_size, percent_covered=percent_covered)
x, y = gen[0]  # same as gen.__getitem__(0)
print(x.shape, y.shape)

n = 0
z = 8
display_slice_from_batch(x, n, z)
print(x[n, :, :, z].shape)
print(np.amax(x[n, :, :, z]))
print(np.amax(y[n]))
display_slice_from_batch(y, n, z)


def down_block(x, filters, kernel_size=(3, 3, 3), padding="same", strides=(1, 1, 1)):
    """Two 3-D convolutions followed by 2x max-pool down-sampling.

    Returns (c, p): `c` is the pre-pool feature map (kept for the skip
    connection), `p` is the pooled map passed to the next level.
    Channel count doubles inside the block (filters -> filters*2).
    """
    c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(x)
    c = keras.layers.Conv3D(filters * 2, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    p = keras.layers.MaxPool3D((2, 2, 2))(c)
    return c, p


def up_block(x, skip, filters, kernel_size=(3, 3, 3), padding="same", strides=(1, 1, 1)):
    """2x transposed-conv up-sampling, concatenation with the skip tensor,
    then two 3-D convolutions."""
    us = keras.layers.Conv3DTranspose(filters * 4, (2, 2, 2), (2, 2, 2))(x)
    concat = keras.layers.Concatenate()([us, skip])
    c = keras.layers.Conv3D(filters * 2, kernel_size, padding=padding, strides=strides, activation="relu")(concat)
    c = keras.layers.Conv3D(filters * 2, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    return c


def bottleneck(x, filters, kernel_size=(3, 3, 3), padding="same", strides=(1, 1, 1)):
    """Two 3-D convolutions at the lowest resolution (no pooling)."""
    c = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu")(x)
    c = keras.layers.Conv3D(filters * 2, kernel_size, padding=padding, strides=strides, activation="relu")(c)
    return c


def UNet():
    """Build a 3-level 3-D U-Net over (patch, patch, patch, 1) inputs with a
    single-channel sigmoid output (binary segmentation)."""
    # f = [16, 32, 64, 128, 256]
    f = [32, 64, 128, 256]
    inputs = keras.layers.Input((patch_size, patch_size, patch_size, 1))
    p0 = inputs

    # Spatial sizes below assume patch_size == 48.
    c1, p1 = down_block(p0, f[0])  # 48 -> 24
    c2, p2 = down_block(p1, f[1])  # 24 -> 12
    c3, p3 = down_block(p2, f[2])  # 12 -> 6
    # c4, p4 = down_block(p3, f[3])

    bn = bottleneck(p3, f[3])

    u1 = up_block(bn, c3, f[2])    # 6 -> 12
    u2 = up_block(u1, c2, f[1])    # 12 -> 24
    u3 = up_block(u2, c1, f[0])    # 24 -> 48
    # u4 = up_block(u3, c1, f[0])

    outputs = keras.layers.Conv3D(1, (1, 1, 1), padding="same", activation="sigmoid")(u3)
    model = keras.models.Model(inputs, outputs)
    return model


model = UNet()
# NOTE(review): `lr` is a deprecated alias for `learning_rate` in tf.keras
# optimizers; kept for compatibility with the TF version in use.
model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-5),
              loss=dice_loss(smooth=1.),
              metrics=[dice_coef, 'accuracy'])  # , sample_weight_mode="temporal"
model.summary()

# TODO Does valid_gen use percent_covered = 0 or nonzero?
train_gen = DataGen(train_ids, train_path, image_size=image_size, patch_size=patch_size,
                    batch_size=batch_size, percent_covered=percent_covered)
valid_gen = DataGen(valid_ids, train_path, image_size=image_size, patch_size=patch_size,
                    batch_size=batch_size, percent_covered=percent_covered)

# TODO Account for filtered patches
train_steps = len(train_ids) * 8 // batch_size
valid_steps = len(valid_ids) * 8 // batch_size

callbacks = [EarlyStopping(monitor='val_loss', patience=patience, verbose=1),
             ModelCheckpoint(filepath=model_path, monitor='val_loss', save_best_only=True, verbose=1),
             ReduceLROnPlateau(factor=0.5, patience=2, verbose=1)]

# NOTE(review): fit_generator is deprecated in TF >= 2.1 (model.fit accepts
# generators directly); kept for compatibility with the TF version in use.
history = model.fit_generator(train_gen,
                              validation_data=valid_gen,
                              steps_per_epoch=train_steps,
                              validation_steps=valid_steps,
                              epochs=epochs,
                              callbacks=callbacks)

# Persist the training history so the curves can be re-plotted without retraining.
import pickle
with open('trainHistoryDict_psnr5_5000_more-epochs', 'wb') as handle:
    pickle.dump(history.history, handle)

import matplotlib.pyplot as plt
# %matplotlib inline

print(history.history.keys())

# Summarize history for accuracy.
# NOTE(review): newer TF versions name these keys 'accuracy'/'val_accuracy';
# check the keys printed above if a KeyError occurs here.
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# Summarize history for loss.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

## Save the Weights
# model.save_weights("UNet3D1.h5")

## Dataset for prediction
test_gen = DataGen(valid_ids, train_path, image_size=image_size, patch_size=patch_size,
                   batch_size=batch_size, percent_covered=0)
x, y = test_gen[0]
print(x.shape)

# BUG FIX: `result` was displayed below while the predict call was commented
# out, raising NameError; compute the prediction before using it.
result = model.predict(x)
# Optional binarization / inspection, from the original exploration:
# print(np.amax(result))
# result = result > 0.5
# print(np.count_nonzero(result == 1.0))

# NOTE(review): n indexes the first (batch/patch) dimension of x — confirm
# that it holds at least 101 samples, otherwise n=100 is out of range.
n = 100
z = 8
display_slice_from_batch(x, n, z)
display_slice_from_batch(y, n, z)
display_slice_from_batch(result, n, z)
src/2Unet3D_PSNR.ipynb
# # For Each State Calculating Accident Rate per 1000 People
import pandas as pd

# Load the 2017-2019 accident records exported earlier.
accidents = pd.read_csv("../data/YK_all2017-2019_accidents.csv")
# accidents  # debug print

# Load the census population table and expose the state abbreviation
# under the column name used for the join below.
population = pd.read_csv("../data/YK_states_pop.csv")
population['State'] = population['State Abbr']

# Three-year accident totals per state, largest first.
by_state = (accidents
            .groupby(['State'])
            .agg({'ID': 'count'})
            .sort_values("ID", ascending=False))
by_state

# Average over the three years covered by the data.
by_state['Yearly Accident rate'] = (by_state['ID'] / 3).round(decimals=1)
by_state.isnull().values.any()

# Attach the census figures and normalize by the 2019 population.
rates = by_state.merge(population, how="left", on='State')
# rates.loc[rates.isnull().values]
rates['Accident rate by 1000 people'] = (
    rates['Yearly Accident rate'] / rates['2019'] * 1000
).round(decimals=2)
rates.sort_values("Accident rate by 1000 people", ascending=False)

# Persist for downstream notebooks.
rates.to_csv('../data/YK_States_AAR.csv', index=False)
CODE_analysis/YK_State_AAR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # The Case for Using an Embedding Encoder # # It's a common practice to use pre-trained word embeddings to jump start an NLP classifier's performance. The embeddings can be further updated during training (if they aren't frozen) and this can lead to even better performance. If an unseen word is encountered (usually called OOV, out of vocabulary), typically a new embedding row is added. Typically, there are three main strategies for initializing the row's values: # * random values # * zeros # * random values seeded with the matrix's variance for each corresponding column # # We'll examine these approaches, and propose a new method, where we train a regression model to predict the row values. Of course, a regression model isn't going to replace the training of embeddings, however, we'll see that it may significantly jump start the seeding values better than the typical approaches. # The regression model approach, although elementary, has escaped some recent research: # # e.g. "we do not see any significant difference between various methods of random initialization, as long as the variance is kept reasonably low." 
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from tqdm import tqdm
from random import sample
from statistics import mean, stdev
from scipy.spatial.distance import cosine
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE

import os
import sys
import inspect
from pathlib import Path

# Make the project root importable when running from the notebook directory.
currentdir = Path.cwd()
# currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)

from mlyoucanuse.embeddings import (load_embeddings, EMBEDDINGS_METADATA,
                                    create_embeddings_matrix, get_embeddings_layer,
                                    get_embeddings_index)
from mlyoucanuse.featurize_text_fun import word_to_features

# Reproducibility for the torch model defined later in this notebook.
seed = 12
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

# ## Load Glove Embeddings, extract vocab, sample
print(f"Available embeddings: {', '.join(list(EMBEDDINGS_METADATA.keys()))}")
glove_embed = get_embeddings_index('glove', parent_dir=parentdir, embedding_dimensions=300)
glove_vocab = {tmp for tmp in tqdm(glove_embed.keys())}
# BUG FIX: random.sample() no longer accepts a set (removed in Python 3.11);
# materialize the population as a list first.
sample(list(glove_vocab), 5)

# ## Determine the max sequence length so we can model it.
max_word_len = max(len(tmp) for tmp in glove_embed.keys())
max_word_len

# ## Load GoogleNews embeddings, extract vocab, sample
gnews_embed = get_embeddings_index('GoogleNews', parent_dir=parentdir, embedding_dimensions=300)
gnews_vocab = {tmp for tmp in tqdm(gnews_embed.keys())}
sample(list(gnews_vocab), 5)

# BUG FIX: the original printed the raw fraction with a literal '%' sign
# (e.g. "0.30%" for a 30% share); ':.2%' scales by 100 and appends '%'.
sum_underscores = sum(1 for tmp in gnews_vocab if '_' in tmp)
print(f"GoogleNews keys with underscores: {sum_underscores:,}, {sum_underscores / len(gnews_vocab):.2%}")

sum_underscores = sum(1 for tmp in glove_vocab if '_' in tmp)
print(f"Glove keys with underscores: {sum_underscores:,} {sum_underscores / len(glove_vocab):.2%}")

# Restrict both vocabularies to single words (underscore marks phrase tokens).
gnews_vocab_single_words = {tmp for tmp in gnews_vocab if '_' not in tmp}
glove_vocab_single_words = {tmp for tmp in glove_vocab if '_' not in tmp}
glove_single_word_embeddings = [val for key, val in glove_embed.items() if '_' not in key]

# Set subtraction finds the single words GoogleNews has but Glove lacks.
only_in_gnews = gnews_vocab_single_words - glove_vocab_single_words
print(f"Number of single words in GoogleNews: {len(only_in_gnews):,}")
sample(list(only_in_gnews), 5)

# ### Manual sampling and selection produced these only_in_gnews favorites:
# * resuspension
# * comments
# * devourers
# * stickbait
# ### Capitalized words are usually entities; let's extract lower case
# ### candidates since relative words for comparison will be easier to find.
lc_only_in_gnews = {tmp for tmp in only_in_gnews if tmp.islower()}
sample(list(lc_only_in_gnews), 5)  # random.sample needs a sequence (Python 3.11+)

# ## Check that our candidates and near matches are available in the target embedding
print('underserviced' in only_in_gnews, 'underservice' in glove_vocab_single_words)

some_words_only_in_gnews = [
    'resuspension', 'devourers', 'underserviced', 'guitaring', 'wholesomely',
    'redevelops', 'enkindle', 'mooched', 'wattages', 'hellaciously',
    'pouters', 'flushers']
for word in some_words_only_in_gnews:
    if word not in only_in_gnews:
        print(word)

analogous_words_in_glove = [
    'suspension', 'devour', 'underserved', 'guitar', 'wholesome',
    'redevelop', 'kindle', 'mooch', 'wattage', 'hellacious',
    'pout', 'flusher']
for word in analogous_words_in_glove:
    if word not in glove_vocab_single_words:
        print(word)
# not found microcrack, pouter

# # Define the data and data loader
# Each word is padded to 70 chars and featurized per letter; the target is
# its 300-d Glove vector.
X = [word_to_features((word + ' ' * 70)[:70], max_word_length=70, reverse=False)
     for word in list(glove_vocab_single_words)]
y = glove_single_word_embeddings

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05, random_state=42)
X_train = torch.tensor(X_train, dtype=torch.float)
print('X_train', X_train.shape)
y_train = torch.tensor(y_train)
print('y_train', y_train.shape)
X_test = torch.tensor(X_test, dtype=torch.float)
print('X_test', X_test.shape)
y_test = torch.tensor(y_test)
print('y_test', y_test.shape)

batch_size = 64
epochs = 3
learning_rate = 1e-3

train_data = [[X_train[i], y_train[i]] for i in range(len(X_train))]
trainloader = torch.utils.data.DataLoader(train_data, shuffle=True, batch_size=batch_size)


# # Define model and train to generate embeddings
# Other model architectures were tried, but this was the most performant.
class EmbeddingEncoder(nn.Module):
    """Two-layer MLP mapping a fixed-length character-feature vector
    (length `input_shape`) to a word-embedding vector (length `embedding_size`)."""

    def __init__(self, input_shape=70, embedding_size=300, **kwargs):
        super().__init__()
        self.encoder_hidden_layer = nn.Linear(in_features=input_shape, out_features=embedding_size)
        self.encoder_output_layer = nn.Linear(in_features=embedding_size, out_features=embedding_size)

    def forward(self, features):
        """Return predicted embeddings for a batch of feature vectors."""
        activation = self.encoder_hidden_layer(features)
        activation = torch.relu(activation)
        code = self.encoder_output_layer(activation)
        return code


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = EmbeddingEncoder(input_shape=70, embedding_size=300).to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()

model.train()
for epoch in range(epochs):
    running_loss = 0.0
    for i, data in enumerate(trainloader):
        inputs, labels = data
        # BUG FIX: Tensor.to() is not in-place; the original discarded its
        # return value, so on a CUDA device the batch stayed on the CPU and
        # the forward pass would fail with a device mismatch.
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()  # zero the parameter gradients
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % (batch_size * 10) == 0:  # report every batch_size*10 mini-batches
            print('[epoch %d, batch %5d] loss: %.3f'
                  % (epoch + 1, i + 1, running_loss / (batch_size * 10)))
            running_loss = 0.0
print('Finished Training')

# # Verify with the test set
# ## Our metric is cosine *distance*, so lower is better.
y_pred = []
model.eval()
with torch.no_grad():
    for idx, x in enumerate(X_test):
        y_pred.append(model(x))

# BUG FIX: the original computed cosine(y_pred[idx], sample) where `idx` was
# the leftover value from the loop above — i.e. it compared the *last*
# prediction against every target. Pair each prediction with its own target.
scores = [cosine(pred, actual) for pred, actual in zip(y_pred, y_test)]
mean(scores), stdev(scores)

# # Similarity scores for default initializations, rows of all ones
ones = torch.ones(300, dtype=torch.float)
scores = [cosine(ones, sample) for sample in glove_embed.values()]
mean(scores), stdev(scores)

# # Similarity scores for default initializations, rows of almost zeros
almost_zeros = torch.tensor([0.1e-3] * 300)
scores = [cosine(almost_zeros, sample) for sample in glove_embed.values()]
mean(scores), stdev(scores)
# # Similarity scores for matrix rows initialized to random values centered
# # on the variance of the embedding cols
cols = list(glove_embed.values())[0].shape[0]
embed_ar = np.asarray(list(glove_embed.values()))
matrix_variance = np.asarray([np.var(embed_ar[:, idx]) for idx in range(cols)])
del embed_ar, cols

# BUG FIX: np.random.rand(1, 300) yields a 2-D (1, 300) array, but
# scipy.spatial.distance.cosine requires 1-D inputs; draw a flat vector.
scores = [cosine(torch.tensor(matrix_variance * np.random.rand(300)), sample)
          for sample in y_test]
mean(scores), stdev(scores)

# Save and load if desired
# torch.save(model, 'glove_embedding_encoder.pth')
# model = torch.load('glove_embedding_encoder.pth')

# # Here's an example of the matrix values we're trying to approximate
print(cosine(glove_embed['presidents'], glove_embed['president']))
print(cosine(gnews_embed['presidents'], gnews_embed['president']))

# ## Even with the same word, with the difference being singular or plural,
# there's still some fair amount of difference between their representations.
analogous_words_in_glove

# ## Let's visualize the generated embedding initializations
# We'll use TSNE for a visual representation; reducing 300 -> 2 dimensions is
# lossy, but it should show whether we're in the ballpark.
X_to_find = [word_to_features((word + ' ' * 70)[:70], max_word_length=70, reverse=False)
             for word in some_words_only_in_gnews]
X_to_find = torch.tensor(X_to_find, dtype=torch.float)

# Mix known Glove vectors with the encoder's predictions for the OOV words.
old_and_new_words = {}
for word in analogous_words_in_glove:
    old_and_new_words[word] = glove_embed[word]
old_and_new_words['president'] = glove_embed['president']
old_and_new_words['presidents'] = glove_embed['presidents']

model.eval()
with torch.no_grad():
    for idx, x in enumerate(X_to_find):
        old_and_new_words[some_words_only_in_gnews[idx]] = model(x).detach().numpy()

item_vectors = old_and_new_words.items()
vectors = np.asarray([x[1] for x in item_vectors])
# L2-normalize so TSNE compares directions rather than magnitudes.
lengths = np.linalg.norm(vectors, axis=1)
norm_vectors = (vectors.T / lengths).T

tsne = TSNE(n_components=2, perplexity=10, verbose=2).fit_transform(norm_vectors)
x = tsne[:, 0]
y = tsne[:, 1]

fig, ax = plt.subplots(figsize=(20, 10))
ax.scatter(x, y)
for item, x1, y1 in zip(item_vectors, x, y):
    ax.annotate(item[0], (x1, y1), size=14)
plt.show()

# ### Compare the relations for the words "president" and "presidents" above:
# the generated embedding values are close but could use further training —
# still better than random.
# ## Summary
# * Initializing an embedding row for an OOV item via a regression model may
#   provide the best results.
# * Featurizing each letter may allow the regression model to mimic subword
#   embeddings similar to those provided by FastText embeddings.
quality_embeddings/embedding_encoder.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Estimating counts # + [markdown] tags=["remove-cell"] # Think Bayes, Second Edition # # Copyright 2020 <NAME> # # License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/) # + tags=["remove-cell"] # If we're running on Colab, install empiricaldist # https://pypi.org/project/empiricaldist/ import sys IN_COLAB = 'google.colab' in sys.modules if IN_COLAB: # !pip install empiricaldist # + tags=["remove-cell"] # Get utils.py import os if not os.path.exists('utils.py'): # !wget https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py # + tags=["remove-cell"] from utils import set_pyplot_params set_pyplot_params() # - # In the previous chapter we solved problems that involve estimating proportions. # In the Euro problem, we estimated the probability that a coin lands heads up, and in the exercises, you estimated a batting average, the fraction of people who cheat on their taxes, and the chance of shooting down an invading alien. # # Clearly, some of these problems are more realistic than others, and some are more useful than others. # # In this chapter, we'll work on problems related to counting, or estimating the size of a population. # Again, some of the examples will seem silly, but some of them, like the German Tank problem, have real applications, sometimes in life and death situations. # ## The train problem # # I found the train problem # in <NAME>'s, [*Fifty Challenging Problems in # Probability with Solutions*](https://store.doverpublications.com/0486653552.html): # # > "A railroad numbers its locomotives in order 1..N. One day you see a locomotive with the number 60. Estimate how many locomotives the railroad has." 
def update_train(pmf, data):
    """Update `pmf` in place after observing locomotive number `data`.

    Each hypothesis N in `pmf.qs` is a candidate fleet size: seeing any
    particular locomotive from a fleet of N has probability 1/N, and fleets
    smaller than the observed number are impossible.
    """
    fleet_sizes = pmf.qs
    # 1/N where the observation is possible, 0 where N < data.
    likelihood = np.where(fleet_sizes >= data, 1 / fleet_sizes, 0)
    pmf *= likelihood
    pmf.normalize()
posterior.max_prob() # That might not seem like a very good guess; after all, what are the chances that you just happened to see the train with the highest number? # Nevertheless, if you want to maximize the chance of getting # the answer exactly right, you should guess 60. # # But maybe that's not the right goal. # An alternative is to compute the mean of the posterior distribution. # Given a set of possible quantities, $q_i$, and their probabilities, $p_i$, the mean of the distribution is: # # $$\mathrm{mean} = \sum_i p_i q_i$$ # # Which we can compute like this: np.sum(posterior.ps * posterior.qs) # Or we can use the method provided by `Pmf`: posterior.mean() # The mean of the posterior is 333, so that might be a good guess if you want to minimize error. # If you played this guessing game over and over, using the mean of the posterior as your estimate would minimize the [mean squared error](http://en.wikipedia.org/wiki/Minimum_mean_square_error) over the long run. # ## Sensitivity to the prior # # The prior I used in the previous section is uniform from 1 to 1000, but I offered no justification for choosing a uniform distribution or that particular upper bound. # We might wonder whether the posterior distribution is sensitive to the prior. # With so little data---only one observation---it is. # # This table shows what happens as we vary the upper bound: # + tags=["hide-input"] import pandas as pd df = pd.DataFrame(columns=['Posterior mean']) df.index.name = 'Upper bound' for high in [500, 1000, 2000]: hypos = np.arange(1, high+1) pmf = Pmf(1, hypos) update_train(pmf, data=60) df.loc[high] = pmf.mean() df # - # As we vary the upper bound, the posterior mean changes substantially. # So that's bad. # # When the posterior is sensitive to the prior, there are two ways to proceed: # # * Get more data. # # * Get more background information and choose a better prior. # # With more data, posterior distributions based on different priors tend to converge. 
# For example, suppose that in addition to train 60 we also see trains 30 and 90. # # Here's how the posterior means depend on the upper bound of the prior, when we observe three trains: # + tags=["hide-input"] df = pd.DataFrame(columns=['Posterior mean']) df.index.name = 'Upper bound' dataset = [30, 60, 90] for high in [500, 1000, 2000]: hypos = np.arange(1, high+1) pmf = Pmf(1, hypos) for data in dataset: update_train(pmf, data) df.loc[high] = pmf.mean() df # - # The differences are smaller, but apparently three trains are not enough for the posteriors to converge. # ## Power law prior # # If more data are not available, another option is to improve the # priors by gathering more background information. # It is probably not reasonable to assume that a train-operating company with 1000 locomotives is just as likely as a company with only 1. # # With some effort, we could probably find a list of companies that # operate locomotives in the area of observation. # Or we could interview an expert in rail shipping to gather information about the typical size of companies. # # But even without getting into the specifics of railroad economics, we # can make some educated guesses. # In most fields, there are many small companies, fewer medium-sized companies, and only one or two very large companies. # # In fact, the distribution of company sizes tends to follow a power law, as <NAME> reports in *Science* ([official site](http://www.sciencemag.org/content/293/5536/1818.full.pdf), [available here](https://sci-hub.tw/10.1126/science.1062081)). # # This law suggests that if there are 1000 companies with fewer than # 10 locomotives, there might be 100 companies with 100 locomotives, # 10 companies with 1000, and possibly one company with 10,000 locomotives. # # Mathematically, a power law means that the number of companies with a given size, $N$, is proportional to $(1/N)^{\alpha}$, where $\alpha$ is a parameter that is often near 1. 
# # We can construct a power law prior like this: # + tags=["hide-output"] alpha = 1.0 ps = hypos**(-alpha) power = Pmf(ps, hypos, name='power law') power.normalize() # - # For comparison, here's the uniform prior again. hypos = np.arange(1, 1001) uniform = Pmf(1, hypos, name='uniform') uniform.normalize() # Here's what a power law prior looks like, compared to the uniform prior: # + tags=["hide-input"] uniform.plot() power.plot() decorate(xlabel='Number of trains', ylabel='PMF', title='Prior distributions') # - # Here's the update for both priors. dataset = [60] update_train(uniform, dataset) update_train(power, dataset) # And here are the posterior distributions. # + tags=["hide-input"] uniform.plot() power.plot() decorate(xlabel='Number of trains', ylabel='PMF', title='Posterior distributions') # - # The power law gives less prior probability to high values, which yields lower posterior means, and less sensitivity to the upper bound. # # Here's how the posterior means depend on the upper bound when we use a power law prior and observe three trains: # + tags=["hide-input"] df = pd.DataFrame(columns=['Posterior mean']) df.index.name = 'Upper bound' alpha = 1.0 dataset = [30, 60, 90] for high in [500, 1000, 2000]: hypos = np.arange(1, high+1) ps = hypos**(-alpha) power = Pmf(ps, hypos) for data in dataset: update_train(power, data) df.loc[high] = power.mean() df # - # Now the differences are much smaller. In fact, # with an arbitrarily large upper bound, the mean converges on 134. # # So the power law prior is more realistic, because it is based on # general information about the size of companies, and it behaves better in practice. # ## Credible intervals # # So far we have seen two ways to summarize a posterior distribution: the value with the highest posterior probability (the MAP) and the posterior mean. # These are both **point estimates**, that is, single values that estimate the quantity we are interested in. 
def quantile(pmf, prob):
    """Return the smallest quantity whose cumulative probability reaches `prob`.

    `prob` is a probability between 0 and 1. Iterates the (quantity,
    probability) pairs in order, accumulating probability mass; returns NaN
    when the mass never accumulates to `prob`.
    """
    cumulative = 0.0
    for quantity, p in pmf.items():
        cumulative += p
        if cumulative >= prob:
            return quantity
    return np.nan
# # `Pmf` provides a method called `quantile` that does the same thing. # We can call it like this to compute the 5th and 95th percentiles: power.quantile([0.05, 0.95]) # The result is the interval from 91 to 243 trains, which implies: # # * The probability is 5% that the number of trains is less than or equal to 91. # # * The probability is 5% that the number of trains is greater than 243. # # Therefore the probability is 90% that the number of trains falls between 91 and 243 (excluding 91 and including 243). # For this reason, this interval is called a 90% **credible interval**. # # `Pmf` also provides `credible_interval`, which computes an interval that contains the given probability. power.credible_interval(0.9) # ## The German tank problem # # During World War II, the Economic Warfare Division of the American # Embassy in London used statistical analysis to estimate German # production of tanks and other equipment. # # The Western Allies had captured log books, inventories, and repair # records that included chassis and engine serial numbers for individual # tanks. # # Analysis of these records indicated that serial numbers were allocated # by manufacturer and tank type in blocks of 100 numbers, that numbers # in each block were used sequentially, and that not all numbers in each # block were used. So the problem of estimating German tank production # could be reduced, within each block of 100 numbers, to a form of the # train problem. # # Based on this insight, American and British analysts produced # estimates substantially lower than estimates from other forms # of intelligence. And after the war, records indicated that they were # substantially more accurate. # # They performed similar analyses for tires, trucks, rockets, and other # equipment, yielding accurate and actionable economic intelligence. # # The German tank problem is historically interesting; it is also a nice # example of real-world application of statistical estimation. 
# # For more on this problem, see [this Wikipedia page](https://en.wikipedia.org/wiki/German_tank_problem) and <NAME>, "An Empirical Approach to Economic Intelligence in World War II", *Journal of the American Statistical Association*, March 1947, [available here](https://www.cia.gov/library/readingroom/docs/CIA-RDP79R01001A001300010013-3.pdf). # ## Informative priors # # Among Bayesians, there are two approaches to choosing prior # distributions. Some recommend choosing the prior that best represents # background information about the problem; in that case the prior # is said to be **informative**. The problem with using an informative # prior is that people might have different information or # interpret it differently. So informative priors might seem arbitrary. # # The alternative is a so-called **uninformative prior**, which is # intended to be as unrestricted as possible, in order to let the data # speak for itself. In some cases you can identify a unique prior # that has some desirable property, like representing minimal prior # information about the estimated quantity. # # Uninformative priors are appealing because they seem more # objective. But I am generally in favor of using informative priors. # Why? First, Bayesian analysis is always based on # modeling decisions. Choosing the prior is one of those decisions, but # it is not the only one, and it might not even be the most subjective. # So even if an uninformative prior is more objective, the entire analysis is still subjective. # # Also, for most practical problems, you are likely to be in one of two # situations: either you have a lot of data or not very much. If you have a lot of data, the choice of the prior doesn't matter; # informative and uninformative priors yield almost the same results. # If you don't have much data, using relevant background information (like the power law distribution) makes a big difference. 
# # And if, as in the German tank problem, you have to make life and death # decisions based on your results, you should probably use all of the # information at your disposal, rather than maintaining the illusion of # objectivity by pretending to know less than you do. # ## Summary # # This chapter introduces the train problem, which turns out to have the same likelihood function as the dice problem, and which can be applied to the German Tank problem. # In all of these examples, the goal is to estimate a count, or the size of a population. # # In the next chapter, I'll introduce "odds" as an alternative to probabilities, and Bayes's Rule as another form of Bayes's Theorem. # We'll compute distributions of sums and products, and use them to estimate the number of Members of Congress who are corrupt, among other problems. # # But first, you might want to work on these exercises. # ## Exercises # **Exercise:** Suppose you are giving a talk in a large lecture hall and the fire marshal interrupts because they think the audience exceeds 1200 people, which is the safe capacity of the room. # # You think there are fewer than 1200 people, and you offer to prove it. # It would take too long to count, so you try an experiment: # # * You ask how many people were born on May 11 and two people raise their hands. # # * You ask how many were born on May 23 and 1 person raises their hand. # * Finally, you ask how many were born on August 1, and no one raises their hand. # # How many people are in the audience? What is the probability that there are more than 1200 people? # Hint: Remember the binomial distribution. 
# + # Solution # I'll use a uniform prior from 1 to 2000 # (we'll see that the probability is small that there are # more than 2000 people in the room) hypos = np.arange(1, 2000, 10) prior = Pmf(1, hypos) prior.normalize() # + # Solution # We can use the binomial distribution to compute the probability # of the data for each hypothetical audience size from scipy.stats import binom likelihood1 = binom.pmf(2, hypos, 1/365) likelihood2 = binom.pmf(1, hypos, 1/365) likelihood3 = binom.pmf(0, hypos, 1/365) # + # Solution # Here's the update posterior = prior * likelihood1 * likelihood2 * likelihood3 posterior.normalize() # + # Solution # And here's the posterior distribution posterior.plot() decorate(xlabel='Number of people in the audience', ylabel='PMF') # + # Solution # If we have to guess the audience size, # we might use the posterior mean posterior.mean() # + # Solution # And we can use prob_gt to compute the probability # of exceeding the capacity of the room. # It's about 1%, which may or may not satisfy the fire marshal posterior.prob_gt(1200) # - # **Exercise:** I often see [rabbits](https://en.wikipedia.org/wiki/Eastern_cottontail) in the garden behind my house, but it's not easy to tell them apart, so I don't really know how many there are. # # Suppose I deploy a motion-sensing [camera trap](https://en.wikipedia.org/wiki/Camera_trap) that takes a picture of the first rabbit it sees each day. After three days, I compare the pictures and conclude that two of them are the same rabbit and the other is different. # # How many rabbits visit my garden? # # To answer this question, we have to think about the prior distribution and the likelihood of the data: # # * I have sometimes seen four rabbits at the same time, so I know there are at least that many. I would be surprised if there were more than 10. So, at least as a starting place, I think a uniform prior from 4 to 10 is reasonable. 
# # * To keep things simple, let's assume that all rabbits who visit my garden are equally likely to be caught by the camera trap in a given day. Let's also assume it is guaranteed that the camera trap gets a picture every day. # + # Solution hypos = np.arange(4, 11) prior = Pmf(1, hypos) # + # Solution # The probability that the second rabbit is the same as the first is 1/N # The probability that the third rabbit is different is (N-1)/N N = hypos likelihood = (N-1) / N**2 # + # Solution posterior = prior * likelihood posterior.normalize() posterior.bar(alpha=0.7) decorate(xlabel='Number of rabbits', ylabel='PMF', title='The Rabbit Problem') # - # **Exercise:** Suppose that in the criminal justice system, all prison sentences are either 1, 2, or 3 years, with an equal number of each. One day, you visit a prison and choose a prisoner at random. What is the probability that they are serving a 3-year sentence? What is the average remaining sentence of the prisoners you observe? # + # Solution # Here's the prior distribution of sentences hypos = np.arange(1, 4) prior = Pmf(1/3, hypos) prior # + # Solution # If you visit a prison at a random point in time, # the probability of observing any given prisoner # is proportional to the duration of their sentence. likelihood = hypos posterior = prior * likelihood posterior.normalize() posterior # + # Solution # The mean of the posterior is the average sentence. # We can divide by 2 to get the average remaining sentence. posterior.mean() / 2 # - # **Exercise:** If I chose a random adult in the U.S., what is the probability that they have a sibling? To be precise, what is the probability that their mother has had at least one other child. # # [This article from the Pew Research Center](https://www.pewsocialtrends.org/2015/05/07/family-size-among-mothers/) provides some relevant data. # + [markdown] tags=["hide-cell"] # From it, I extracted the following distribution of family size for mothers in the U.S. 
who were 40-44 years old in 2014: # + tags=["hide-cell"] import matplotlib.pyplot as plt qs = [1, 2, 3, 4] ps = [22, 41, 24, 14] prior = Pmf(ps, qs) prior.bar(alpha=0.7) plt.xticks(qs, ['1 child', '2 children', '3 children', '4+ children']) decorate(ylabel='PMF', title='Distribution of family size') # + [markdown] tags=["hide-cell"] # For simplicity, let's assume that all families in the 4+ category have exactly 4 children. # + # Solution # When you choose a person a random, you are more likely to get someone # from a bigger family; in fact, the chance of choosing someone from # any given family is proportional to the number of children likelihood = qs posterior = prior * likelihood posterior.normalize() posterior # + # Solution # The probability that they have a sibling is the probability # that they do not come from a family of 1 1 - posterior[1] # + # Solution # Or we could use prob_gt again posterior.prob_gt(1) # - # **Exercise:** The [Doomsday argument](https://en.wikipedia.org/wiki/Doomsday_argument) is "a probabilistic argument that claims to predict the number of future members of the human species given an estimate of the total number of humans born so far." # # Suppose there are only two kinds of intelligent civilizations that can happen in the universe. The "short-lived" kind go exinct after only 200 billion individuals are born. The "long-lived" kind survive until 2,000 billion individuals are born. # And suppose that the two kinds of civilization are equally likely. # Which kind of civilization do you think we live in? # # The Doomsday argument says we can use the total number of humans born so far as data. # According to the [Population Reference Bureau](https://www.prb.org/howmanypeoplehaveeverlivedonearth/), the total number of people who have ever lived is about 108 billion. # # Since you were born quite recently, let's assume that you are, in fact, human being number 108 billion. 
# If $N$ is the total number who will ever live and we consider you to be a randomly-chosen person, it is equally likely that you could have been person 1, or $N$, or any number in between. # So what is the probability that you would be number 108 billion? # # Given this data and dubious prior, what is the probability that our civilization will be short-lived? # + # Solution hypos = [200, 2000] prior = Pmf(1, hypos) # + # Solution likelihood = 1/prior.qs posterior = prior * likelihood posterior.normalize() posterior # + # According to this analysis, the probability is about 91% that our # civilization will be short-lived. # But this conclusion is based on a dubious prior. # And with so little data, the posterior depends strongly on the prior. # To see that, run this analysis again with a different prior, # and see what the results look like. # What do you think of the Doomsday argument? # -
soln/chap05.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import nilearn import nilearn.plotting import os import os.path as op import shutil import subprocess import argparse import getpass from glob import glob from nilearn import surface from nilearn import plotting import nibabel as nib import numpy as np import matplotlib.figure import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from matplotlib.colors import ListedColormap, LinearSegmentedColormap # - nilearn.plotting.plot_img('./p_group-dinsula_L-connect.nii', cut_coords=(2, 8, 40), output_file=None, display_mode= 'ortho', figure=None, axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=False, colorbar=True, resampling_interpolation='continuous', bg_img='./MNI152_T1_2009c.nii', vmin=None, vmax=None) #Accent, Accent_r, Blues, Blues_r, #BrBG, BrBG_r, BuGn, BuGn_r, BuPu, #BuPu_r, CMRmap, CMRmap_r, Dark2, #Dark2_r, GnBu, GnBu_r, Greens, #Greens_r, Greys, Greys_r, OrRd, OrRd_r, #Oranges, Oranges_r, PRGn, PRGn_r, Paired, Paired_r, #Pastel1, Pastel1_r, Pastel2, Pastel2_r, PiYG, PiYG_r, #PuBu, PuBuGn, PuBuGn_r, PuBu_r, PuOr, PuOr_r, PuRd, #PuRd_r, Purples, Purples_r, RdBu, RdBu_r, RdGy, #RdGy_r, RdPu, RdPu_r, RdYlBu, RdYlBu_r, RdYlGn, #RdYlGn_r, Reds, Reds_r, Set1, Set1_r, Set2, #Set2_r, Set3, Set3_r, Spectral, Spectral_r, #Wistia, Wistia_r, YlGn, YlGnBu, YlGnBu_r, YlGn_r, #YlOrBr, YlOrBr_r, YlOrRd, YlOrRd_r, afmhot, afmhot_r, #autumn, autumn_r, binary, binary_r, #black_blue, #black_blue_r, black_green, black_green_r, #black_pink, black_pink_r, #black_purple, black_purple_r, black_red, black_red_r, #blue_orange, blue_orange_r, blue_red, blue_red_r, blue_transparent, #blue_transparent_full_alpha_range, bone, #bone_r, brg, brg_r, brown_blue, brown_blue_r, brown_cyan, #brown_cyan_r, bwr, bwr_r, cividis, 
cividis_r, cold_hot, cold_hot_r, #cold_white_hot, cold_white_hot_r, cool, cool_r, coolwarm, coolwarm_r, copper, #copper_r, cubehelix, cubehelix_r, cyan_copper, cyan_copper_r, cyan_orange, cyan_orange_r, #flag, flag_r, gist_earth, gist_earth_r, gist_gray, gist_gray_r, gist_heat, gist_heat_r, #gist_ncar, gist_ncar_r, gist_rainbow, gist_rainbow_r, gist_stern, #gist_stern_r, gist_yarg, gist_yarg_r, gnuplot, #gnuplot2, gnuplot2_r, gnuplot_r, gray, gray_r, green_transparent, #green_transparent_full_alpha_range, hot, hot_black_bone, #hot_black_bone_r, hot_r, hot_white_bone, hot_white_bone_r, hsv, hsv_r, #inferno, inferno_r, jet, jet_r, magma, magma_r, nipy_spectral, nipy_spectral_r, #ocean, ocean_hot, ocean_hot_r, ocean_r, pink, pink_r, plasma, plasma_r, prism, prism_r, purple_blue, purple_blue_r, purple_green, purple_green_r, rainbow, rainbow_r, #red_transparent, red_transparent_full_alpha_range, roy_big_bl, seismic, #seismic_r, spring, spring_r, summer, summer_r, tab10, tab10_r, tab20, tab20_r, #tab20b, tab20b_r, tab20c, tab20c_r, terrain, terrain_r, twilight, twilight_r, twilight_shifted, #twilight_shifted_r, videen_style, viridis, viridis_r, winter, winter_r # + # dorsal # dorsal vs. 
posterior fig = plt.figure(figsize=(40, 4)) gs = GridSpec(nrows=1, ncols=4) ax0 = fig.add_subplot(gs[0, 0]) nilearn.plotting.plot_stat_map('./p_group-dinsula_L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax0, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=12, resampling_interpolation='continuous') ax1 = fig.add_subplot(gs[0,1]) nilearn.plotting.plot_stat_map('./d_vs_p-L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax1, title=None, threshold=4.068, annotate=True, draw_cross=False, black_bg=None, cmap= 'gnuplot2', symmetric_cbar='auto', dim=0, vmax=9, resampling_interpolation='continuous') #ax2 = fig.add_subplot(gs[0,2]) #output_file='./DL.png', fig.savefig(fname='./row1.png', dpi=600) plt.show() # + # posterior # dorsal vs ventral. fig = plt.figure(figsize=(40, 4)) gs = GridSpec(nrows=1, ncols=4) ax0 = fig.add_subplot(gs[0, 0]) nilearn.plotting.plot_stat_map('./p_group-pinsula_L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax0, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=12, resampling_interpolation='continuous') ax1 = fig.add_subplot(gs[0,1]) nilearn.plotting.plot_stat_map('./d_vs_v-L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax1, title=None, threshold=4.068, annotate=True, draw_cross=False, black_bg=None, cmap= 'gnuplot2', symmetric_cbar='auto', dim=0, vmax=9, resampling_interpolation='continuous') #ax2 = fig.add_subplot(gs[0,2]) #output_file='./DL.png', fig.savefig(fname='./row2.png', dpi=600) plt.show() # + # ventral # posterior vs ventral. 
fig = plt.figure(figsize=(40, 4)) gs = GridSpec(nrows=1, ncols=4) ax0 = fig.add_subplot(gs[0, 0]) nilearn.plotting.plot_stat_map('./p_group-vinsula_L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax0, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=10, resampling_interpolation='continuous') ax1 = fig.add_subplot(gs[0,1]) nilearn.plotting.plot_stat_map('./p_vs_v-L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(1, 8, 40), display_mode= 'ortho', colorbar=True, figure=fig, axes=ax1, title=None, threshold=4.068, annotate=True, draw_cross=False, black_bg=None, cmap= 'gnuplot2', symmetric_cbar='auto', dim=0, vmax=9, resampling_interpolation='continuous') #ax2 = fig.add_subplot(gs[0,2]) #output_file='./DL.png', fig.savefig(fname='./row3.png', dpi=600) plt.show() # - nilearn.plotting.plot_stat_map('./p_group-dinsula_R-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(2, 8, 40), display_mode='ortho', colorbar=True, figure='fig', axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=None, resampling_interpolation='continuous') #output_file='./DR.png', # + nilearn.plotting.plot_stat_map('./p_group-vinsula_L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(2, -12, -8), output_file='./VL.png', display_mode='ortho', colorbar=False, figure=None, axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=None, resampling_interpolation='continuous') #viridis #jet nilearn.plotting.plot_stat_map('./p_group-vinsula_R-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(2, -12, -8), output_file='./VR.png', display_mode='ortho', colorbar=True, figure=None, axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 
'jet', symmetric_cbar='auto', dim=0, vmax=None, resampling_interpolation='continuous') # + nilearn.plotting.plot_stat_map('./p_group-pinsula_L-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(2, -2, 40), output_file='./PL.png', display_mode='ortho', colorbar=False, figure=None, axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=None, resampling_interpolation='continuous') plotting.show() #viridis #jet nilearn.plotting.plot_stat_map('./p_group-pinsula_R-connect.nii', bg_img='./MNI152_T1_2009c.nii', cut_coords=(2, -2, 40), output_file='./PR.png', display_mode='ortho', colorbar=True, figure=None, axes=None, title=None, threshold=3.89, annotate=True, draw_cross=False, black_bg=None, cmap= 'jet', symmetric_cbar='auto', dim=0, vmax=None, resampling_interpolation='continuous') # + from nilearn import datasets fsaverage = datasets.fetch_surf_fsaverage() from nilearn import surface right = surface.vol_to_surf('./p_group-dinsula_L-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-dinsula_L-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_DL_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_DL_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_DL_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_DL_lat.png', 
hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') right = surface.vol_to_surf('./p_group-dinsula_R-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-dinsula_R-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_DR_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_DR_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_DR_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_DR_lat.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') # + right = surface.vol_to_surf('./p_group-vinsula_L-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-vinsula_L-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_VL_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_VL_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, 
bg_map=fsaverage.sulc_left, output_file='./l_VL_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_VL_lat.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') right = surface.vol_to_surf('./p_group-vinsula_R-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-vinsula_R-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_VR_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_VR_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_VR_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_VR_lat.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') # + right = surface.vol_to_surf('./p_group-pinsula_L-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-pinsula_L-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_PL_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') 
nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_PL_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_PL_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_PL_lat.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') right = surface.vol_to_surf('./p_group-pinsula_R-connect.nii', fsaverage.pial_right) left = surface.vol_to_surf('./p_group-pinsula_R-connect.nii', fsaverage.pial_left) nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_PR_lat.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_right, right, bg_map=fsaverage.sulc_right, output_file='./r_PR_mid.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_PR_mid.png', hemi='right', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') nilearn.plotting.plot_surf_stat_map(fsaverage.pial_left, left, bg_map=fsaverage.sulc_left, output_file='./l_PR_lat.png', hemi='left', view='lateral', threshold=3.89, alpha='auto', vmax=None, cmap='jet', colorbar=True, symmetric_cbar='auto') # -
insula_rsFC/Figure_making/average_connectivity_figures.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# Sanity-check notebook: load one COCO annotation, run OpenCV GrabCut on its
# bounding box, and round-trip the resulting mask through COCO RLE encoding.

# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import os.path as osp
import numpy as np
import cv2

import sys
sys.path.insert(0, '../tools')
import _init_paths
from pycocotools.coco import COCO
from pycocotools import mask as COCOmask

# FIX: `from scipy.misc import imread, imresize` used an API that was
# deprecated and removed in scipy >= 1.3 (and imresize was never used here).
# matplotlib's imread is used instead; for the JPEG images in COCO val2014
# it returns the same uint8 RGB array scipy.misc.imread did.
import matplotlib.pyplot as plt

# %matplotlib inline
# -

# load coco annotations
IMAGE_DIR = '../data/coco/images/val2014/'
annotation_file = '../data/coco/annotations/instances_minival2014.json'
coco = COCO(annotation_file)

# let's check one ann
ann_ids = coco.getAnnIds()

# ## check one object

# Pick an arbitrary annotation and render its ground-truth mask.
ix = 17
ann_id = ann_ids[ix]
ann = coco.loadAnns([ann_id])[0]
m = coco.annToMask(ann)
plt.imshow(m)
print(m.dtype, m.shape)

img = coco.loadImgs([ann['image_id']])[0]
im = plt.imread(osp.join(IMAGE_DIR, img['file_name']))
plt.imshow(im)

# ## Use GrabCut to do the segmentation

# cv2.imread yields BGR, which is fine since the image is only fed to grabCut.
cv_im = cv2.imread(osp.join(IMAGE_DIR, img['file_name']))

mask = np.zeros(cv_im.shape[:2], np.uint8)
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)

# GrabCut's rect is (x, y, w, h) -- the same layout as the COCO bbox.
rect = (int(ann['bbox'][0]), int(ann['bbox'][1]), int(ann['bbox'][2]), int(ann['bbox'][3]))
print(rect)

cv2.grabCut(cv_im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# Pixels labeled definite/probable background (0 or 2) -> 0; foreground -> 1.
grab_mask = np.where((mask==2)|(mask==0), 0, 1).astype('uint8')
plt.imshow(grab_mask)

# ## Encode and Decode RLE

# COCOmask.encode requires a Fortran-ordered array; decode should round-trip.
rle = COCOmask.encode(np.asfortranarray(grab_mask))
grab_mask2 = COCOmask.decode(rle)
plt.imshow(grab_mask2)
pyutils/mask-faster-rcnn/test/test_grabcut.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# TESS time-delay analysis for TIC 260353074: stitch SPOC light curves,
# extract pulsation frequencies, then measure per-segment phases and
# convert them to time delays.

# +
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk

# FIX: find_peaks was used below but never imported (NameError on a fresh kernel).
from scipy.signal import find_peaks

# NOTE(review): AS, phases_simon and dft_phase are called below but are not
# defined or imported in this notebook -- presumably they live in a helper
# module used alongside it. TODO: confirm and import them explicitly.
# -

# Download and stitch all SPOC light curves for the target.
name = 'TIC 260353074'
sr = lk.search_lightcurve(name, author='SPOC')
sr

lcfs = sr.download_all()

# Use the normalized PDCSAP_FLUX
lc = lcfs[0].normalize()
# Loop through the rest of the LCFS, appending to the first light curve
for lcf in lcfs[1:]:
    lc = lc.append(lcf.normalize())

# Keep only good-quality cadences and drop NaNs.
q0 = lc.quality == 0
lc = lc[q0].remove_nans()

lc.scatter()

# +
# Convert relative flux to zero-mean magnitudes.
times = lc.time
# BUG FIX: the original read `np.mean(lc1.flux)`, but `lc1` is never defined
# anywhere in this notebook (NameError); the mean of this light curve's own
# flux is clearly what was intended.
flux = lc.flux - np.mean(lc.flux) + 1
mags = -2.5 * np.log10(flux)
mags = mags - np.mean(mags)

x_TIC = times
y_TIC = mags
# -

# Split the light curve at the n largest time gaps and plot each segment
# in its own color. (The unused `segment_size` variable was removed.)
sort_idx = np.argsort(x_TIC)
n = 25
segment_borders = [0] + [i+1 for i in sorted(np.argsort(np.diff(x_TIC[sort_idx]))[::-1][:n])] + [len(x_TIC)]

# NOTE(review): segment_borders defines n+1 segments but this loop plots only
# the first n, so the final segment is never drawn -- possible off-by-one;
# left unchanged to preserve the original figures.
iterr = 0
#sort_idx = np.argsort(t_combRT)
for i in range(0, n):
    print(iterr)
    plt.scatter(x_TIC[sort_idx][segment_borders[i]:segment_borders[i+1]],
                y_TIC[sort_idx][segment_borders[i]:segment_borders[i+1]], s=1)
    iterr += 1

# Amplitude spectrum of the full light curve and peak extraction.
#AS
#freq, amp = AS(t_TESS, highpass(m_TESS, 5, 50)[5], 15)
freq, amp = AS(x_TIC, y_TIC, 15)
fig, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.plot(freq, amp)

# Discard the low-frequency end before peak finding.
freq = freq[1000:]
amp = amp[1000:]
ax.plot(freq, amp)

max_power = amp.max()
peaks, _ = find_peaks(amp, height=max_power*0.4, distance=200)
ax.plot(freq[peaks], amp[peaks], "x")

# Sort detected peaks by amplitude, strongest first.
sort_idx = np.argsort(amp[peaks])[::-1]
f_TESS = freq[peaks][sort_idx]
a_TESS = amp[peaks][sort_idx]
print(f_TESS)
print(a_TESS)
print(len(f_TESS))

# +
# Frequencies/amplitudes adopted for the per-segment phase fits.
# (The second assignment deliberately supersedes the first, extending the
# mode list from 4 to 7 frequencies.)
f_TESS_list = [11.99158946, 14.63919138, 20.87840057, 13.00383616]
a_TESS_list = [0.00246508, 0.00197533, 0.00191925, 0.00184786]

f_TESS_list = [11.99158946, 14.63919138, 20.87840057, 13.00383616,
               12.49446322, 14.01831695, 11.487294]
a_TESS_list = [0.00246508, 0.00197533, 0.00191925, 0.00184786,
               0.00144314, 0.00106235, 0.00103356]

# Re-derive segment borders (25 segments from the 24 largest gaps).
sort_idx = np.argsort(x_TIC)
segment_borders = [0] + [i+1 for i in sorted(np.argsort(np.diff(x_TIC[sort_idx]))[::-1][:24])] + [len(x_TIC)]

times = x_TIC
magnitudes = y_TIC

time_slice, mag_slice, phase, phase_err = [], [], [], []
time_delays, time_delays_err, time_midpoints = [], [], []
iteration = 0

# Iterate over lightcurve, accumulating points until a segment border is hit.
for t, y, idx in zip(times, magnitudes, range(len(times))):
    time_slice.append(t)
    mag_slice.append(y)

    # In each segment: plot it, recompute its amplitude spectrum,
    # and fit phases at the adopted frequencies.
    if idx == segment_borders[iteration+1]-1:
        print(iteration)
        print(idx)
        fig1, ax1 = plt.subplots(1, 1, figsize=(6.5, 3.5*2.5/2))
        ax1.scatter(time_slice, mag_slice)
        ax1.axvline(min(time_slice))
        ax1.axvline(max(time_slice))

        print('Calc AS')
        #AS################
        freq, amp = AS(time_slice, mag_slice, 5)
        fig, ax = plt.subplots(1, 1, figsize=(7, 4))
        ax.plot(freq, amp)
        freq = freq[50:]
        amp = amp[50:]
        ax.plot(freq, amp)
        max_power = amp.max()
        peaks, _ = find_peaks(amp, height=max_power*0.5, distance=50)
        ax.plot(freq[peaks], amp[peaks], "x")
        sort_idx = np.argsort(amp[peaks])[::-1]
        f_TESS = freq[peaks][sort_idx]
        a_TESS = amp[peaks][sort_idx]
        print(f_TESS)
        print(a_TESS)
        print(len(f_TESS))
        #####################

        # Append the time midpoint
        time_midpoints.append(np.mean(time_slice))

        # And the phases for each frequency
        print('Calc LS fit')
        sol = phases_simon(time_slice, mag_slice, f_TESS_list, a_TESS_list, [0.5]*len(f_TESS_list))
        print(sol)
        phase_i = sol[2]*2*np.pi
        # Phases are taken from the DFT rather than the LS fit; the LS fit
        # still supplies the phase uncertainties below.
        phase.append(dft_phase(time_slice, mag_slice, f_TESS_list))
        #phase.append(phase_i)
        phase_err_i = sol[3]*2*np.pi
        phase_err.append(phase_err_i)

        time_slice, mag_slice = [], []
        iteration += 1

# Unwrap and de-mean the per-frequency phase series.
phase = np.array(phase).T
print(phase)
phase = np.unwrap(phase)
phase -= np.mean(phase)
print(phase)
phase_err = np.array(phase_err).T

print('Calc TD')
# Phase wrapping patch: convert phases (rad) to time delays (days).
for ph, ph_err, f in zip(phase, phase_err, f_TESS_list):
    td = ph / (2*np.pi*f)
    time_delays.append(td - np.mean(td))
    time_delays_err.append(ph_err / (2*np.pi*f))

print('Plot TD')
fig, ax = plt.subplots(1, 1, figsize=(6.5, 3.5*2.5/2))
for i in range(len(f_TESS_list)):
    #ax[0].scatter(time_midpoints, time_delays[i]*86400, s=5)
    # Plot in seconds, zero-pointed on the last segment.
    ax.errorbar(time_midpoints, (time_delays[i]-time_delays[i][-1])*86400,
                yerr=time_delays_err[i]*86400, fmt='.')
#ax[0].set_ylim(-800, 800)
#ax[1].set_ylim(-800, 800)
#plt.tight_layout()
#fig.savefig('f_TESS1.png', tight_layout=True)

# Export: time-delay arrays in seconds, zero-pointed on the last segment.
t_td_AT = time_midpoints
tau_td_AT = []
for j in range(len(f_TESS_list)):
    tau_td_AT.append(np.array([i*86400 for i in time_delays][j] - [i*86400 for i in time_delays][j][-1]))
tau_err_td_AT = time_delays_err
TESS_FULL_2021.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: desc-python # language: python # name: desc-python # --- # # Explore BPZ photo-z's using friends-of-friends catalog matching # # Owners: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> <br> # Last Verified to Run: 2019-01-02 # # This notebook is adapted from `matching_fof.ipynb` to test the performance of photo-z's derived from BPZ. See the `object_gcr_4_photoz.ipynb` notebook for a more in-depth tutorial on how to access the photo-z data. # %pylab inline plt.style.use('seaborn-poster') import FoFCatalogMatching import GCRCatalogs from astropy.table import Table import pandas as pd # ## Get DC2 1.2i Data #Load the DC2 object catalog, with BPZ photo-z's in the `photoz_mode` column. object_cat = GCRCatalogs.load_catalog('dc2_object_run1.2i_with_photoz') #all columns catalog so we can do WL cuts catalog = GCRCatalogs.load_catalog('dc2_object_run1.2i_all_columns') from GCR import GCRQuery star_sprinkled_filter = [ ~GCRQuery('star'), ~GCRQuery('sprinkled'), ~GCRQuery('agn') ] # + # Let's also define a magnitude cut based on LSST gold sample. mag_filters = [ (np.isfinite, 'mag_i'), 'mag_i < 25.0', ] # - #basic cuts basic_cuts = [ GCRQuery('extendedness > 0'), # Extended objects GCRQuery((np.isfinite, 'mag_i')), # Select objects that have i-band magnitudes GCRQuery('clean'), # The source has no flagged pixels (interpolated, saturated, edge, clipped...) 
# and was not skipped by the deblender GCRQuery('xy_flag == 0') # Bad centroiding ] #we will do a cut on this catalog.add_quantity_modifier('shape_hsm_regauss_etot', (np.hypot, 'ext_shapeHSM_HsmShapeRegauss_e1', 'ext_shapeHSM_HsmShapeRegauss_e2'), overwrite=True) #lensing cuts lensing_cuts = [ ~GCRQuery((np.isnan, 'i_modelfit_CModel_instFlux')), # (from this and below) remove nan entries ~GCRQuery((np.isnan, 'ext_shapeHSM_HsmShapeRegauss_resolution')), ~GCRQuery((np.isnan, 'ext_shapeHSM_HsmShapeRegauss_e1')), ~GCRQuery((np.isnan, 'ext_shapeHSM_HsmShapeRegauss_e2')), GCRQuery('snr_i_cModel >= 10'), GCRQuery('detect_isPrimary'), # (from this and below) basic flag cuts ~GCRQuery('deblend_skipped'), ~GCRQuery('base_PixelFlags_flag_edge'), ~GCRQuery('base_PixelFlags_flag_interpolatedCenter'), ~GCRQuery('base_PixelFlags_flag_saturatedCenter'), ~GCRQuery('base_PixelFlags_flag_crCenter'), ~GCRQuery('base_PixelFlags_flag_bad'), ~GCRQuery('base_PixelFlags_flag_suspectCenter'), ~GCRQuery('base_PixelFlags_flag_clipped'), ~GCRQuery('ext_shapeHSM_HsmShapeRegauss_flag'), GCRQuery('ext_shapeHSM_HsmShapeRegauss_resolution >= 0.3'), GCRQuery('shape_hsm_regauss_etot < 2'), GCRQuery('ext_shapeHSM_HsmShapeRegauss_sigma <= 0.4'), GCRQuery('mag_i_cModel < 24.5'), # FIXME: Doesnt have exinction correction GCRQuery('base_Blendedness_abs_instFlux < 10**(-0.375)'), ] # Load ra and dec from object, using both of the filters we just defined. 
# Load ra/dec, object IDs and BPZ photo-z quantities with the basic+magnitude
# cuts, and i-band cModel magnitudes with the weak-lensing cuts.
object_data = object_cat.get_quantities(['ra', 'dec', 'objectId', 'photoz_mode','photoz_pdf'], filters=basic_cuts+mag_filters)
catalog_data = catalog.get_quantities(['objectId', 'mag_i_cModel'], filters=lensing_cuts)

object_data['photoz_pdf'].shape

# ## Join pz data table to the all columns table

#put the dc2 data into pandas dataframes
# photoz_pdf_key records each object's row in the photoz_pdf array so the
# PDFs can be looked up after the join reorders/filters rows.
object_df = pd.DataFrame({"ra":object_data['ra'], "dec":object_data['dec'], 'photoz_mode':object_data['photoz_mode'],
                          "photoz_pdf_key":np.arange(object_data['ra'].size)}, index=object_data['objectId'])
catalog_df = pd.DataFrame({"mag_i_cModel":catalog_data['mag_i_cModel']}, index=catalog_data['objectId'])

#join the data frames (inner join on objectId keeps objects passing both cut sets)
joined = object_df.join(catalog_df, how='inner')

joined_data = {"objectId":joined.index, "ra":joined['ra'].values, "dec":joined['dec'].values,
               "mag_i_cModel":joined['mag_i_cModel'].values, "photoz_pdf_key":joined['photoz_pdf_key'].values,
               "photoz_mode":joined['photoz_mode'].values}

# # Get Truth Data

# Let's now turn to the truth catalog. Turn off md5 sum check to save time.
truth_cat = GCRCatalogs.load_catalog('dc2_truth_run1.2_static', {'md5': None})

# We see there is no mag_i, but only mag_true_i (i.e., magnitude before lensing), and it maps to `i`
print(truth_cat.get_quantity_modifier('mag_true_i'))

# To make our `mag_filters` work, let's define mag_i for the truth catalog
truth_cat.add_quantity_modifier('mag_i', 'i')

# Get ra and dec from truth catalog
# Note that we add i < 25.0 to the native filter to speed up load time
truth_native_filters = (['i < 25.0'])
truth_data = truth_cat.get_quantities(['ra', 'dec', 'object_id', 'redshift'], filters=star_sprinkled_filter, native_filters=truth_native_filters)

print (len(joined_data['ra']))
print (len(truth_data['ra']))

# ## Catalog FoF Matching

# +
# Now we can really do the matching!
# FoFCatalogMatching.match takes a dictionary of catalogs to match, a friends-of-friends linking length.
# Because our "catalog" is not an astropy table or pandas dataframe,
# `len(truth_coord)` won't give the actual length of the table
# so we need to specify `catalog_len_getter` so that the code knows how to get the length of the catalog.
# linking_lengths is in arcseconds.

results = FoFCatalogMatching.match(
    catalog_dict={'truth': truth_data, 'coadd': joined_data},
    linking_lengths=1.0,
    catalog_len_getter=lambda x: len(x['ra']),
)
# -

# FoFCatalogMatching.match returns an astropy table
results

# NOTE(review): hardcoded absolute scratch path -- consider a configurable
# output directory so the notebook runs for other users.
results.write('/global/cscratch1/sd/ihasan/fof_wl_cuts.txt', format='ascii', overwrite=True)

#read in results to avoid doing matching over again
results = Table.read('/global/cscratch1/sd/ihasan/fof_wl_cuts.txt', format='ascii')

# +
# now we want to count the number of truth and objects *for each group*
# but instead of looping over groups, we can do this in a smart (and very fast) way
# first we need to know which rows are from the truth catalog and which are from the coadd
truth_mask = results['catalog_key'] == 'truth'
object_mask = ~truth_mask

# then np.bincount will give us the number of id occurrences (like histogram but with integer input)
n_groups = results['group_id'].max() + 1
n_truth = np.bincount(results['group_id'][truth_mask], minlength=n_groups)
n_object = np.bincount(results['group_id'][object_mask], minlength=n_groups)

# now n_truth and n_coadd are the number of truth/coadd objects in each group
# we want to make a 2d histogram of (n_truth, n_coadd);
# flattening the pair into a single index lets bincount do the counting.
n_max = max(n_truth.max(), n_object.max()) + 1
hist_2d = np.bincount(n_object * n_max + n_truth, minlength=n_max*n_max).reshape(n_max, n_max)

plt.imshow(np.log10(hist_2d+1), extent=(-0.5, n_max-0.5, -0.5, n_max-0.5), origin='lower');
plt.xlabel('Number of truth objects');
plt.ylabel('Number of coadd objects');
plt.colorbar(label=r'$\log(N_{\rm groups} \, + \, 1)$');
# +
# let's further inspect the objects in the groups that have 1-to-1 truth/coadd match.
# first, let's find out the IDs of the groups that have a 1-to-1 truth/coadd match
# (np.in1d is the legacy spelling of np.isin; kept for compatibility with the
# numpy version used elsewhere in this notebook)
one_to_one_group_mask = np.in1d(results['group_id'], np.flatnonzero((n_truth == 1) & (n_object == 1)))

# and then we can find the row indices in the *original* truth/coadd catalogs for those 1-to-1 groups
truth_idx = results['row_index'][one_to_one_group_mask & truth_mask]
object_idx = results['row_index'][one_to_one_group_mask & object_mask]
# -

# z-z plot: BPZ photo-z mode vs. true redshift for the 1-to-1 matches
plt.figure(figsize=(10,7))
plt.hexbin(truth_data['redshift'][truth_idx], joined_data['photoz_mode'][object_idx], gridsize=50)
plt.xlabel('z_truth')
plt.ylabel('z_bpz')
plt.show()

# recover each matched object's p(z) via the key recorded before the join
pdf_idx = joined_data['photoz_pdf_key'][object_idx]

# mean of the stacked p(z): normalized first moment over the PDF grid
top = np.trapz(object_cat.photoz_pdf_bin_centers*object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
bottom = np.trapz(object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
pzmean = top/bottom
histmean = truth_data['redshift'][truth_idx].mean()

# stacked p(z) vs. true-z histogram, with both means marked
plt.plot(object_cat.photoz_pdf_bin_centers, object_data['photoz_pdf'][pdf_idx].sum(axis=0), label='stacked p(z)')
plt.hist(truth_data['redshift'][truth_idx], bins=object_cat.photoz_pdf_bin_centers, histtype='step', lw=3, label='True z');
plt.axvline(pzmean, ls=':', color='C0')
plt.axvline(histmean, ls=':', color='C1')
plt.xlabel('z')
plt.ylabel('count')
plt.legend()
plt.savefig('../plots/stacked-vs-true-1-1.png')

#courtesy Sam
def fastCalcPIT(zgrid, pdf, sz):
    """Probability Integral Transform: CDF of `pdf` evaluated at the true redshift.

    Parameters
    ----------
    zgrid : 1-d ascending array of grid points where `pdf` is sampled.
    pdf   : 1-d array of p(z) values on `zgrid`; assumed to sum to ~1 so the
            cumulative sum is a proper CDF -- TODO confirm upstream normalization.
    sz    : scalar true redshift.

    Returns
    -------
    float in [0, 1]: the CDF linearly interpolated at `sz`, clamped to 0/1
    outside the grid.
    """
    cdf = np.cumsum(pdf)
    if sz <= zgrid[0]:
        return 0.0
    if sz >= zgrid[-1]:
        return 1.0
    idx = np.searchsorted(zgrid, sz, side='left')
    y1, y2 = cdf[idx-1], cdf[idx]
    x1, x2 = zgrid[idx-1], zgrid[idx]
    # FIX: interpolate over the full offset (sz - x1). The previous code used
    # (sz - x1) * 0.5, i.e. evaluated the line halfway between x1 and sz,
    # which systematically biased the PIT low within each grid interval.
    delx = sz - x1
    if np.isclose(delx, 0.0):
        return y1
    slope = (y2 - y1) / (x2 - x1)
    return y1 + slope * delx

# calculate the PIT value for every 1-to-1 matched object
PITS = []
count = 0
for i, j in zip(object_data['photoz_pdf'][pdf_idx], truth_data['redshift'][truth_idx]):
    pit = fastCalcPIT(object_cat.photoz_pdf_bin_centers, i, j)
    PITS.append(pit)
    count += 1
    if count % 100000 == 0:
        print(count)  # progress indicator

# PIT histogram for the entire sample, no tomographic binning.
# A perfectly calibrated p(z) gives a flat histogram at numPIT / nbins.
plt.hist(PITS, bins=50, histtype='step', lw=3);
plt.xlabel('PIT')
plt.ylabel('Count')
numPIT = len(PITS)
plt.axhline(numPIT/50, ls='--', color='k')
# FIX: path previously read '../plots.PIT-1-1.png' (dot instead of slash),
# which wrote the file outside the plots directory.
plt.savefig('../plots/PIT-1-1.png')

# +
# two-to-one match: groups where two truth objects fall in one coadd object
two_to_one_group_mask = np.in1d(results['group_id'], np.flatnonzero((n_truth == 2) & (n_object == 1)))

# and then we can find the row indices in the *original* truth/coadd catalogs for those 2-to-1 groups
truth_idx = results['row_index'][two_to_one_group_mask & truth_mask]
object_idx = results['row_index'][two_to_one_group_mask & object_mask]
# -

pdf_idx = joined_data['photoz_pdf_key'][object_idx]

# stacked p(z) vs. true-z comparison for the 2-to-1 (blended) sample
top = np.trapz(object_cat.photoz_pdf_bin_centers*object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
bottom = np.trapz(object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
pzmean = top/bottom
histmean = truth_data['redshift'][truth_idx].mean()
plt.plot(object_cat.photoz_pdf_bin_centers, object_data['photoz_pdf'][pdf_idx].sum(axis=0), label='stacked p(z)')
plt.hist(truth_data['redshift'][truth_idx], bins=object_cat.photoz_pdf_bin_centers, histtype='step', lw=3, label='True z');
plt.xlabel('z')
plt.ylabel('count')
plt.legend()
plt.savefig('../plots/stacked-vs-true-2-1.png')

# ## Repeat Matching, but now making an arbitrary tomographic bin

# select objects with 0.4 < photoz_mode <= 0.6
tbin_mask = joined_data['photoz_mode'] > .4
tbin_mask &= joined_data['photoz_mode'] <= .6

# FIX: removed `joined['photoz_pdf'].values.sum()` -- 'photoz_pdf' was never a
# column of `joined` (only 'photoz_pdf_key' is), so that line raised a KeyError.

# +
# apply the tomographic-bin mask to every column of the joined catalog
joined_data_tbin = {}
for k in joined_data.keys():
    print(k)  # progress indicator: which column is being masked
    joined_data_tbin[k] = joined_data[k][tbin_mask]
# -

# redo the FoF match against the truth catalog, coadd side restricted to the bin
results = FoFCatalogMatching.match(
    catalog_dict={'truth': truth_data, 'coadd': joined_data_tbin},
    linking_lengths=1.0,
    catalog_len_getter=lambda x: len(x['ra']),
)

# +
# now we want to count the number of truth and objects *for each group*
# but instead of
# looping over groups, we can do this in a smart (and very fast) way

# First we need to know which rows are from the truth catalog and which are from the coadd.
truth_mask = results['catalog_key'] == 'truth'
object_mask = ~truth_mask

# Then np.bincount will give us the number of id occurrences (like histogram but with integer input).
n_groups = results['group_id'].max() + 1
n_truth = np.bincount(results['group_id'][truth_mask], minlength=n_groups)
n_object = np.bincount(results['group_id'][object_mask], minlength=n_groups)

# Now n_truth and n_object are the number of truth/coadd objects in each group.
# Make a 2d histogram of (n_truth, n_object) by encoding each pair as a single integer.
n_max = max(n_truth.max(), n_object.max()) + 1
hist_2d = np.bincount(n_object * n_max + n_truth, minlength=n_max*n_max).reshape(n_max, n_max)

plt.imshow(np.log10(hist_2d+1), extent=(-0.5, n_max-0.5, -0.5, n_max-0.5), origin='lower');
plt.xlabel('Number of truth objects');
plt.ylabel('Number of coadd objects');
plt.colorbar(label=r'$\log(N_{\rm groups} \, + \, 1)$');

# +
# let's further inspect the objects in the groups that have 1-to-1 truth/coadd match.
# first, let's find out the IDs of the groups that have a 1-to-1 truth/coadd match
one_to_one_group_mask = np.in1d(results['group_id'], np.flatnonzero((n_truth == 1) & (n_object == 1)))

# and then we can find the row indices in the *original* truth/coadd catalogs for those 1-to-1 groups
truth_idx = results['row_index'][one_to_one_group_mask & truth_mask]
object_idx = results['row_index'][one_to_one_group_mask & object_mask]
# -

# z-z plot for the tomographic-bin sample
plt.figure(figsize=(10,7))
plt.hexbin(truth_data['redshift'][truth_idx], joined_data_tbin['photoz_mode'][object_idx], gridsize=20)
plt.xlabel('z_truth')
plt.ylabel('z_bpz')
plt.show()

# recover each matched object's p(z) via the key recorded before the join
pdf_idx = joined_data_tbin['photoz_pdf_key'][object_idx]

# mean of the stacked p(z): normalized first moment over the PDF grid
top = np.trapz(object_cat.photoz_pdf_bin_centers*object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
bottom = np.trapz(object_data['photoz_pdf'][pdf_idx].sum(axis=0), x=object_cat.photoz_pdf_bin_centers)
pzmean = top/bottom
histmean = truth_data['redshift'][truth_idx].mean()

plt.plot(object_cat.photoz_pdf_bin_centers, object_data['photoz_pdf'][pdf_idx].sum(axis=0), label='stacked p(z)')
plt.hist(truth_data['redshift'][truth_idx], bins=object_cat.photoz_pdf_bin_centers, histtype='step', lw=3, label='True z');
plt.axvline(pzmean, ls=':', color='C0')
plt.axvline(histmean, ls=':', color='C1')
plt.xlabel('z')
plt.ylabel('count')
# FIX: call legend() *before* savefig() -- previously the figure was saved
# first, so the legend never appeared in the written PNG.
plt.legend()
plt.savefig('../plots/stacked-vs-true-1-1-z4-6.png')

# calculate the PIT value for every 1-to-1 matched object in this bin
PITS = []
count = 0
for i, j in zip(object_data['photoz_pdf'][pdf_idx], truth_data['redshift'][truth_idx]):
    pit = fastCalcPIT(object_cat.photoz_pdf_bin_centers, i, j)
    PITS.append(pit)
    count += 1
    if count % 100000 == 0:
        print(count)  # progress indicator

# PIT histogram for the 0.4 < z <= 0.6 tomographic bin.
# (The original comment said "no tomographic binning", copy-pasted from above.)
# A perfectly calibrated p(z) gives a flat histogram at numPIT / nbins.
plt.hist(PITS, bins=50, histtype='step', lw=3);
plt.xlabel('PIT')
plt.ylabel('Count')
numPIT = len(PITS)
plt.axhline(numPIT/50, ls='--', color='k')
plt.savefig('../plots/PIT-1-1-z4-6.png')
notebooks/matching_fof_bpz_wl_cuts.ipynb