code
stringlengths
38
801k
repo_path
stringlengths
6
263
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python (geog) # language: python # name: geog # --- # # Raster-based market access # # Following in the footsteps of the Malaria Atlas Project's <a href="https://developers.google.com/earth-engine/datasets/catalog/Oxford_MAP_friction_surface_2015_v1_0">Global Friction Surface</a>, this notebook explores a set of tools for calculating: # # 1. Travel time rasters # 2. Drive-time polygons # 3. Marketsheds # # + import sys, os, importlib import rasterio import numpy as np import pandas as pd import geopandas as gpd import osmnx as ox import GOSTnets as gn import skimage.graph as graph from rasterio.mask import mask from rasterio import features from shapely.geometry import box, Point, Polygon from scipy.ndimage import generic_filter from pandana.loaders import osm sys.path.append("../") import infrasap.market_access as ma # + tutorial_folder = os.path.join("../", "tutorial_data") dests = os.path.join(tutorial_folder, "destinations.shp") friction_surface = os.path.join(tutorial_folder, "global_friction_surface.tif") inD = gpd.read_file(dests) inR = rasterio.open(friction_surface) frictionD = inR.read()[0,:,:] # convert friction surface to traversal time (lazily). Original data are # the original data are minutes to travel 1 m, so we will convert to # minutes to cross the cell frictionD = frictionD * 1000 mcp = graph.MCP_Geometric(frictionD) # + # ma.calculate_travel_time? # - travel_costs, traceback = ma.calculate_travel_time(inR, mcp, inD) travel_costs = travel_costs.astype(inR.meta['dtype']) with rasterio.open(os.path.join(tutorial_folder, "least_cost_travel_time.tif"), 'w', **inR.meta) as out_file: out_file.write_band(1, travel_costs) # + # ma.generate_feature_vectors? 
# - drive_time_thresholds = [60, 120, 180, 240] # minutes drive_vectors = ma.generate_feature_vectors(inR, mcp, inD, drive_time_thresholds) drive_vectors.to_file(os.path.join(tutorial_folder, "drive_vectors.shp")) # + # ma.generate_market_sheds? # - importlib.reload(ma) outfile = os.path.join(tutorial_folder, "marketsheds.tif") ma.generate_market_sheds(inR, inD, out_file=outfile)
src/GOSTNets_Raster/notebooks/TUTORIAL_MCP_market_access.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JuJu2181/problem_solving_in_python/blob/master/Assesment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + colab={"base_uri": "https://localhost:8080/"} id="E_2rbc9eTHKA" outputId="fb157723-3ef9-428e-da25-66f4a5c372d6"
print('Start')


# + colab={"base_uri": "https://localhost:8080/"} id="DB5D3KwReDwx" outputId="93d7bcc7-aa1b-403c-eb17-8f4719cb3952"
def convertWord(word):
    """Lower-case the first letter of *word* and upper-case the remainder."""
    head, tail = word[0], word[1:]
    return head.lower() + tail.upper()


# The function is expected to return a string.
# The function accepts string as parameter.
def logic(my_input):
    """Apply convertWord to each whitespace-separated word of *my_input*
    and concatenate the results without separators."""
    return "".join(convertWord(token) for token in my_input.split())


# Do not edit below
# Get the input
my_input = input()
# Print output returned from the logic function
print(logic(my_input))


# + colab={"base_uri": "https://localhost:8080/"} id="GgrnpkB0hi4Y" outputId="13e5d23a-b2ba-473e-e153-0fcc2e6abc44"
# The function is expected to return an integer.
# The function accepts an string array(list of input) and an integer(length of input) as parameters.
def logic(inputs, input_length):
    """Count the lowercase letters that occur in every one of the
    *input_length* strings in *inputs*."""
    base = ord('a')
    # Start by assuming all 26 letters are shared, then intersect the set
    # of seen letters string by string.
    in_all = [True] * 26
    for word in inputs[:input_length]:
        in_this = [False] * 26
        for ch in word:
            if in_all[ord(ch) - base]:
                in_this[ord(ch) - base] = True
        in_all = in_this
    # The number of flags still set is the number of common characters.
    return sum(1 for shared in in_all if shared)


# Do not edit below
# Get the input
input_length = int(input())
inputs = []
for x in range(input_length):
    inputs.append(input())
# Print output returned from the logic function
print(logic(inputs, input_length))


# + colab={"base_uri": "https://localhost:8080/"} id="WTMo5HaGlNHI" outputId="f7e3872e-6242-44e0-b6b0-11614bb78e95"
def isEven(number):
    """Return True when *number* is even."""
    return number % 2 == 0


# The function is expected to return a string.
# The function accepts string as parameter.
def logic(my_input):
    """Return 'yes' when the longest run of '0's in *my_input* has even
    length AND the longest run of '1's has odd length; otherwise 'no'."""
    zeros = ones = 0            # current consecutive-run lengths
    best_zeros = best_ones = 0  # longest runs seen so far
    for ch in my_input:
        if ch == '0':
            ones = 0
            zeros += 1
            best_zeros = max(best_zeros, zeros)
        elif ch == '1':
            zeros = 0
            ones += 1
            best_ones = max(best_ones, ones)
    return 'yes' if isEven(best_zeros) and not isEven(best_ones) else 'no'


# Do not edit below
# Get the input
my_input = input()
# Print output returned from the logic function
print(logic(my_input))

# + colab={"base_uri": "https://localhost:8080/"} id="B1alaLAfs0lz" outputId="34bd94d4-9f28-4005-ecf1-1bf2b43af430"
print('End')

# + id="8qTujPU7wYXu"
Assesment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Imports
# %matplotlib inline
import pandas as pd
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as cpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cmx
from matplotlib.collections import LineCollection
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

# +
# Constants and Vars
DATA_FILE = 'data/driving_log.csv'
# Heading change per unit steering per metre travelled — assumed calibration
# for the Unity simulator; TODO confirm units.
STEERING_CONST = -5.0
# -


# Preprocess CSV file from Unity Simulator
def preprocess(data_file):
    """Load a Unity-simulator driving log and add elapsed-time columns.

    Parameters
    ----------
    data_file : str
        Path to a header-less CSV with columns
        date, steering, throttle, brake, velocity, rpm, torque.

    Returns
    -------
    pandas.DataFrame
        Log with `date` as epoch milliseconds, `time` as seconds since the
        first sample, and `time_diff` as the per-row time delta (NaN first row).
    """
    df = pd.read_csv(data_file, header=None,
                     names=['date', 'steering', 'throttle', 'brake',
                            'velocity', 'rpm', 'torque'])
    df['date'] = df['date'].apply(
        lambda x: int(datetime.strptime(x, '%Y_%m_%d_%H_%M_%S_%f').timestamp() * 1000))
    df['time'] = (df.date - df.date.iloc[0]) / 1000.0
    df['time_diff'] = df.time.diff()
    return df


# +
data = preprocess(DATA_FILE)

# Add dimensions needed to track position: dead-reckon (x, y) from speed and
# the cumulative steering angle.
data['distance'] = data.velocity * data.time_diff
data['rel_steering'] = data.steering * STEERING_CONST * data.distance
data['abs_steering'] = data.rel_steering.cumsum()
data['rel_x'] = np.cos(np.deg2rad(data.abs_steering)) * data.distance
data['rel_y'] = np.sin(np.deg2rad(data.abs_steering)) * data.distance
data['abs_x'] = data.rel_x.cumsum()
data['abs_y'] = data.rel_y.cumsum()


def calculate_fuzzy_logic_visual(velocity_input, throttle_input):
    """Score driving aggressiveness for one (velocity, throttle) sample and
    plot the membership functions used.

    Parameters
    ----------
    velocity_input : float
        Vehicle speed on the [0, 100] universe.
    throttle_input : float
        Throttle position on the [0, 1] universe.

    Returns
    -------
    float
        Defuzzified aggressiveness on the [-4, 4] universe.
    """
    x_velocity = np.arange(0, 100.1, 0.1)
    x_throttle = np.arange(0, 1.01, 0.01)
    x_aggressiveness = np.arange(-4, 5, 1)

    # Generate fuzzy membership functions. These arrays are only used for
    # the plots below; the controller itself uses automf() further down.
    vel_lo = fuzz.trimf(x_velocity, [0, 0, 40])
    vel_md = fuzz.trimf(x_velocity, [15, 50, 85])
    vel_hi = fuzz.trimf(x_velocity, [60, 100, 100])
    throttle_lo = fuzz.trimf(x_throttle, [0, 0, 0.4])
    throttle_md = fuzz.trimf(x_throttle, [0.2, 0.5, 0.8])
    throttle_hi = fuzz.trimf(x_throttle, [0.6, 1.0, 1.0])
    aggressive_LN = fuzz.trimf(x_aggressiveness, [-4, -4, -2])
    aggressive_N = fuzz.trimf(x_aggressiveness, [-3, -2, -1])
    aggressive_M = fuzz.trimf(x_aggressiveness, [-2, 0, 2])
    aggressive_A = fuzz.trimf(x_aggressiveness, [1, 2, 3])
    aggressive_HA = fuzz.trimf(x_aggressiveness, [2, 3, 4])

    # Visualize these universes and membership functions
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, figsize=(8, 9))
    ax0.plot(x_velocity, vel_lo, 'b', linewidth=1.5, label='L')
    ax0.plot(x_velocity, vel_md, 'g', linewidth=1.5, label='M')
    ax0.plot(x_velocity, vel_hi, 'r', linewidth=1.5, label='H')
    ax0.set_title('Vehicle Speed')
    ax0.legend()
    ax1.plot(x_throttle, throttle_lo, 'b', linewidth=1.5, label='L')
    ax1.plot(x_throttle, throttle_md, 'g', linewidth=1.5, label='M')
    ax1.plot(x_throttle, throttle_hi, 'r', linewidth=1.5, label='H')
    ax1.set_title('Throttle Position')
    ax1.legend()
    ax2.plot(x_aggressiveness, aggressive_LN, 'b', linewidth=1.5, label='LN')
    ax2.plot(x_aggressiveness, aggressive_N, 'g', linewidth=1.5, label='N')
    ax2.plot(x_aggressiveness, aggressive_M, 'r', linewidth=1.5, label='M')
    ax2.plot(x_aggressiveness, aggressive_A, 'y', linewidth=1.5, label='A')
    ax2.plot(x_aggressiveness, aggressive_HA, 'teal', linewidth=1.5, label='HA')
    ax2.set_title('Aggressive Behaviour')
    ax2.legend()

    # Build the actual control system (membership functions auto-generated).
    velocity = ctrl.Antecedent(x_velocity, 'velocity')
    throttle = ctrl.Antecedent(x_throttle, 'throttle')
    aggressiveness = ctrl.Consequent(x_aggressiveness, 'aggressiveness')
    velocity.automf(names=['L', 'M', 'H'])
    throttle.automf(names=['L', 'M', 'H'])
    aggressiveness.automf(names=['LN', 'N', 'M', 'A', 'HA'])

    rule1 = ctrl.Rule(velocity['L'] & throttle['L'], aggressiveness['LN'])
    rule2 = ctrl.Rule(velocity['L'] & throttle['M'], aggressiveness['M'])
    rule3 = ctrl.Rule(velocity['L'] & throttle['H'], aggressiveness['HA'])
    rule4 = ctrl.Rule(velocity['M'] & throttle['L'], aggressiveness['N'])
    rule5 = ctrl.Rule(velocity['M'] & throttle['M'], aggressiveness['M'])
    rule6 = ctrl.Rule(velocity['M'] & throttle['H'], aggressiveness['A'])
    rule7 = ctrl.Rule(velocity['H'] & throttle['L'], aggressiveness['HA'])
    rule8 = ctrl.Rule(velocity['H'] & throttle['M'], aggressiveness['A'])
    rule9 = ctrl.Rule(velocity['H'] & throttle['H'], aggressiveness['HA'])

    system = ctrl.ControlSystem(rules=[rule1, rule2, rule3, rule4, rule5,
                                       rule6, rule7, rule8, rule9])
    aggressive_output = ctrl.ControlSystemSimulation(system)
    aggressive_output.input['velocity'] = velocity_input
    aggressive_output.input['throttle'] = throttle_input
    aggressive_output.compute()
    return aggressive_output.output['aggressiveness']
    # aggressiveness.view(sim=aggressive_output)


# BUGFIX: the original called the undefined name calculate_fuzzy_logic()
# (NameError); the function defined above is calculate_fuzzy_logic_visual.
calculate_fuzzy_logic_visual(100, 0.5)


# -
class aggFuzzyLogic:
    """Reusable fuzzy aggressiveness scorer.

    Builds the same control system as calculate_fuzzy_logic_visual once in
    the constructor (without the plots) so repeated scoring is cheap.
    """

    def __init__(self):
        x_velocity = np.arange(0, 100.1, 0.1)
        x_throttle = np.arange(0, 1.01, 0.01)
        x_aggressiveness = np.arange(-4, 5, 1)

        # Generate fuzzy membership functions (unused by the controller,
        # which relies on automf below; kept to mirror the visual version).
        vel_lo = fuzz.trimf(x_velocity, [0, 0, 40])
        vel_md = fuzz.trimf(x_velocity, [15, 50, 85])
        vel_hi = fuzz.trimf(x_velocity, [60, 100, 100])
        throttle_lo = fuzz.trimf(x_throttle, [0, 0, 0.4])
        throttle_md = fuzz.trimf(x_throttle, [0.2, 0.5, 0.8])
        throttle_hi = fuzz.trimf(x_throttle, [0.6, 1.0, 1.0])
        aggressive_LN = fuzz.trimf(x_aggressiveness, [-4, -4, -2])
        aggressive_N = fuzz.trimf(x_aggressiveness, [-3, -2, -1])
        aggressive_M = fuzz.trimf(x_aggressiveness, [-2, 0, 2])
        aggressive_A = fuzz.trimf(x_aggressiveness, [1, 2, 3])
        aggressive_HA = fuzz.trimf(x_aggressiveness, [2, 3, 4])

        velocity = ctrl.Antecedent(x_velocity, 'velocity')
        throttle = ctrl.Antecedent(x_throttle, 'throttle')
        aggressiveness = ctrl.Consequent(x_aggressiveness, 'aggressiveness')
        velocity.automf(names=['L', 'M', 'H'])
        throttle.automf(names=['L', 'M', 'H'])
        aggressiveness.automf(names=['LN', 'N', 'M', 'A', 'HA'])

        rule1 = ctrl.Rule(velocity['L'] & throttle['L'], aggressiveness['LN'])
        rule2 = ctrl.Rule(velocity['L'] & throttle['M'], aggressiveness['M'])
        rule3 = ctrl.Rule(velocity['L'] & throttle['H'], aggressiveness['HA'])
        rule4 = ctrl.Rule(velocity['M'] & throttle['L'], aggressiveness['N'])
        rule5 = ctrl.Rule(velocity['M'] & throttle['M'], aggressiveness['M'])
        rule6 = ctrl.Rule(velocity['M'] & throttle['H'], aggressiveness['A'])
        rule7 = ctrl.Rule(velocity['H'] & throttle['L'], aggressiveness['HA'])
        rule8 = ctrl.Rule(velocity['H'] & throttle['M'], aggressiveness['A'])
        rule9 = ctrl.Rule(velocity['H'] & throttle['H'], aggressiveness['HA'])

        system = ctrl.ControlSystem(rules=[rule1, rule2, rule3, rule4, rule5,
                                           rule6, rule7, rule8, rule9])
        self.agg_simulator = ctrl.ControlSystemSimulation(system)

    def compute(self, velocity_input, throttle_input):
        """Return the defuzzified aggressiveness for one sample."""
        self.agg_simulator.input['velocity'] = velocity_input
        self.agg_simulator.input['throttle'] = throttle_input
        self.agg_simulator.compute()
        return self.agg_simulator.output['aggressiveness']


c = aggFuzzyLogic()
c.compute(50, 0.6)

# tmp = calculate_fuzzy_logic_visual(100, 1.0)
# tmp

# +
# OPTIONAL: Output processed csv file
data.to_csv('data.csv')
# -

# BUGFIX: was calculate_fuzzy_logic(100, 0.9) — undefined name.
calculate_fuzzy_logic_visual(100, 0.9)

# Plot path (x,y)
plt.figure()
plt.title('Position Graph')
plt.ylabel('distance (m)')
plt.xlabel('distance (m)')
plt.plot(data.abs_x, data.abs_y)

# +
# Position vs. Time graph
fig = plt.figure()
# BUGFIX: fig.gca(projection='3d') was removed in matplotlib 3.6;
# add_subplot(projection='3d') works on both old and new versions.
ax = fig.add_subplot(projection='3d')
ax.plot(xs=data.abs_x, ys=data.abs_y, zs=data.time)
ax.set_xlabel('distance (m)')
ax.set_ylabel('distance (m)')
ax.set_zlabel('time (s)')
# -

plt.figure()
plt.title('Velocity Graph')
plt.ylabel('Velocity (m/s)')
plt.xlabel('time (s)')
plt.plot(data.time, data.velocity)

# +
# Subplots with velocity & steering vs time
fig, ax = plt.subplots()
axes = [ax, ax.twinx()]
dp = [data.time, data.time]
dp2 = [data.velocity, data.abs_steering]
y_axis = ["velocity (m/s)", "steering (degrees)"]
colors = ('Red', 'Blue')
for ax, color, d, d2, y in zip(axes, colors, dp, dp2, y_axis):
    ax.plot(d, d2, color=color)
    ax.set_ylabel(y)
    ax.tick_params(axis='y', colors=color)
plt.show()

# +
# Position Graph with dynamic colors for aggressiveness (throttle)
cm = plt.get_cmap('cool')
cNorm = cpl.Normalize(vmin=0, vmax=1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
fig, ax = plt.subplots()
xy = [[xx, yy] for (xx, yy) in zip(data.abs_x, data.abs_y)]
# Draw each segment of the path colored by the throttle at its start point.
for start, stop, tt in zip(xy[:-1], xy[1:], data.throttle[:-1]):
    x, y = zip(start, stop)
    colorVal = scalarMap.to_rgba(tt)
    ax.plot(x, y, color=colorVal)
plt.title('Position Graph')
plt.ylabel('distance (m)')
plt.xlabel('distance (m)')
plt.show()

# +
# Position Graph with custom colormap for aggressiveness (throttle)
# cm = plt.get_cmap('cool')
cdict = {'red':   [(0.0, 0.0, 0.0),
                   # (0.5, 0.0, 0.0),
                   (1.0, 1.0, 1.0)],
         'green': [(0.0, 0.0, 0.0),
                   # (0.25, 0.0, 0.0),
                   # (0.75, 1.0, 1.0),
                   (1.0, 0.0, 0.0)],
         'blue':  [(0.0, 1.0, 1.0),
                   # (0.5, 1.0, 1.0),
                   # (0.75, 0.0, 0.0),
                   (1.0, 0.0, 0.0)]}
# cm = cpl.LinearSegmentedColormap.from_list("", ["blue","violet","red"])
cm = cpl.LinearSegmentedColormap("", cdict)
cNorm = cpl.Normalize(vmin=0, vmax=1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
xy = [[xx, yy] for (xx, yy) in zip(data.abs_x, data.abs_y)]  # (data.abs_x, data.abs_y)
xy = [[x0, x1] for x0, x1 in zip(xy[:-1], xy[1:])]
cSegments = [scalarMap.to_rgba(c) for c in data.throttle]
fig, ax = plt.subplots()
coll = LineCollection(xy, color=cSegments)
ax.add_collection(coll)
ax.autoscale_view()
plt.show()
# -
notebooks/rec/preprocess_simulation_fuzzy_logic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Exploratory analysis of the African systemic-crises dataset: counts and
# timelines of systemic, debt, currency and inflation crises per country.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

sns.set(style='whitegrid')

money = pd.read_csv('african_crises.csv')

money.sample(20)

money.describe()

money.info()

# Systemic crises per country.
# NOTE: all seaborn calls below use explicit keyword arguments (x=..., y=...);
# positional x/y were removed in seaborn 0.12, and the keyword form is also
# valid on older versions.
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(x=money['country'], hue=money['systemic_crisis'], ax=ax)
plt.xlabel('countries')
plt.ylabel('count')
plt.xticks(rotation=45)

# exchange rate: one timeline subplot per country
plt.figure(figsize=(15, 30))
count = 1
for country in money.country.unique():
    plt.subplot(len(money.country.unique()), 1, count)
    count += 1
    sns.lineplot(x=money[money.country == country]['year'],
                 y=money[money.country == country]['exch_usd'])
    plt.subplots_adjust(hspace=0.8)
    plt.xlabel('Years')
    plt.ylabel('Exchange Rates')
    plt.title(country)

# # Sovereign Domestic Debt Default
# Sovereign debt is a central government's debt. It is debt issued by the
# national government in a foreign currency in order to finance the issuing
# country's growth and development.
#
# Sovereign debt is usually created by borrowing government bonds and bills
# and issuing securities. Countries that are less creditworthy compared to
# others directly borrow from world organizations like The World Bank and
# other international financial institutions. An unfavorable change in
# exchange rates and an overly optimistic valuation of the payback from the
# projects financed by the debt can make it difficult for countries to repay
# sovereign debt.
#
# The most important risk in sovereign debt is the risk of default by the
# issuing country. For this reason, countries with stable economies and
# political systems are considered to be less of a default risk in comparison
# to countries with a history of instability.
#
# Sovereign Domestic Debt is a debt owed to lenders who are within the country.
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(x=money['country'], hue=money['domestic_debt_in_default'], ax=ax)
plt.xlabel('countries')
plt.ylabel('counts')
plt.xticks(rotation=45)

# Domestic-debt defaults vs banking crises for the three affected countries.
sovereign = money[['year', 'country', 'domestic_debt_in_default', 'exch_usd', 'banking_crisis']]
sovereign = sovereign[(sovereign['country'] == 'Angola') |
                      (sovereign['country'] == 'Zimbabwe') |
                      (sovereign['country'] == 'Egypt')]
plt.figure(figsize=(20, 15))
count = 1
for country in sovereign.country.unique():
    plt.subplot(len(sovereign.country.unique()), 1, count)
    subset = sovereign[(sovereign['country'] == country)]
    sns.lineplot(x=subset['year'], y=subset['domestic_debt_in_default'], ci=None)
    plt.scatter(subset['year'], subset["banking_crisis"], color='coral', label='Banking Crisis')
    plt.subplots_adjust(hspace=0.6)
    plt.xlabel('Years')
    plt.ylabel('Sovereign Domestic Debt Defaults/Banking Crisis')
    plt.title(country)
    count += 1

# # Sovereign External Debt Default
# External debt is the portion of a country's debt that was borrowed from
# foreign lenders, including commercial banks, governments, or international
# financial institutions. These loans, including interest, must usually be
# paid in the currency in which the loan was made. To earn the needed
# currency, the borrowing country may sell and export goods to the lender's
# country.
#
# A debt crisis can occur if a country with a weak economy is not able to
# repay the external debt due to the inability to produce and sell goods and
# make a profitable return.
#
# If a nation is unable or refuses to repay its external debt, it is said to
# be in a sovereign default. This can lead to the lenders withholding future
# releases of assets that might be needed by the borrowing nation. Such
# instances can have a rolling effect, wherein the borrower's currency
# collapses and that nation's overall economic growth is stalled.
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(x=money['country'], hue=money['sovereign_external_debt_default'], ax=ax)
plt.xlabel('countries')
plt.ylabel('counts')
plt.xticks(rotation=50)

# +
# External-debt defaults vs banking crises for the affected countries.
sovereign_ext = money[['year', 'country', 'sovereign_external_debt_default', 'banking_crisis']]
sovereign_ext = sovereign_ext[(sovereign_ext['country'] == 'Central African Republic') |
                              (sovereign_ext['country'] == 'Ivory Coast') |
                              (sovereign_ext['country'] == 'Zimbabwe') |
                              (sovereign_ext['country'] == 'Egypt')]
plt.figure(figsize=(20, 15))
count = 1
for country in sovereign_ext.country.unique():
    plt.subplot(len(sovereign_ext.country.unique()), 1, count)
    subset = sovereign_ext[(sovereign_ext['country'] == country)]
    sns.lineplot(x=subset['year'], y=subset['sovereign_external_debt_default'], ci=None)
    plt.scatter(subset['year'], subset["banking_crisis"], color='coral', label='Banking Crisis')
    plt.subplots_adjust(hspace=0.6)
    plt.xlabel('Years')
    plt.ylabel('Sovereign Ext Debt Defaults/Banking Crisis')
    plt.title(country)
    count += 1
# -

# currency crisis
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(x=money['country'], hue=money['currency_crises'], ax=ax)
plt.xlabel('countries')
plt.ylabel('counts')
plt.xticks(rotation=45)

# # Inflation Crisis
# As prices rise, a single unit of currency loses value as it buys fewer
# goods and services. This loss of purchasing power impacts the general cost
# of living for the common public which ultimately leads to a deceleration in
# economic growth.
#
# Inflation is a quantitative measure of the rate at which the average price
# level of a basket of selected goods and services in an economy increases
# over a period of time. It is the constant rise in the general level of
# prices where a unit of currency buys less than it did in prior periods.
#
# Let's look at the countries that were accounted for an inflation crisis
fig, ax = plt.subplots(figsize=(20, 10))
sns.countplot(x=money['country'], hue=money['inflation_crises'], ax=ax)
plt.xlabel('countries')
plt.ylabel('counts')

# Most commonly used inflation indexes are the Consumer Price Index (CPI).
# Let's look at the Annual CPI for the three countries to see if we can
# derive any insights.

# +
infla = money[['year', 'country', 'inflation_crises', 'inflation_annual_cpi', 'banking_crisis']]
infla = infla[(infla['country'] == 'Angola') |
              (infla['country'] == 'Zambia') |
              (infla['country'] == 'Zimbabwe') |
              (infla['country'] == 'Egypt')]
# Collapse the occasional coded value 2 to 1 so the crisis flag is binary.
infla = infla.replace(to_replace=2, value=1, regex=False)
plt.figure(figsize=(20, 15))
count = 1
for country in infla.country.unique():
    plt.subplot(len(infla.country.unique()), 1, count)
    subset = infla[(infla['country'] == country)]
    sns.lineplot(x=subset['year'], y=subset['inflation_crises'], ci=None)
    plt.scatter(subset['year'], subset["banking_crisis"], color='coral', label='Banking Crisis')
    plt.subplots_adjust(hspace=0.6)
    plt.xlabel('Years')
    plt.ylabel('Inflation Crisis/Banking Crisis')
    plt.title(country)
    count += 1
# -

# A look at currency rates
sns.set_style('whitegrid')
plt.figure(figsize=(8, 5))
sns.lineplot(x='year', y='exch_usd', hue='country', data=money, palette='colorblind')
plt.xlabel('Year')
plt.ylabel('Exchange Rate')
display()

# Observations:
#
# Some countries have relatively lower exchange rate than other countries.
# Countries like South Africa, Zambia, Egypt and Morocco have relatively lower
# exchange rates (It is hard to interpret with the above graph; let's break
# down the exchange rate for each country in the next graph).
# The exchange rate is almost zero for all the countries before 1940. This
# might be because the value is not recorded or a new currency had been
# adopted by the countries. (Further analysis required)
# There are tremendous spikes in the exchange rates of Angola and Zimbabwe.
# This might indicate an economic breakdown.

# ### Let's break it down further
sns.set_style('whitegrid')
fig, axes = plt.subplots(ncols=4, nrows=4, figsize=(18, 12), dpi=60)
axes = axes.flatten()
for i, ax in zip(money['country'].unique(), axes):
    sns.lineplot(x='year', y='exch_usd', hue='independence',
                 data=money[money['country'] == i], ax=ax)
    ax.set_xlabel('year')
    ax.set_ylabel('exchange rate')
    ax.set_title('{}'.format(i))
    ax.get_legend().remove()
# Use one shared legend taken from the last populated axis.
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc=1)
fig.subplots_adjust(top=0.95)
# Drop the unused trailing axes of the 4x4 grid (13 countries).
for i in range(13, 16):
    fig.delaxes(axes[i])
plt.tight_layout()

# Observations:
#
# All the countries had a good exchange rate before independence. This is
# because most of the countries would have opted for a new currency system
# after independence. For example, the Tunisian dinar was introduced in 1960
# and the Algerian dinar was introduced in 1964 (Ref: Wikipedia).
# Egypt has been an independent country since the 1850s. However its exchange
# rate has started increasing from the 1970s. Let's consider Egypt as a
# special case with respect to independence.
# The exchange rate had gone up after independence for almost all the
# countries except Tunisia. Except Tunisia and Ivory Coast, the exchange rate
# for all the countries has been increasing since independence with some
# fluctuations.
# There are some sudden spikes in the exchange rate. Angolan Kwanza - In
# 1999, a second currency was introduced in Angola called the kwanza and it
# suffered early on from high inflation (Wikipedia). The Tunisian dinar was
# introduced in 1960, hence a spike.

# Number of inflation crises by Country
money.groupby('country').agg({'inflation_crises': 'sum'}).sort_values('inflation_crises', ascending=False)

money.groupby('country').agg({'currency_crises': 'sum'}).sort_values('currency_crises', ascending=False)

money.groupby('country').agg({'banking_crisis': 'count'}).sort_values('banking_crisis', ascending=False)

# NOTE(review): 'Egypt' is passed as value_counts' `normalize` argument
# (truthy), so this returns proportions, not counts for Egypt — confirm intent.
money['banking_crisis'].value_counts('Egypt')
financial crisis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Autos dataset cleaning project

import numpy as np
import pandas as pd

pd.set_option('display.max_columns', 999)

autos = pd.read_csv('autos.csv', encoding='Windows-1252')

autos.head()

autos.info()

# Load dataset and see what info has
autos.columns

autos.columns = ['dateCrawled', 'name', 'seller', 'offerType', 'price', 'abtest',
                 'vehicleType', 'registration_year', 'gearbox', 'powerPS', 'model',
                 'odometer', 'registration_month', 'fuelType', 'brand',
                 'unrepaired_damage', 'ad_created', 'nrOfPictures', 'postalCode',
                 'lastSeen']

autos.head()

# Changed column names to snakecase
autos.describe(include='all')

# |Column|Comment|
# |:-:|:-:|
# |dateCrawled|ok|
# |name| ok|
# |**seller**| **only two values**|
# |**offerType**| **only two values**|
# |**price**|** take of the dollar sign**|
# |**abtest**| **only two values**|
# |vehicleType| ok|
# |**registration_year**| **needs cleaning**|
# |gearbox| only 2 values|
# |**powerPS**| **needs reviewing**|
# |model| ok|
# |**odometer**| **needs cleaning**|
# |**registration_month**| ** needs cleaning**|
# |fuelType| ok|
# |brand| ok|
# |unrepaired_damage|ok|
# |**ad_created**|**needs cleaning**|
# |**nrOfPictures**| **one value**|
# |**postalCode**|**needs cleaning**|
# |lastSeen|ok|

# Strip the unit/thousands markers and convert to integers.
# BUGFIX: use regex=False so '$' is a literal dollar sign. With the regex
# engine '$' anchors at end-of-string (a no-op replace), and pandas' old
# "single-character patterns are literal" special case is deprecated —
# regex=False makes the intent explicit and version-stable.
autos['odometer_km'] = autos.odometer.str.replace('km', '', regex=False)
autos['odometer_km'] = autos.odometer_km.str.replace(',', '', regex=False)
autos['odometer_km'] = autos['odometer_km'].astype(int)
autos['price_usd'] = autos.price.str.replace('$', '', regex=False)
autos['price_usd'] = autos.price_usd.str.replace(',', '', regex=False)
autos['price_usd'] = autos['price_usd'].astype(int)

autos.describe(include='all')

autos.odometer_km.describe()

autos.odometer_km.value_counts().sort_index(ascending=False)

autos[autos['odometer_km'].between(5000, 20000)]

autos.price_usd.describe()

autos.price_usd.value_counts().sort_index(ascending=False)

# ### Taking off the values from 1.5 the lower and upper interquartil range
# Tukey fences built from the observed quartiles (Q1=1100, median=2950, Q3=7200).
min_range = 2950 - (2950 - 1100) * 1.5
min_range

max_range = 2950 + (7200 - 2950) * 1.5
max_range

autos = autos[autos['price_usd'].between(min_range, max_range)]

autos.describe(include='all')

# ### The values from the price are more regular now, the odometer still have a strong orientation to the 150k value

autos['dateCrawled'].str[:10].value_counts(normalize=True, dropna=False).sort_index()

autos['ad_created'].str[:10].value_counts(normalize=True, dropna=False).sort_index()

autos['lastSeen'].str[:10].value_counts(normalize=True, dropna=False).sort_index()

autos['registration_year'].value_counts().sort_index()

# ### The dates from the crawling are in 2016, and it was done in just 1 month.
# ### The ad creation goes from 2015 to 2016
# ### If the creation of the ad was back in 2016, it makes no sense that the model of the vehicle is from 2018 and forward. People are not filling the form accordingly

autos[(autos['registration_year'] < 1900) | (autos['registration_year'] > 2016)]['registration_year'].value_counts().sort_index()

autos[autos['registration_year'].between(1910, 2000)]['registration_year'].value_counts().sort_index()

autos = autos[autos['registration_year'].between(1960, 2016)]

# ### Remove the values before 1960 because older vehicles may be a mistype. The right thing to do was to review the name of the vehicles and check if that model was available at that year. But since I didn't have the time, I decided to make a Salomonic decision
# ### Remove the values after 2016 because the ad was created in 2016, there can't be vehicles from the future. The right thing to do was to check if the registration year matches the model and the ad creation date.

autos.groupby(by='brand')[['odometer_km', 'price_usd']].mean()

brands_counts = autos.brand.value_counts()

# ### I choose the top 20 brands to make the analysis
brands_price = {}
brands_mileage = {}
for brand in brands_counts.index[:20]:
    # Direct assignment: each brand appears once, so the previous
    # dict.get(key, <mean>) pattern always computed and stored the default.
    brands_mileage[brand] = autos[autos['brand'] == brand]['odometer_km'].mean()
    brands_price[brand] = autos[autos['brand'] == brand]['price_usd'].mean()

# brands_counts.index[:20]
brands_price

# ### We see that the skoda is the most expensive ... on average
brands_mileage

price_serie = pd.Series(brands_price)
mileage_serie = pd.Series(brands_mileage)

price_serie

mileage_serie

df = pd.DataFrame(price_serie, columns=['mean_price'])
df['mean_mileage'] = pd.DataFrame(mileage_serie)
df

# ### The mileage from Audi, BMW and Mercedes are among the highest, so is the price maybe because this autos are top brands.
Dataquest/Basics pandas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # <a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/51883694976/in/album-72177720296706479/" title="week1_schedule"><img src="https://live.staticflickr.com/65535/51883694976_cc33017f67.jpg" width="359" height="500" alt="week1_schedule"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script> # # # # Session 1: Clarusway Mini-Bootcamp # # * General Information About Python # * First Program ("Hello world") # * Matter of Quotes and PEP 8 conventions # ### Useful Links # # * [This Notebook on Colab](https://colab.research.google.com/github/4dsolutions/bootcamp/blob/main/session1.ipynb) # * [This Notebook on nbviewer](https://nbviewer.org/github/4dsolutions/bootcamp/blob/main/session1.ipynb) # * [Python.org Home Page](https://www.python.org) # * [Beginner's Guide](https://wiki.python.org/moin/BeginnersGuide) # * [Python PEPs](https://www.python.org/dev/peps/) # * [PEP 8](https://www.python.org/dev/peps/pep-0008/) # * [PEP8.org](https://pep8.org) # * [Standard Library Modules](https://docs.python.org/3/library/index.html) # * [Formatting Mini-Language](https://docs.python.org/3/library/string.html#formatspec) # * [Python Documentation](https://docs.python.org/3/) # * [Anaconda Home Page](https://anaconda.org) # * [Jupyter Project](https://jupyter.org/) # * [Markdown Cheat Sheet](https://notebook.community/tschinz/iPython_Workspace/00_Admin/CheatSheet/Markdown%20CheatSheet) # * [Repl.it](https://replit.com/site/ide) # * [Course Album](https://flic.kr/s/aHBqjzCs82) # * [Course Repository](https://github.com/4dsolutions/bootcamp) # # *curate more links!* # ### Glossary of Terms # # * localhost: your own computer, with IP number 127.0.0.1 # * ASCII: American Standard Code for 
Information Interchange, 128 characters (7-bit) max. # * BDFL: Guido's old title, Benevolent Dictator for Life (he's now BDFL emeritus) # * Python: a general purpose, object-oriented computer language # * Python 3: any 3.x version of Python # * Python 2: any 2.x version of Python (2.7.x is the last) # * REPL: read, evaluate, print loop (interactive shell) # * SQL: Structured Query Language # * TCP/IP: Transmission Control Protocol/Internet Protocol. How computers talk to each other on the internet. # * Unicode: supersedes ASCII, millions of glyphs (including emoji) # # *add more glossary terms as the course progresses* # ## General Information # # Python was designed by <NAME> for a technical audience already well-versed in computer skills, unlike BASIC, which was designed as a "teaching language". However, Python is elegant and expressive enough to attract beginners and professionals alike. Guido always intended that Python should help make "computer programming for everybody" a reality as his [1999 CP4E proposal](https://www.python.org/doc/essays/cp4e/) clearly shows. # # Python was originally named for the British comedy troupe Monty Python. References to Monty Python are scattered throughout the documentation. For example IDLE, the IDE that comes with Python, is a pun on Eric Idle. # # The rich, yet inexpensive (as in mostly free), ecosystem surrounding Python attracts the best and brightest from many walks of life, who then freely contribute, thereby adding yet more value to our Python World. # # Every computer language has its pedigree and story. Python's story includes the great leap forward from 2.x to 3.x, breaking backward compatibility, at a time when Python was already popular within industry. # # The Jupyter Project is one of those value-adding 3rd party tools that brings people flocking to Python. It grew out of the I-Python project, which includes the I-Python console and its "magics". # # "3rd party" means "not part of the Standard Library". 
The [Python Package Index](https://pypi.org/) is a catalog of freely available 3rd party projects. # # Slides: In Class Python Basics # ### What Should I Be Doing Now? # # As this is a first class, it's fine to just listen and see what the work environment will be. If you are able to boot up JupyterLab, locally on your own machine, feel free to do so. Use the Notebook to take notes. # # I recommend extending the Useful Links and Glossary sections using Markdown, and later turning that text into Python dictionaries (after you learn what those are). # ### How Do We Interact with Python? # # In several ways. # # * Invisibly: we don't always know, when using a website, whether Python is playing a role. # * Through the Command Line: Enter `python` on the command line to boot into a Python shell ([REPL](https://en.wikipedia.org/wiki/Read%E2%80%93eval%E2%80%93print_loop)). Alternatively, enter `python some_module.py` to run the module. # * Through an IDE: many interactive development environments are good for Python development. Try them out! # * Inside a Notebook: like here, like in Google colab. Sometimes we use Notebooks in read-only mode, as through nbviewer. Mostly we will use them on localhost. # * Inside an application: some applications are scriptable from the inside using Python e.g.: Rhino3D, Blender, ESRI products. # IDEs: # # <img src="https://miro.medium.com/max/1600/0*5vSccKOZ9Y_aNcVN.png" alt="Python IDEs" width="415" height="146"> # # <a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/51440394003/in/dateposted-public/" title="VScode"><img src="https://live.staticflickr.com/65535/51440394003_14bbe49dd2_o.png" width="318" height="159" alt="VScode"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script> # # A question to ask yourself: do I need my IDE to work in multiple languages or only in Python? You may use different IDEs for different types of work. New IDEs are always in the works. 
# # Exploring Python copyright # How is it that Python knows how to provide copyright info in response to this one instruction? Even without importing anything, Python has access to ```__builtins__```. This module contains Python's core vocabulary and is available upon boot up. "copyright" in dir(__builtins__) a = 2 + 2 a # ## First Program print("Hello, World") print("<NAME>") print("مرحبا بالعالم") # ## Quotes and PEP 8 'This ' "is " 'OK ' """too""" print(""" this might not be what you wanted """) print(r"this\t might be\n" "closer to what you\n" "expected") # Escaped quotes: print("You said: \"If I want quotes to be taken literally, I can 'escape' them.\"") # Raw strings: print(r"This is a raw string \n\n") # Format strings: v = "format" print(f"This is a {v} string") b"this is a byte string" # ## Extra Fun import this import antigravity # run this yourself # See: [Python Environment (also XKCD)](https://xkcd.com/1987/) # Books: # # <a data-flickr-embed="true" href="https://www.flickr.com/photos/kirbyurner/49220514452/in/album-72177720296706479/" title="Python Shelf"><img src="https://live.staticflickr.com/65535/49220514452_0e43b2888b.jpg" width="500" height="500" alt="Python Shelf"></a><script async src="//embedr.flickr.com/assets/client-code.js" charset="utf-8"></script> # # Many good ones are online, and some of those are free. Consider subscribing to an online book source.
session1.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/ram-anand/100daysofMLCode/blob/master/simplify_dot_product_of_vectors_and_matrices.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="ZobQ1DGwsVya" # # How to understand matrix multiplication in terms of vectors # # All you need to know is # - scalar multiplication of a matrix and the dot product # + [markdown] id="Q2ZwmYgZGUoF" # ## Scalar multiplication with vector and matrix # # $$ # k . \begin{bmatrix} a & b \end{bmatrix} = \begin{bmatrix} k.a & k.b \end{bmatrix} # $$ # # $$ # k . \begin{bmatrix} a \\ c \end{bmatrix} = \begin{bmatrix} k.a \\ k.c \end{bmatrix} # $$ # # $$ # k . \begin{bmatrix} a & b \\ c & d \end{bmatrix} = \begin{bmatrix} k.a & k.b \\ k.c & k.d \end{bmatrix} # $$ # + [markdown] id="9UPWkIqEGU7k" # ## Product of two vectors: # $$ # \begin{bmatrix} a \end{bmatrix} . \begin{bmatrix} p \end{bmatrix} # = a.p # $$ # # + [markdown] id="Orn1JnrFJlfe" # ### **row-vector x column-vector:** called dot product or inner product # # # $$ # \begin{bmatrix} a & b \end{bmatrix} . \begin{bmatrix} p \\ r \end{bmatrix} # = a.p + b.r # $$ # + [markdown] id="Cn_YmaEpJptS" # ### **column vector x row vector:** called the outer product (or dyadic product; note that "scalar product" is another name for the dot product above) # # $$ # \begin{bmatrix} a \\ c \end{bmatrix} . \begin{bmatrix} p & q \end{bmatrix} # = \begin{bmatrix} \begin{bmatrix} a \\ c \end{bmatrix} .p & \begin{bmatrix} a \\ c \end{bmatrix} .q \end{bmatrix} = \begin{bmatrix} a. \begin{bmatrix} p & q \end{bmatrix} \\ c . 
\begin{bmatrix} p & q \end{bmatrix} \end{bmatrix} # $$ # + [markdown] id="kIEUlsMWsV2s" # ## Product of the two matrix # # Matrix multiplication of two matrices A & B is either written as AB or A.B (dot product) # # $$ # A = \begin{bmatrix} a & b \\ c & d \end{bmatrix}, \ B = \begin{bmatrix} p & q \\ r & s \end{bmatrix} # $$ # # $$ # AB = A.B = \begin{bmatrix} a & b \\ c & d \end{bmatrix} . \begin{bmatrix} p & q \\ r & s \end{bmatrix} = \begin{bmatrix} ap+br & aq+bs\\ cp+dr & cq + ds \end{bmatrix} # $$ # + [markdown] id="zOeJFzhstBj4" # # # #### Think of it as a dot product: # # $$ # c_1 = \begin{bmatrix} a \\ c \end{bmatrix}, c_2 = \begin{bmatrix} b \\ d \end{bmatrix} , r_1 = \begin{bmatrix} p & q \end{bmatrix}, r_2 = \begin{bmatrix} r & s \end{bmatrix} # $$ # # Now the multiplication: # # $$ # \begin{bmatrix} c_1 & c_2 \end{bmatrix} . \begin{bmatrix} r_1 \\ r_2 \end{bmatrix} = c_1.r_1 + c_2.r_2 = \begin{bmatrix} a \\ c \end{bmatrix} . \begin{bmatrix} p & q \end{bmatrix} + \begin{bmatrix} b \\ d \end{bmatrix} . \begin{bmatrix} r & s \end{bmatrix} # $$ # + [markdown] id="NLeUAMAQtBmn" # ### Think of it as a scalar product: # # $$ # r_1 = \begin{bmatrix} a & b \end{bmatrix}, r_2 = \begin{bmatrix} c & d \end{bmatrix}, c_1 = \begin{bmatrix} p \\ r \end{bmatrix}, c_2 = \begin{bmatrix} q \\ s \end{bmatrix} # $$ # # Now the multiplication: # # #### 1. First take (first matrix is multiplied by scalar): # # # $$ # A . \begin{bmatrix} c_1 & c_2 \end{bmatrix} = \begin{bmatrix} A.c_1 & A.c_2 \end{bmatrix} # $$ # # More elaborately: # # $$ # \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix} . \begin{bmatrix} c_1 & c_2 \end{bmatrix} = \begin{bmatrix} \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix}. c_1 & \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix} . c_2 \end{bmatrix} # $$ # # #### 2. Second take (second matrix is multiplied by scalar): # # $$ # \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix} . 
B = \begin{bmatrix} r_1.B \\\ r_2.B \end{bmatrix} # $$ # # More elaborately: # # $$ # \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix} . \begin{bmatrix} c_1 & c_2 \end{bmatrix} = \begin{bmatrix} r_1 . \begin{bmatrix} c_1 & c_2 \end{bmatrix} \\ r_2 . \begin{bmatrix} c_1 & c_2 \end{bmatrix} \end{bmatrix} # $$ # # #### 3. Third take (normal matrix multiplication): # # > Notice each element is the dot product of vectors # # # $$ # \begin{bmatrix} r_1 \\\ r_2 \end{bmatrix} . \begin{bmatrix} c_1 & c_2 \end{bmatrix} = \begin{bmatrix} r_1.c_1 & r_1.c_2 \\ r_2.c_1 & r_2.c_2 \end{bmatrix} = \begin{bmatrix} \begin{bmatrix} a & b \end{bmatrix}.\begin{bmatrix} p \\ r \end{bmatrix} & \begin{bmatrix} a & b \end{bmatrix}.\begin{bmatrix} q \\ s \end{bmatrix} \\ \begin{bmatrix} c & d \end{bmatrix}.\begin{bmatrix} p \\ r \end{bmatrix} & \begin{bmatrix} c & d \end{bmatrix}.\begin{bmatrix} q \\ s \end{bmatrix} \end{bmatrix} # $$ # #
simplify_dot_product_of_vectors_and_matrices.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Ensemble methods (bagging and AdaBoost) on the Wisconsin breast-cancer data

import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt

# WDBC data set: no header row; column 0 = id, column 1 = diagnosis ('B'/'M'),
# columns 2..31 = 30 numeric features.
# df = pd.read_csv('https://archive.ics.uci.edu/ml/'
#                  'machine-learning-databases'
#                  '/breast-cancer-wisconsin/wdbc.data', header=None)
df = pd.read_csv('wdbc.data', header=None)
df

# +
X = df.loc[:, 2:].values  # 30 numeric features
y = df.loc[:, 1].values   # diagnosis labels

le = LabelEncoder()
y = le.fit_transform(y)
print(f"New classes {le.transform(le.classes_)} are equivalent to old classes {le.classes_}, respectively")

# Stratify so train and test keep the same class proportions.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0, stratify=y)

# +
# Feature extraction: standardise, then project onto the first two principal
# components.  PCA is an unsupervised strategy -- class labels are ignored.
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)  # equivalent to stdsc.fit(X_train) then stdsc.transform(X_train)
X_test_std = stdsc.transform(X_test)        # reuse the *training* statistics

pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
# BUG FIX: the test set must be projected with the PCA fitted on the training
# set.  The original called pca.fit_transform(X_test_std), which re-fits PCA
# on the test data -- that leaks test information and yields components that
# are not comparable with the training projection.
X_test_pca = pca.transform(X_test_std)


# +
def _report(clf, label, X_tr, y_tr, X_te, y_te):
    """Fit *clf*, print its train/test accuracy, and return the fitted model."""
    clf = clf.fit(X_tr, y_tr)
    train_acc = accuracy_score(y_tr, clf.predict(X_tr))
    test_acc = accuracy_score(y_te, clf.predict(X_te))
    print('%s train/test accuracies %.3f/%.3f' % (label, train_acc, test_acc))
    return clf


def _plot_regions(classifiers, titles):
    """Re-fit each classifier on the 2-D PCA training data and draw its
    decision regions side by side (shared code for the two sections below)."""
    x_min = X_train_pca[:, 0].min() - 1
    x_max = X_train_pca[:, 0].max() + 1
    y_min = X_train_pca[:, 1].min() - 1
    y_max = X_train_pca[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.05),
                         np.arange(y_min, y_max, 0.05))
    f, axarr = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row',
                            figsize=(8, 3))
    for idx, clf, tt in zip([0, 1], classifiers, titles):
        clf.fit(X_train_pca, y_train)
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
        axarr[idx].contourf(xx, yy, Z, alpha=0.3)
        axarr[idx].scatter(X_train_pca[y_train == 0, 0],
                           X_train_pca[y_train == 0, 1],
                           c='blue', marker='^')
        axarr[idx].scatter(X_train_pca[y_train == 1, 0],
                           X_train_pca[y_train == 1, 1],
                           c='green', marker='o')
        axarr[idx].set_title(tt)
    axarr[0].set_ylabel('PC2', fontsize=12)
    plt.tight_layout()
    plt.text(0, -0.2, s='PC1', ha='center', va='center', fontsize=12,
             transform=axarr[1].transAxes)
    plt.show()


# +
# ######################## Bagging #######################
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.metrics import accuracy_score

tree = DecisionTreeClassifier(criterion='entropy', random_state=1, max_depth=None)
bag = BaggingClassifier(base_estimator=tree, n_estimators=500,
                        max_samples=1.0, max_features=1.0,
                        bootstrap=True, bootstrap_features=False,
                        n_jobs=1, random_state=1)

tree = _report(tree, 'Decision tree', X_train_pca, y_train, X_test_pca, y_test)
bag = _report(bag, 'Bagging', X_train_pca, y_train, X_test_pca, y_test)
_plot_regions([tree, bag], ['Decision tree', 'Bagging'])

# +
# ################ Adaptive Boosting ##################
from sklearn.ensemble import AdaBoostClassifier

tree = DecisionTreeClassifier(criterion='entropy', random_state=1, max_depth=1)  # decision stump
ada = AdaBoostClassifier(base_estimator=tree, n_estimators=500,
                         learning_rate=0.1, random_state=1)

# NOTE(review): unlike the bagging section, accuracies here are computed on the
# full 30-dimensional features while the plot re-fits on the 2-D PCA
# projection.  Kept as in the original; confirm which space was intended.
tree = _report(tree, 'Decision tree', X_train, y_train, X_test, y_test)
ada = _report(ada, 'AdaBoost', X_train, y_train, X_test, y_test)
_plot_regions([tree, ada], ['Decision tree', 'AdaBoost'])
ensemble/ensemble.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # NYC school locations: quick exploration

# %matplotlib inline
# (jupytext comment-magic form; the original bare "% matplotlib inline" is not
# valid Python in the .py representation of the notebook)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import csv

# this sets up the rest of the notebook!

# load the csv file into the notebook
df = pd.read_csv('school_locations.csv')

# BUG FIX: df.info is just the bound method object, so the original cell
# displayed "<bound method ...>" instead of the summary -- it must be called.
df.info()  # columns, dtypes, non-null counts, memory usage

df.head()  # the first 5 rows of the csv

# the names of the different neighborhoods (NTAs) in New York and how many
# schools each contains -- part of what we'll be looking at
df.NTA_NAME.value_counts()

# the number of schools of each category within each neighborhood
df.groupby('NTA_NAME')['LOCATION_CATEGORY_DESCRIPTION'].value_counts()

# refine the data: keep only four downtown neighborhoods
# (isin() replaces the original chain of |-combined equality filters)
downtown = df[df.NTA_NAME.isin(['West Village', 'East Village',
                                'Chinatown', 'Lower East Side'])]

downtown.NTA_NAME.describe()  # summary of the neighborhood column
downtown.NTA_NAME.count()     # number of downtown schools

# refine the data: keep only the four main grade-level categories
grades = df[df.LOCATION_CATEGORY_DESCRIPTION.isin(
    ['K-8', 'High school', 'Secondary School', 'Elementary'])]

# the number of schools within each labelled category
grades.LOCATION_CATEGORY_DESCRIPTION.value_counts()

# the number of schools of each category across New York City
df.groupby('LOCATION_CATEGORY_DESCRIPTION')['NTA_NAME'].count()

# bar graph of the school categories (typo "Catagories" fixed in the title)
df.groupby('LOCATION_CATEGORY_DESCRIPTION')['NTA_NAME'].count().plot(
    kind='bar', color='purple', title='New York City School Categories',
    figsize=(10, 5))

# box plot of the same per-category counts
df.groupby('LOCATION_CATEGORY_DESCRIPTION')['NTA_NAME'].count().plot(
    kind='box', color='purple', title='New York City School Categories',
    figsize=(10, 5))
nyc_schools.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Answers to Pandas Exercises
#
# There are many ways to solve each of these!  Run each cell below in little
# incremental bits to really see how the code blocks work.

# +
import pandas as pd
import pandas_datareader as pdr  # IF NECESSARY, from terminal: pip install pandas_datareader
import datetime
import numpy as np

start = datetime.datetime(2017, 1, 1)  # you can specify start and end dates this way
end = datetime.datetime(2021, 1, 27)
macro_df = pdr.data.DataReader(['GDP', 'CPIAUCSL', 'UNRATE'], 'fred', start, end)
# -

# ## Part 1
#
# During class, I used this dataframe to go over [Pandas vocab](02b_pandasVocab),
# and we show how to access 1 variable (a "series": a 1D object, not 2D),
# access multiple vars, and access/print/change/reset column names and the index.
#
# Questions:
# - Q0: Do each of the four new golden rules for initial data exploration, from the lecture.
# - Q1: What is the second series above?
# - Q2: What is the frequency of the series?
# - Q3: What is the average ANNUAL GDP, based on the data?

# +
def insufficient_but_starting_eda(df, cat_vars_list=None):
    '''
    Print a MINIMUM amount of exploratory output about a new dataframe.

    Parameters
    ----------
    df : DATAFRAME
    cat_vars_list : LIST, optional
        Variable names in the dataframe (likely categorical) for which to
        show the number of unique values and the 10 most common values.

    Returns
    -------
    None. It simply prints.

    Description
    -------
    Look at all the output below and consider the data exploration and
    cleaning questions from
    https://ledatascifi.github.io/ledatascifi-2021/content/03/02e_eda_golden.html#member
    Also LOOK at more of the data manually, then write up anything notable.

    TIP: put this function in your codebook to reuse easily.
    PROTIP: Improve this function (better outputs, better formatting).
    FEATURE REQUEST: optionally print the nunique and top 10 values under the describe matrix
    FEATURE REQUEST: optionally print more stats (percentiles)
    '''
    print(df.head(), '\n---')
    print(df.tail(), '\n---')
    print(df.columns, '\n---')
    print("The shape is: ", df.shape, '\n---')
    # BUG FIX: df.info() prints its own report and returns None, so the
    # original print("Info:", df.info(), ...) emitted a stray "Info: None".
    print("Info:")
    df.info()  # memory usage, dtype, and # of non-null obs (--> # of missing obs) per variable
    print('---')
    print(df.describe(), '\n---')  # summary stats, and you can customize the list!
    if cat_vars_list is not None:  # idiom: identity comparison against None
        for var in cat_vars_list:
            print(var, "has", df[var].nunique(), "values and its top 10 most common are:")
            print(df[var].value_counts().head(10), '\n---')


insufficient_but_starting_eda(macro_df, ['UNRATE'])
# -

# ### Some answers
# - Q0: What have we learned? GDP is annual, the others are quarterly -- keep track of that.
# - Q1: Inflation (CPI)
# - Q2: Quarterly, but GDP is only annual
# - Q3: 20,630 (trillion)

# ## Part 2
#
# - Q4: Download the annual *real* gdp from 1960 to 2018 from FRED and compute
#       the average annual percent change
# - Q5: Compute the average gdp percent change within *each decade*

# +
# v1:
part2_df = pdr.data.DataReader(['GDPCA'], 'fred', 1960, 2018)
part2_df['real_gdp_pct'] = part2_df['GDPCA'].pct_change()
part2_df['real_gdp_pct'].mean()
# prof notes: after students present, go through bit by bit, then add comments
# (a la pseudocode); reiterate variable access df[var] and methods df[var].func()
# -

# v1.1: chain the last 2 lines together
part2_df = pdr.data.DataReader(['GDPCA'], 'fred', 1960, 2018)
part2_df['GDPCA'].pct_change().mean()

# +
# v1.2: that last line as a multi-line chain -- readable, no temp objects.
# Breaking this into 3 lines is silly but shows chaining over multiple lines.
(part2_df
 .pct_change()
 .mean()
)
# -

# v2.0: full chain.  .assign works on whatever was produced on the line
# above, which has no name -- a "lambda" gives it a temporary one.  This is
# VERY COMMON in pandas chaining:
#   .assign(newvarname = lambda <tempnameforpriorobj>: <do stuff to it>)
(
    pdr.data.DataReader(['GDPCA', 'GDPA'], 'fred', 1960, 2018)
    .assign(real_gdp_pct=lambda x: x['GDPCA'].pct_change())
    # grab the var and take its mean
    ['real_gdp_pct'].mean()
)

# v2.1: chaining - clean
(
    # DL data
    pdr.data.DataReader(['GDPCA', 'GDPA'], 'fred', 1960, 2018)
    # create var
    .assign(real_gdp_pct=lambda x: x['GDPCA'].pct_change())
    # get mean value
    ['real_gdp_pct'].mean()
)

# Q5:
import numpy as np

(
    # DL data
    pdr.data.DataReader(['GDPCA', 'GDPA'], 'fred', 1960, 2018)
    # create var: pct change
    .assign(real_gdp_pct=lambda x: x['GDPCA'].pct_change())
    .reset_index()  # turn the DATE index into a variable
    # DATE is a datetime series; .dt.year pulls the year out, floor -> decade
    .assign(decade=lambda x: np.floor(x.DATE.dt.year / 10) * 10)
    # "for each decade" = groupby!
    .groupby('decade')
    ['real_gdp_pct'].mean()
)

# ## Part 3
#
# January data on unemployment, the Case-Shiller housing index, and median
# household income in three states (CA/MI/PA).

# +
# LOAD DATA AND CONVERT TO ANNUAL
start = 1990  # pandas datareader can infer these are years
end = 2018
macro_data = pdr.data.DataReader(
    ['CAUR', 'MIUR', 'PAUR',             # unemployment
     'LXXRSA', 'DEXRSA', 'WDXRSA',       # Case-Shiller index in LA, Detroit, DC (no PA available!)
     'MEHOINUSCAA672N', 'MEHOINUSMIA672N', 'MEHOINUSPAA672N'],  # median household income
    'fred', start, end)
macro_data = macro_data.resample('Y').first()  # gets each variable's first observation in a given year

# CLEAN UP THE FORMATTING SOMEWHAT
macro_data.index = macro_data.index.year
print("\n\n DATA BEFORE FORMATTING: \n\n")
print(macro_data[:20])  # ugly variable names, but it's an annual dataset at least

macro_data.columns = pd.MultiIndex.from_tuples([
    ('Unemployment', 'CA'), ('Unemployment', 'MI'), ('Unemployment', 'PA'),
    ('HouseIdx', 'CA'), ('HouseIdx', 'MI'), ('HouseIdx', 'PA'),
    ('MedIncome', 'CA'), ('MedIncome', 'MI'), ('MedIncome', 'PA')
])
print("\n\n DATA AFTER FORMATTING: \n\n")
print(macro_data[:20])
# this dataset is "wide"; the column names now have 2 levels -- variable name,
# and the unit/state that variable applies to
# -

# - Q6: for each decade and state, report the average annual CHANGE (level, not
#       percent) in unemployment
# - Q7: for each decade and state, report the average annual PERCENT CHANGE in
#       house prices and household income

# +
# pseudocode -- q6 and q7 share most steps, so combine:
#   get decade variable
#   get annual change (level) in unemployment for each state
#   get annual pct change in house price and income for each state
#   average each within state-decade

# +
pd.set_option('display.float_format', '{:,.2f}'.format)
(
    # reformat the data to tall
    macro_data.stack().swaplevel().sort_index().reset_index()
    .rename(columns={'level_0': 'state'})
    # create vars
    .assign(
        decade=lambda x: 10 * np.floor(x['DATE'] / 10).astype(int),
        unemploy_diff=lambda x: x['Unemployment'].diff(),
        HouseIdx_pctch=lambda x: x['HouseIdx'].pct_change(),
        MedIncome_pctch=lambda x: x['MedIncome'].pct_change()
    )
    # opt A for output:
    .pivot_table(index='decade', columns='state',
                 values=['unemploy_diff', 'HouseIdx_pctch', 'MedIncome_pctch'])
    .multiply(100)  # for more meaningful displays
    # opt B for output + formatting (as percentages):
    # .groupby(['state','decade'])
    # [['unemploy_diff','HouseIdx_pctch','MedIncome_pctch']].mean()
    # .multiply(100)
    # # note: unemploy_diff isn't a % change but percentage points, so make
    # # that explicit:
    # .style.format({'HouseIdx_pctch': '{0:,.2f}%',
    #                'MedIncome_pctch': '{0:,.2f}%',
    #                'unemploy_diff': '{0:,.2f} p.p.'})
)
content/03/02i_answers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Environment (conda_u_net) # language: python # name: conda_u_net # --- # # U-Net: nuclei segmentation 1 # This is an implementation of a [Kaggle kernel](https://www.kaggle.com/c0conuts/unet-imagedatagenerator-lb-0-336/notebook) of a [U-net](https://arxiv.org/abs/1505.04597) claiming to achieve a baseline score of 0.227. # # %pwd # + import os import sys import random import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf # + from tqdm import tqdm from itertools import chain from skimage.io import imread, imsave, imshow, imread_collection, concatenate_images from skimage.transform import resize from skimage.morphology import label from keras.preprocessing import image from keras.models import Model, load_model from keras.layers import Input from keras.layers.core import Dropout, Lambda from keras.layers.convolutional import Conv2D, Conv2DTranspose from keras.layers.pooling import MaxPooling2D from keras.layers.merge import concatenate from keras.callbacks import EarlyStopping, ModelCheckpoint from keras import backend as K # - from utils.imaging import get_path, get_image_ids, label_mask, segmented_annotate from utils.evaluate import keras_mean_iou, submit_kaggle from utils import run_length_encoding # %matplotlib inline warnings.filterwarnings('ignore', category=UserWarning, module='skimage') # get model name form notebook name using javascript # + language="javascript" # IPython.notebook.kernel.execute('nb_name = ' + '"' + IPython.notebook.notebook_name + '"') # + notebook_name = os.path.splitext(os.path.basename(nb_name))[0] model_name = notebook_name + '.h5' model_path = get_path('models') + model_name submission_name = notebook_name + '.csv' submission_path = get_path('submission') + submission_name # - # ### 0. 
U-Net Parameters # + seed = 42 # model parameters BATCH_SIZE = 70 # the higher the better IMG_WIDTH = 128 # for faster computing on kaggle IMG_HEIGHT = 128 # for faster computing on kaggle IMG_CHANNELS = 3 TRAIN_PATH = get_path('data_train_1') TEST_PATH = get_path('data_test_1') # - # ** replace train path with fixed training data ** TRAIN_PATH = TRAIN_PATH.replace("stage1_train","stage1_train_fixed") # ### 1. Preprocess data # Get train and test IDs train_ids = get_image_ids(TRAIN_PATH) test_ids = get_image_ids(TEST_PATH) np.random.seed(10) # + # Get and resize train images and masks X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) print('Getting and resizing train images and masks ... ') sys.stdout.flush() for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)): path = TRAIN_PATH + id_ img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS] img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True) X_train[n] = img mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.bool) for mask_file in next(os.walk(path + '/masks/'))[2]: mask_ = imread(path + '/masks/' + mask_file) mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True), axis=-1) mask = np.maximum(mask, mask_) Y_train[n] = mask # + # Get and resize test images X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8) sizes_test = [] print('Getting and resizing test images ... ') sys.stdout.flush() for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)): path = TEST_PATH + id_ img = imread(path + '/images/' + id_ + '.png')[:,:,:IMG_CHANNELS] sizes_test.append([img.shape[0], img.shape[1]]) img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True) X_test[n] = img # - # ### 2. 
Data augmentation # Creating the training Image and Mask generator image_datagen = image.ImageDataGenerator(shear_range=0.7, rotation_range=70, zoom_range=0.3, width_shift_range=0.3, height_shift_range=0.3, fill_mode='reflect') mask_datagen = image.ImageDataGenerator(shear_range=0.7, rotation_range=70, zoom_range=0.3, width_shift_range=0.3, height_shift_range=0.3, fill_mode='reflect') # + # Keep the same seed for image and mask generators so they fit together image_datagen.fit(X_train[:int(X_train.shape[0]*0.9)], augment=True, seed=seed) mask_datagen.fit(Y_train[:int(Y_train.shape[0]*0.9)], augment=True, seed=seed) x=image_datagen.flow(X_train[:int(X_train.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed) y=mask_datagen.flow(Y_train[:int(Y_train.shape[0]*0.9)],batch_size=BATCH_SIZE,shuffle=True, seed=seed) # + # Creating the validation Image and Mask generator image_datagen_val = image.ImageDataGenerator() mask_datagen_val = image.ImageDataGenerator() image_datagen_val.fit(X_train[int(X_train.shape[0]*0.9):], augment=True, seed=seed) mask_datagen_val.fit(Y_train[int(Y_train.shape[0]*0.9):], augment=True, seed=seed) x_val=image_datagen_val.flow(X_train[int(X_train.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed) y_val=mask_datagen_val.flow(Y_train[int(Y_train.shape[0]*0.9):],batch_size=BATCH_SIZE,shuffle=True, seed=seed) # - f, axarr = plt.subplots(2,2,figsize=(12,12)) axarr[0,0].imshow(x.next()[0].astype(np.uint8)) axarr[0,1].imshow(np.squeeze(y.next()[0].astype(np.uint8))) axarr[1,0].imshow(x_val.next()[0].astype(np.uint8)) axarr[1,1].imshow(np.squeeze(y_val.next()[0].astype(np.uint8))) #creating a training and validation generator that generate masks and images train_generator = zip(x, y) val_generator = zip(x_val, y_val) # ### 3. 
Initialise U-Net model # + # Build U-Net model inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)) s = Lambda(lambda x: x / 255) (inputs) c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (s) c1 = Dropout(0.1) (c1) c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c1) p1 = MaxPooling2D((2, 2)) (c1) c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p1) c2 = Dropout(0.1) (c2) c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c2) p2 = MaxPooling2D((2, 2)) (c2) c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p2) c3 = Dropout(0.2) (c3) c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c3) p3 = MaxPooling2D((2, 2)) (c3) c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p3) c4 = Dropout(0.2) (c4) c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c4) p4 = MaxPooling2D(pool_size=(2, 2)) (c4) c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (p4) c5 = Dropout(0.3) (c5) c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c5) u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5) u6 = concatenate([u6, c4]) c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u6) c6 = Dropout(0.2) (c6) c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c6) u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6) u7 = concatenate([u7, c3]) c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u7) c7 = Dropout(0.2) (c7) c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c7) u8 = Conv2DTranspose(32, (2, 2), 
strides=(2, 2), padding='same') (c7) u8 = concatenate([u8, c2]) c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u8) c8 = Dropout(0.1) (c8) c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c8) u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8) u9 = concatenate([u9, c1], axis=3) c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (u9) c9 = Dropout(0.1) (c9) c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same') (c9) outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9) model = Model(inputs=[inputs], outputs=[outputs]) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[keras_mean_iou]) model.summary() # - # ### 4. Train U-Net model # Fit model earlystopper = EarlyStopping(patience=3, verbose=1) checkpointer = ModelCheckpoint(model_path, verbose=1, save_best_only=True) results = model.fit_generator(train_generator, validation_data=val_generator, validation_steps=10, steps_per_epoch=250, epochs=3, callbacks=[earlystopper, checkpointer] ) # ### 5. 
Predict with U-Net model # + # Predict on train, val and test model = load_model(model_path, custom_objects={'keras_mean_iou': keras_mean_iou}) preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1) preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1) preds_test = model.predict(X_test, verbose=1) # Threshold predictions preds_train_t = (preds_train > 0.5).astype(np.uint8) preds_val_t = (preds_val > 0.5).astype(np.uint8) preds_test_t = (preds_test > 0.5).astype(np.uint8) # - # Create list of upsampled test masks preds_test_upsampled = [] for i in range(len(preds_test)): preds_test_upsampled.append(resize(np.squeeze(preds_test[i]), (sizes_test[i][0], sizes_test[i][1]), mode='constant', preserve_range=True)) # sanity check on some training examples f, axarr = plt.subplots(2,3,figsize=(12,12)) ix1 = random.randint(0, len(preds_train_t)) ix2 = random.randint(0, len(preds_train_t)) axarr[0,0].imshow(X_train[ix1]) axarr[0,1].imshow(np.squeeze(Y_train[ix1])) axarr[0,2].imshow(np.squeeze(preds_train_t[ix1])) axarr[1,0].imshow(X_train[ix2]) axarr[1,1].imshow(np.squeeze(Y_train[ix2])) axarr[1,2].imshow(np.squeeze(preds_train_t[ix2])) # ### 7. Output image labels # Saving test labelled images for idx, image_id in tqdm(enumerate(test_ids), total=len(test_ids)): mask = preds_test_upsampled[idx] > 0.5 labels = label_mask(mask) imsave(get_path('output_test_1_lab_seg') + image_id + '.png', labels) # Saving test annotated images segmented_annotate(image_type = 'test', stage_num = 1) df = run_length_encoding.rle_images_in_dir(image_type = 'test', stage_num = 1) df.to_csv(submission_path, index=False) # ### 8. Kaggle submit message = "same is 180227_wg_u_net_2.ipynb with fixed data input" submit_string = submit_kaggle(notebook_name, submission_path, message) # !$submit_string
notebooks/180313_wg_u_net_3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # **Decision Trees 👨🏻‍💻** # ### From Scratch Implementation 🤔 # --- # + from __future__ import print_function, division import numpy as np import math from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib.cm as cmx import matplotlib.colors as colors import numpy as np def standardize(X): """ Standardize the dataset X """ X_std = X mean = X.mean(axis=0) std = X.std(axis=0) for col in range(np.shape(X)[1]): if std[col]: X_std[:, col] = (X_std[:, col] - mean[col]) / std[col] # X_std = (X - X.mean(axis=0)) / X.std(axis=0) return X_std def normalize(X, axis=-1, order=2): """ Normalize the dataset X """ l2 = np.atleast_1d(np.linalg.norm(X, order, axis)) l2[l2 == 0] = 1 return X / np.expand_dims(l2, axis) def train_test_split(X, y, test_size=0.5, shuffle=True, seed=None): """ Split the data into train and test sets """ if shuffle: X, y = shuffle_data(X, y, seed) # Split the training data from test data in the ratio specified in # test_size split_i = len(y) - int(len(y) // (1 / test_size)) X_train, X_test = X[:split_i], X[split_i:] y_train, y_test = y[:split_i], y[split_i:] return X_train, X_test, y_train, y_test def shuffle_data(X, y, seed=None): """ Random shuffle of the samples in X and y """ if seed: np.random.seed(seed) idx = np.arange(X.shape[0]) np.random.shuffle(idx) return X[idx], y[idx] def divide_on_feature(X, feature_i, threshold): """ Divide dataset based on if sample value on feature index is larger than the given threshold """ split_func = None if isinstance(threshold, int) or isinstance(threshold, float): split_func = lambda sample: sample[feature_i] >= threshold else: split_func = lambda sample: sample[feature_i] == threshold X_1 = np.array([sample for sample in X if split_func(sample)]) X_2 = 
np.array([sample for sample in X if not split_func(sample)]) return np.array([X_1, X_2]) def calculate_entropy(y): """ Calculate the entropy of label array y """ log2 = lambda x: math.log(x) / math.log(2) unique_labels = np.unique(y) entropy = 0 for label in unique_labels: count = len(y[y == label]) p = count / len(y) entropy += -p * log2(p) return entropy def mean_squared_error(y_true, y_pred): """ Returns the mean squared error between y_true and y_pred """ mse = np.mean(np.power(y_true - y_pred, 2)) return mse def calculate_variance(X): """ Return the variance of the features in dataset X """ mean = np.ones(np.shape(X)) * X.mean(0) n_samples = np.shape(X)[0] variance = (1 / n_samples) * np.diag((X - mean).T.dot(X - mean)) return variance def accuracy_score(y_true, y_pred): """ Compare y_true to y_pred and return the accuracy """ accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true) return accuracy def calculate_covariance_matrix(X, Y=None): """ Calculate the covariance matrix for the dataset X """ if Y is None: Y = X n_samples = np.shape(X)[0] covariance_matrix = (1 / (n_samples-1)) * (X - X.mean(axis=0)).T.dot(Y - Y.mean(axis=0)) return np.array(covariance_matrix, dtype=float) def calculate_correlation_matrix(X, Y=None): """ Calculate the correlation matrix for the dataset X """ if Y is None: Y = X n_samples = np.shape(X)[0] covariance = (1 / n_samples) * (X - X.mean(0)).T.dot(Y - Y.mean(0)) std_dev_X = np.expand_dims(calculate_std_dev(X), 1) std_dev_y = np.expand_dims(calculate_std_dev(Y), 1) correlation_matrix = np.divide(covariance, std_dev_X.dot(std_dev_y.T)) return np.array(correlation_matrix, dtype=float) class Plot(): def __init__(self): self.cmap = plt.get_cmap('viridis') def _transform(self, X, dim): covariance = calculate_covariance_matrix(X) eigenvalues, eigenvectors = np.linalg.eig(covariance) # Sort eigenvalues and eigenvector by largest eigenvalues idx = eigenvalues.argsort()[::-1] eigenvalues = eigenvalues[idx][:dim] eigenvectors = 
np.atleast_1d(eigenvectors[:, idx])[:, :dim] # Project the data onto principal components X_transformed = X.dot(eigenvectors) return X_transformed def plot_regression(self, lines, title, axis_labels=None, mse=None, scatter=None, legend={"type": "lines", "loc": "lower right"}): if scatter: scatter_plots = scatter_labels = [] for s in scatter: scatter_plots += [plt.scatter(s["x"], s["y"], color=s["color"], s=s["size"])] scatter_labels += [s["label"]] scatter_plots = tuple(scatter_plots) scatter_labels = tuple(scatter_labels) for l in lines: li = plt.plot(l["x"], l["y"], color=s["color"], linewidth=l["width"], label=l["label"]) if mse: plt.suptitle(title) plt.title("MSE: %.2f" % mse, fontsize=10) else: plt.title(title) if axis_labels: plt.xlabel(axis_labels["x"]) plt.ylabel(axis_labels["y"]) if legend["type"] == "lines": plt.legend(loc="lower_left") elif legend["type"] == "scatter" and scatter: plt.legend(scatter_plots, scatter_labels, loc=legend["loc"]) plt.show() # Plot the dataset X and the corresponding labels y in 2D using PCA. def plot_in_2d(self, X, y=None, title=None, accuracy=None, legend_labels=None): X_transformed = self._transform(X, dim=2) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] class_distr = [] y = np.array(y).astype(int) colors = [self.cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))] # Plot the different class distributions for i, l in enumerate(np.unique(y)): _x1 = x1[y == l] _x2 = x2[y == l] _y = y[y == l] class_distr.append(plt.scatter(_x1, _x2, color=colors[i])) # Plot legend if not legend_labels is None: plt.legend(class_distr, legend_labels, loc=1) # Plot title if title: if accuracy: perc = 100 * accuracy plt.suptitle(title) plt.title("Accuracy: %.1f%%" % perc, fontsize=10) else: plt.title(title) # Axis labels plt.xlabel('Principal Component 1') plt.ylabel('Principal Component 2') plt.show() # Plot the dataset X and the corresponding labels y in 3D using PCA. 
def plot_in_3d(self, X, y=None): X_transformed = self._transform(X, dim=3) x1 = X_transformed[:, 0] x2 = X_transformed[:, 1] x3 = X_transformed[:, 2] fig = plt.figure() ax = fig.add_subplot(111, projection='3d') ax.scatter(x1, x2, x3, c=y) plt.show() class Sigmoid(): def __call__(self, x): return 1 / (1 + np.exp(-x)) def gradient(self, x): return self.__call__(x) * (1 - self.__call__(x)) def make_diagonal(x): """ Converts a vector into an diagonal matrix """ m = np.zeros((len(x), len(x))) for i in range(len(m[0])): m[i, i] = x[i] return m # + class DecisionNode(): """Class that represents a decision node or leaf in the decision tree Parameters: ----------- feature_i: int Feature index which we want to use as the threshold measure. threshold: float The value that we will compare feature values at feature_i against to determine the prediction. value: float The class prediction if classification tree, or float value if regression tree. true_branch: DecisionNode Next decision node for samples where features value met the threshold. false_branch: DecisionNode Next decision node for samples where features value did not meet the threshold. """ def __init__(self, feature_i=None, threshold=None, value=None, true_branch=None, false_branch=None): self.feature_i = feature_i # Index for the feature that is tested self.threshold = threshold # Threshold value for feature self.value = value # Value if the node is a leaf in the tree self.true_branch = true_branch # 'Left' subtree self.false_branch = false_branch # 'Right' subtree # Super class of RegressionTree and ClassificationTree class DecisionTree(object): """Super class of RegressionTree and ClassificationTree. Parameters: ----------- min_samples_split: int The minimum number of samples needed to make a split when building a tree. min_impurity: float The minimum impurity required to split the tree further. max_depth: int The maximum depth of a tree. 
loss: function Loss function that is used for Gradient Boosting models to calculate impurity. """ def __init__(self, min_samples_split=2, min_impurity=1e-7, max_depth=float("inf"), loss=None): self.root = None # Root node in dec. tree # Minimum n of samples to justify split self.min_samples_split = min_samples_split # The minimum impurity to justify split self.min_impurity = min_impurity # The maximum depth to grow the tree to self.max_depth = max_depth # Function to calculate impurity (classif.=>info gain, regr=>variance reduct.) self._impurity_calculation = None # Function to determine prediction of y at leaf self._leaf_value_calculation = None # If y is one-hot encoded (multi-dim) or not (one-dim) self.one_dim = None # If Gradient Boost self.loss = loss def fit(self, X, y, loss=None): """ Build decision tree """ self.one_dim = len(np.shape(y)) == 1 self.root = self._build_tree(X, y) self.loss=None def _build_tree(self, X, y, current_depth=0): """ Recursive method which builds out the decision tree and splits X and respective y on the feature of X which (based on impurity) best separates the data""" largest_impurity = 0 best_criteria = None # Feature index and threshold best_sets = None # Subsets of the data # Check if expansion of y is needed if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) # Add y as last column of X Xy = np.concatenate((X, y), axis=1) n_samples, n_features = np.shape(X) if n_samples >= self.min_samples_split and current_depth <= self.max_depth: # Calculate the impurity for each feature for feature_i in range(n_features): # All values of feature_i feature_values = np.expand_dims(X[:, feature_i], axis=1) unique_values = np.unique(feature_values) # Iterate through all unique values of feature column i and # calculate the impurity for threshold in unique_values: # Divide X and y depending on if the feature value of X at index feature_i # meets the threshold Xy1, Xy2 = divide_on_feature(Xy, feature_i, threshold) if len(Xy1) > 0 and len(Xy2) 
> 0: # Select the y-values of the two sets y1 = Xy1[:, n_features:] y2 = Xy2[:, n_features:] # Calculate impurity impurity = self._impurity_calculation(y, y1, y2) # If this threshold resulted in a higher information gain than previously # recorded save the threshold value and the feature # index if impurity > largest_impurity: largest_impurity = impurity best_criteria = {"feature_i": feature_i, "threshold": threshold} best_sets = { "leftX": Xy1[:, :n_features], # X of left subtree "lefty": Xy1[:, n_features:], # y of left subtree "rightX": Xy2[:, :n_features], # X of right subtree "righty": Xy2[:, n_features:] # y of right subtree } if largest_impurity > self.min_impurity: # Build subtrees for the right and left branches true_branch = self._build_tree(best_sets["leftX"], best_sets["lefty"], current_depth + 1) false_branch = self._build_tree(best_sets["rightX"], best_sets["righty"], current_depth + 1) return DecisionNode(feature_i=best_criteria["feature_i"], threshold=best_criteria[ "threshold"], true_branch=true_branch, false_branch=false_branch) # We're at leaf => determine value leaf_value = self._leaf_value_calculation(y) return DecisionNode(value=leaf_value) def predict_value(self, x, tree=None): """ Do a recursive search down the tree and make a prediction of the data sample by the value of the leaf that we end up at """ if tree is None: tree = self.root # If we have a value (i.e we're at a leaf) => return value as the prediction if tree.value is not None: return tree.value # Choose the feature that we will test feature_value = x[tree.feature_i] # Determine if we will follow left or right branch branch = tree.false_branch if isinstance(feature_value, int) or isinstance(feature_value, float): if feature_value >= tree.threshold: branch = tree.true_branch elif feature_value == tree.threshold: branch = tree.true_branch # Test subtree return self.predict_value(x, branch) def predict(self, X): """ Classify samples one by one and return the set of labels """ y_pred = 
[self.predict_value(sample) for sample in X] return y_pred def print_tree(self, tree=None, indent=" "): """ Recursively print the decision tree """ if not tree: tree = self.root # If we're at leaf => print the label if tree.value is not None: print (tree.value) # Go deeper down the tree else: # Print test print ("%s:%s? " % (tree.feature_i, tree.threshold)) # Print the true scenario print ("%sT->" % (indent), end="") self.print_tree(tree.true_branch, indent + indent) # Print the false scenario print ("%sF->" % (indent), end="") self.print_tree(tree.false_branch, indent + indent) class XGBoostRegressionTree(DecisionTree): """ Regression tree for XGBoost - Reference - http://xgboost.readthedocs.io/en/latest/model.html """ def _split(self, y): """ y contains y_true in left half of the middle column and y_pred in the right half. Split and return the two matrices """ col = int(np.shape(y)[1]/2) y, y_pred = y[:, :col], y[:, col:] return y, y_pred def _gain(self, y, y_pred): nominator = np.power((y * self.loss.gradient(y, y_pred)).sum(), 2) denominator = self.loss.hess(y, y_pred).sum() return 0.5 * (nominator / denominator) def _gain_by_taylor(self, y, y1, y2): # Split y, y_pred = self._split(y) y1, y1_pred = self._split(y1) y2, y2_pred = self._split(y2) true_gain = self._gain(y1, y1_pred) false_gain = self._gain(y2, y2_pred) gain = self._gain(y, y_pred) return true_gain + false_gain - gain def _approximate_update(self, y): # y split into y, y_pred y, y_pred = self._split(y) # Newton's Method gradient = np.sum(y * self.loss.gradient(y, y_pred), axis=0) hessian = np.sum(self.loss.hess(y, y_pred), axis=0) update_approximation = gradient / hessian return update_approximation def fit(self, X, y): self._impurity_calculation = self._gain_by_taylor self._leaf_value_calculation = self._approximate_update super(XGBoostRegressionTree, self).fit(X, y) class RegressionTree(DecisionTree): def _calculate_variance_reduction(self, y, y1, y2): var_tot = calculate_variance(y) var_1 = 
calculate_variance(y1) var_2 = calculate_variance(y2) frac_1 = len(y1) / len(y) frac_2 = len(y2) / len(y) # Calculate the variance reduction variance_reduction = var_tot - (frac_1 * var_1 + frac_2 * var_2) return sum(variance_reduction) def _mean_of_y(self, y): value = np.mean(y, axis=0) return value if len(value) > 1 else value[0] def fit(self, X, y): self._impurity_calculation = self._calculate_variance_reduction self._leaf_value_calculation = self._mean_of_y super(RegressionTree, self).fit(X, y) class ClassificationTree(DecisionTree): def _calculate_information_gain(self, y, y1, y2): # Calculate information gain p = len(y1) / len(y) entropy = calculate_entropy(y) info_gain = entropy - p * \ calculate_entropy(y1) - (1 - p) * \ calculate_entropy(y2) return info_gain def _majority_vote(self, y): most_common = None max_count = 0 for label in np.unique(y): # Count number of occurences of samples with label count = len(y[y == label]) if count > max_count: most_common = label max_count = count return most_common def fit(self, X, y): self._impurity_calculation = self._calculate_information_gain self._leaf_value_calculation = self._majority_vote super(ClassificationTree, self).fit(X, y) # + from __future__ import division, print_function import numpy as np from sklearn import datasets import matplotlib.pyplot as plt import sys import os def main(): print ("-- Classification Tree --") data = datasets.load_iris() X = data.data y = data.target X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4) clf = ClassificationTree() clf.fit(X_train, y_train) y_pred = clf.predict(X_test) accuracy = accuracy_score(y_test, y_pred) print ("Accuracy:", accuracy) Plot().plot_in_2d(X_test, y_pred, title="Decision Tree", accuracy=accuracy, legend_labels=data.target_names) if __name__ == "__main__": main() # + import pandas as pd def main(): print ("-- Regression Tree --") # Load temperature data data = pd.read_csv('data/TempLinkoping2016.txt', sep="\t") time = 
np.atleast_2d(data["time"].values).T temp = np.atleast_2d(data["temp"].values).T X = standardize(time) # Time. Fraction of the year [0, 1] y = temp[:, 0] # Temperature. Reduce to one-dim X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) model = RegressionTree() model.fit(X_train, y_train) y_pred = model.predict(X_test) y_pred_line = model.predict(X) # Color map cmap = plt.get_cmap('viridis') mse = mean_squared_error(y_test, y_pred) print ("Mean Squared Error:", mse) # Plot the results # Plot the results m1 = plt.scatter(366 * X_train, y_train, color=cmap(0.9), s=10) m2 = plt.scatter(366 * X_test, y_test, color=cmap(0.5), s=10) m3 = plt.scatter(366 * X_test, y_pred, color='black', s=10) plt.suptitle("Regression Tree") plt.title("MSE: %.2f" % mse, fontsize=10) plt.xlabel('Day') plt.ylabel('Temperature in Celcius') plt.legend((m1, m2, m3), ("Training data", "Test data", "Prediction"), loc='lower right') plt.show() if __name__ == "__main__": main() # -
DecisionTreeClassification/Decision_Trees.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] graffitiCellId="id_pv982ru" # # Stock Prices # + [markdown] graffitiCellId="id_qto2uz2" # You are given access to yesterday's stock prices for a single stock. The data is in the form of an array with the stock price in 30 minute intervals from 9:30 a.m EST opening to 4:00 p.m EST closing time. With this data, write a function that returns the maximum profit obtainable. You will need to buy before you can sell. # # For example, suppose you have the following prices: # # `prices = [3, 4, 7, 8, 6]` # # >Note: This is a shortened array, just for the sake of example—a full set of prices for the day would have 13 elements (one price for each 30 minute interval betwen 9:30 and 4:00), as seen in the test cases further down in this notebook. # # In order to get the maximum profit in this example, you would want to buy at a price of 3 and sell at a price of 8 to yield a maximum profit of 5. In other words, you are looking for the greatest possible difference between two numbers in the array. # # Fill out the function below and run it against the test cases. Take into consideration the time complexity of your solution. # + graffitiCellId="id_z7ws71z" def max_returns(prices): """ Calculate maxiumum possible return Args: prices(array): array of prices Returns: int: The maximum profit possible """ return prices # + [markdown] graffitiCellId="id_uc722im" # <span class="graffiti-highlight graffiti-id_uc722im-id_o4cterg"><i></i><button>Hide Solution</button></span> # + graffitiCellId="id_o4cterg" # Solution def max_returns(arr): """ The idea is to pick two dates: 1. buy date 2. 
sell date We will keep track of our max profit while iterating over the list At each step we will make the greedy choice by choosing prices such that our profit is maximum """ min_price_index = 0 max_price_index = 1 current_min_price_index = 0 if len(arr) < 2: return for index in range(1, len(arr)): # current minimum price if arr[index] < arr[current_min_price_index]: current_min_price_index = index # current max profit if arr[max_price_index] - arr[min_price_index] < arr[index] - arr[current_min_price_index]: max_price_index = index min_price_index = current_min_price_index max_profit = arr[max_price_index] - arr[min_price_index] return max_profit # + graffitiCellId="id_plirn7n" # Test Cases def test_function(test_case): prices = test_case[0] solution = test_case[1] output = max_returns(prices) if output == solution: print("Pass") else: print("Fail") # - prices = [2, 2, 7, 9, 9, 12, 18, 23, 34, 37, 45, 54, 78] solution = 76 test_case = [prices, solution] test_function(test_case) prices = [54, 18, 37, 9, 11, 48, 23, 1, 7, 34, 2, 45, 67] solution = 66 test_case = [prices, solution] test_function(test_case) prices = [78, 54, 45, 37, 34, 23, 18, 12, 9, 9, 7, 2, 2] solution = 0 test_case = [prices, solution] test_function(test_case)
concepts/Advanced Algorithms/03 Dynamic programming/05 Stock Prices.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import cvxpy as cp
import matplotlib.pyplot as plt
import numpy as np
import torch

from resalloc.fungible import AllocationProblem
from resalloc.fungible import utilities

from latexify import latexify
latexify()
# -


def make_problem(n_jobs, n_resources, device):
    """Build a random fungible allocation problem of the given size on `device`.

    Throughputs are drawn uniformly from [1, 2); resource limits are random
    and shifted by 1e3. Log utility is used throughout the benchmark.
    """
    resource_limits = torch.rand(n_resources, device=device) * n_jobs + 1e3
    job_demands = None
    utility_fn = utilities.Log()
    throughput_matrix = torch.tensor(
        np.random.uniform(low=1., high=2., size=(n_jobs, n_resources))
    ).float()
    throughput_matrix = throughput_matrix.to(device)
    problem = AllocationProblem(
        throughput_matrix,
        resource_limits=resource_limits,
        job_demands=job_demands,
        utility_function=utility_fn
    )
    return problem


# +
from collections import namedtuple

# Summary statistics of one benchmark configuration (seconds).
Stats = namedtuple('Stats', ['mean', 'std', 'min', 'max'])

# +
from tqdm.auto import tqdm
import time

resources = torch.tensor(np.linspace(start=2, stop=100, num=10)).int()
jobs = [int(1e5)] * len(resources)
n_trials = 5


def benchmark(device, jobs, resources, n_trials):
    """Time one dual-function evaluation per (jobs, resources) pair on `device`.

    Returns a list of Stats, one per configuration, each aggregated over
    `n_trials` runs. Seeds are reset per configuration so CPU and GPU see
    identical problem instances.
    """
    device_times = []
    for j, r in tqdm(list(zip(jobs, resources))):
        print(f'{r} RESOURCES')
        torch.manual_seed(0)
        np.random.seed(0)
        times = []
        for i in tqdm(range(n_trials)):
            problem = make_problem(j, r, device)
            prices = problem.utility_fn.initial_prices(problem)
            start = time.time()
            #_, stats = problem.solve(eps=eps, max_iter=max_iter, print_every=1, verbose=False)
            problem.evaluate_dual_function(prices)
            if device == 'cuda':
                # CUDA kernels are asynchronous; wait before reading the clock.
                torch.cuda.synchronize()
            end = time.time()
            del problem
            times.append(end - start)
        times = np.array(times)
        mean = np.mean(times)
        std = np.std(times)
        min_ = np.min(times)
        max_ = np.max(times)
        device_times.append(Stats(mean, std, min_, max_))
    return device_times
# -

gpu_stats = benchmark('cuda', jobs, resources, n_trials)
cpu_stats = benchmark('cpu', jobs, resources, n_trials)

jobs_j = list(map(int, [1e2, 1e3, 1e4, 1e5, 1e6, 1e7]))
resources_j = [4] * len(jobs_j)
gpu_stats_j = benchmark('cuda', jobs_j, resources_j, n_trials)
cpu_stats_j = benchmark('cpu', jobs_j, resources_j, n_trials)

# +
import cvxpy as cp


def times_errs(stats):
    """Extract mean times and asymmetric (max-mean, mean-min) error bars."""
    times = np.array([s.mean for s in stats])
    up_err = [s.max - s.mean for s in stats]
    down_err = [s.mean - s.min for s in stats]
    yerr = np.array([up_err, down_err])
    return times, yerr


cpu_t, cpu_err = times_errs(cpu_stats)
cpu_t_j, cpu_err_j = times_errs(cpu_stats_j)
# FIX: the GPU timings were used in the fits and plots below but were never
# extracted, which raised a NameError on `gpu_t` / `gpu_t_j`.
gpu_t, gpu_err = times_errs(gpu_stats)
gpu_t_j, gpu_err_j = times_errs(gpu_stats_j)

# Log-log least-squares fit of CPU time vs number of resources
# (power law: time ~ exp(a) * resources^b).
a = cp.Variable()
b = cp.Variable()
start = 1
targets = np.log(cpu_t)
residuals = cp.sum_squares(targets[start:] - (a + b * np.log(resources[start:])))
problem = cp.Problem(cp.Minimize(residuals))
problem.solve()
print(np.exp(a.value))
print(b.value)
cpu_t_inputs = np.linspace(resources[start], 100, 100)
cpu_t_quad_fit = np.exp((a.value + b.value * (np.log(cpu_t_inputs))))
cpu_t_quad_fit

# Same power-law fit for GPU time vs number of resources.
a = cp.Variable()
b = cp.Variable()
targets = np.log(gpu_t)
start = 1
residuals = cp.sum_squares(targets[start:] - (a + b * np.log(resources[start:].cpu().numpy())))
problem = cp.Problem(cp.Minimize(residuals))
problem.solve()
print(np.exp(a.value))
print(b.value)
gpu_t_inputs = np.linspace(resources[start], resources[-1], 100)
gpu_t_fit = np.exp(a.value + b.value * (np.log(gpu_t_inputs)))
gpu_t_fit

# Power-law fit of CPU time vs number of jobs.
a = cp.Variable()
b = cp.Variable()
targets = np.log(cpu_t_j)
start = 2
residuals = cp.sum_squares(targets[start:] - (a + b * np.log(np.array(jobs_j[start:]))))
problem = cp.Problem(cp.Minimize(residuals))
problem.solve()
print(np.exp(a.value))
print(b.value)
cpu_t_j_inputs = np.linspace(jobs_j[start], jobs_j[-1], 100)
cpu_t_j_fit = np.exp((a.value + b.value * (np.log(cpu_t_j_inputs))))
cpu_t_j_fit

# Power-law fit of GPU time vs number of jobs.
a = cp.Variable()
b = cp.Variable()
start = 3
targets = np.log(gpu_t_j)
residuals = cp.sum_squares(targets[start:] - (a + b * np.log(np.array(jobs_j[start:]))))
problem = cp.Problem(cp.Minimize(residuals))
problem.solve()
print(np.exp(a.value))
print(b.value)
gpu_t_j_inputs = np.linspace(jobs_j[start], jobs_j[-1], 100)
gpu_t_j_fit = np.exp(a.value + b.value * (np.log(gpu_t_j_inputs)))
gpu_t_j_fit

# +
# Log-log scaling plot with fitted power laws (saved for the paper).
fig, axs = plt.subplots(2, 1, figsize=(5.7, 6.8))

axs[0].set_xscale('log')
axs[0].set_yscale('log')
axs[0].plot(resources, gpu_t, label='gpu')
axs[0].plot(resources, cpu_t, label='cpu')
axs[0].plot(cpu_t_inputs, cpu_t_quad_fit, linestyle='--')
axs[0].plot(gpu_t_inputs, gpu_t_fit, linestyle='--')
axs[0].set_xlabel('number of resources')
axs[0].set_ylabel('seconds')
axs[0].legend()

axs[1].set_xscale('log')
axs[1].set_yscale('log')
axs[1].plot(jobs_j, gpu_t_j, label='gpu')
axs[1].plot(jobs_j, cpu_t_j, label='cpu')
axs[1].plot(cpu_t_j_inputs, cpu_t_j_fit, linestyle='--')
axs[1].plot(gpu_t_j_inputs, gpu_t_j_fit, linestyle='--')
axs[1].set_xlabel('number of jobs')
axs[1].set_ylabel('seconds')
axs[1].legend()

plt.tight_layout()
plt.savefig('scaling.pdf')

# +
# Linear-scale version of the same comparison.
fig, axs = plt.subplots(2, 1, figsize=(5.7, 6.8))

axs[0].plot(resources, gpu_t, label='gpu')
axs[0].plot(resources, cpu_t, label='cpu')
axs[0].set_xlabel('number of resources')
axs[0].set_ylabel('seconds')
axs[0].legend()

axs[1].plot(jobs_j, gpu_t_j, label='gpu')
axs[1].plot(jobs_j, cpu_t_j, label='cpu')
axs[1].set_xlabel('number of jobs')
axs[1].set_ylabel('seconds')
axs[1].legend()

plt.tight_layout()
notebooks/scaling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Subset CMIP5 Datasets with xarray (ocean replica)
#
# xarray: http://xarray.pydata.org/en/stable/index.html
#
# Ocean data with opendap:
#
# https://aims3.llnl.gov/thredds/dodsC/cmip5_css01_data/cmip5/output1/NOAA-GFDL/GFDL-HIRAM-C360/sst2030/mon/ocean/Omon/r3i1p1/v20110601/tos/tos_Omon_GFDL-HIRAM-C360_sst2030_r3i1p1_202601-203012.nc.html
#
# ## Search CMIP5 Dataset
#
# using: https://esgf-pyclient.readthedocs.io/en/latest/index.html

from pyesgf.search import SearchConnection

# Federated search (distrib=True) against the DKRZ ESGF index node.
conn = SearchConnection('https://esgf-data.dkrz.de/esg-search', distrib=True)

# Constrain the search to monthly ocean output of one GFDL experiment;
# commented-out alternatives were used in earlier runs of this notebook.
ctx = conn.new_context(
    project='CMIP5',
    experiment='sst2030',
    model='GFDL-HIRAM-C360',
    #ensemble='r3i1p1',
    ensemble="r1i1p1",
    time_frequency='mon',
    realm='ocean',
    #data_node='aims3.llnl.gov',
    data_node="esgf-data1.ceda.ac.uk",
    #replica=False,
)
# Number of datasets matching the constraints above.
ctx.hit_count

for result in ctx.search():
    print(result.dataset_id)

# Take the first matching dataset.
result = ctx.search()[0]
result.dataset_id

# List the dataset's files and keep the OPeNDAP URL of the file whose URL
# contains the 'tos' (sea surface temperature) variable name.
files = result.file_context().search()
for file in files:
    if 'tos' in file.opendap_url:
        tos_url = file.opendap_url
        print(tos_url)

# ## Subset single dataset with xarray
#
# Using OpenDAP: http://xarray.pydata.org/en/stable/io.html?highlight=opendap#opendap

import xarray as xr

# Open the remote dataset lazily over OPeNDAP; `chunks` makes it dask-backed.
ds = xr.open_dataset(tos_url, chunks={'time': 120})
print(ds)

da = ds['tos']
# Subset: first time step only, then a lat/lon box.
da = da.isel(time=slice(0, 1))
da = da.sel(lat=slice(-50, 50), lon=slice(0, 50))
da = da.squeeze()
da

# %matplotlib inline
# ds.tos.isel(time=slice(0, 1)).squeeze().plot()
da.plot()

# ## Download to NetCDF

# Triggers the actual remote read and writes the subset locally.
da.to_netcdf('tos.nc')

# +
# df = ds.to_dataframe()
# -

import pandas as pd

# +
# df.to_csv("out.csv")
# -

# https://confluence.ecmwf.int/display/CKB/How+to+convert+NetCDF+to+CSV#HowtoconvertNetCDFtoCSV-Firstoption:PythonScript
#
# https://www.giss.nasa.gov/tools/panoply/

# ## CDO
# https://pypi.org/project/cdo/
# https://code.mpimet.mpg.de/projects/cdo/wiki/Tutorial

# Same GFDL file as above, addressed directly; the commented URL is an
# alternative atmosphere dataset used in earlier runs.
ds_url = "https://aims3.llnl.gov/thredds/dodsC/cmip5_css01_data/cmip5/output1/NOAA-GFDL/GFDL-HIRAM-C360/sst2030/mon/ocean/Omon/r3i1p1/v20110601/tos/tos_Omon_GFDL-HIRAM-C360_sst2030_r3i1p1_202601-203012.nc"
# ds_url = "http://esgf-data1.ceda.ac.uk/thredds/dodsC/esg_dataroot/cmip5/output1/MOHC/HadCM3/rcp45/mon/atmos/Amon/r1i1p1/v20110905/tasmax/tasmax_Amon_HadCM3_rcp45_r1i1p1_200601-203012.nc"

from cdo import Cdo

# Python wrapper around the CDO command-line tool (must be installed).
cdo = Cdo()
# Print a short dataset summary.
cdo.sinfo(input=ds_url)

# +
# cdo.seltimestep(input=f"{ds_url},1", output="out.nc")

# +
# cdo -sellonlatbox,-180,180,0,90 -seltimestep,1 https://aims3.llnl.gov/thredds/dodsC/cmip5_css01_data/cmip5/output1/NOAA-GFDL/GFDL-HIRAM-C360/sst2030/mon/ocean/Omon/r3i1p1/v20110601/tos/tos_Omon_GFDL-HIRAM-C360_sst2030_r3i1p1_202601-203012.nc out.nc
xarray-demo/subset-cmip5-ocean-replica.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/krakowiakpawel9/neural-network-course/blob/master/07_rnn/02_text_classifier.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown]
# * @author: <EMAIL>
# * @site: e-smartdata.org

# +
# Set up the environment for TensorFlow 2.0.
# If you get an error while installing TensorFlow, run this cell again.
# !pip uninstall -y tensorflow
# !pip install -q tensorflow==2.0.0

# +
import numpy as np
import os

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, Flatten

# +
# Download and unpack the movie-review dataset (neg/pos .txt files).
# !wget https://storage.googleapis.com/esmartdata-courses-files/ann-course/reviews.zip
# !unzip -q reviews.zip

# +
data_dir = './reviews'
train_dir = os.path.join(data_dir, 'train')


def load_reviews(directory):
    """Read all ``neg``/``pos`` review ``.txt`` files under *directory*.

    Returns:
        (texts, labels): parallel lists; label 0 for 'neg', 1 for 'pos'.
    """
    texts, labels = [], []
    for label_type in ['neg', 'pos']:
        dir_name = os.path.join(directory, label_type)
        for fname in os.listdir(dir_name):
            if fname[-4:] == '.txt':
                # Context manager guarantees the handle is closed even if
                # read() raises (the original leaked on error).
                with open(os.path.join(dir_name, fname)) as f:
                    texts.append(f.read())
                labels.append(0 if label_type == 'neg' else 1)
    return texts, labels


# Single loader replaces the two copy-pasted train/test loops.
train_texts, train_labels = load_reviews(train_dir)

# +
test_dir = os.path.join(data_dir, 'test')
test_texts, test_labels = load_reviews(test_dir)

# +
train_texts[:10]

# +
train_labels[:10]

# +
train_labels[-10:]

# +
maxlen = 100          # truncate reviews to 100 words
num_words = 10000     # the 10000 most frequent words
embedding_dim = 100

# Build the vocabulary on the training texts only.
tokenizer = Tokenizer(num_words=num_words)
tokenizer.fit_on_texts(train_texts)

# +
list(tokenizer.index_word.items())[:20]

# +
sequences = tokenizer.texts_to_sequences(train_texts)
print(sequences[:3])

# +
word_index = tokenizer.word_index
print(f'{len(word_index)} unikatowych słów.')

# +
# Truncate/pad every review to exactly the first `maxlen` word indices.
train_data = pad_sequences(sequences, maxlen=maxlen)
train_data.shape

# +
train_data[:3]

# +
train_labels = np.asarray(train_labels)
train_labels

# +
# Shuffle the samples (the files were read neg-first, pos-second, so the
# labels are ordered on disk and must be permuted before splitting).
indices = np.arange(train_data.shape[0])
np.random.shuffle(indices)
train_data = train_data[indices]
train_labels = train_labels[indices]
train_data.shape

# +
# Split into training and validation sets.
training_samples = 15000
validation_samples = 10000

X_train = train_data[:training_samples]
y_train = train_labels[:training_samples]
X_val = train_data[training_samples: training_samples + validation_samples]
y_val = train_labels[training_samples: training_samples + validation_samples]

# +
# Build the baseline model: Embedding(input_dim, output_dim) -> Flatten -> MLP.
model = Sequential()
model.add(Embedding(num_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

# +
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# +
history = model.fit(X_train, y_train, batch_size=32, epochs=5, validation_data=(X_val, y_val))


# +
def plot_hist(history):
    """Plot training vs. validation accuracy and loss from a Keras History."""
    import pandas as pd
    import plotly.graph_objects as go

    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=hist['epoch'], y=hist['accuracy'], name='accuracy', mode='markers+lines'))
    fig.add_trace(go.Scatter(x=hist['epoch'], y=hist['val_accuracy'], name='val_accuracy', mode='markers+lines'))
    fig.update_layout(width=1000, height=500, title='accuracy vs. val accuracy',
                      xaxis_title='Epoki', yaxis_title='accuracy', yaxis_type='log')
    fig.show()

    fig = go.Figure()
    fig.add_trace(go.Scatter(x=hist['epoch'], y=hist['loss'], name='loss', mode='markers+lines'))
    fig.add_trace(go.Scatter(x=hist['epoch'], y=hist['val_loss'], name='val_loss', mode='markers+lines'))
    fig.update_layout(width=1000, height=500, title='loss vs. val loss',
                      xaxis_title='Epoki', yaxis_title='loss', yaxis_type='log')
    fig.show()


plot_hist(history)

# +
# Evaluate the MLP baseline on the held-out test set.
sequences = tokenizer.texts_to_sequences(test_texts)
X_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(test_labels)
model.evaluate(X_test, y_test, verbose=0)

# + [markdown]
# ### Simple RNN

# +
from tensorflow.keras.layers import SimpleRNN, LSTM

# +
model = Sequential()
model.add(Embedding(10000, 32))
model.add(SimpleRNN(16))
model.add(Dense(1, activation='sigmoid'))
model.summary()

# +
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# +
history = model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_val, y_val))

# +
plot_hist(history)

# +
model = Sequential()
model.add(Embedding(10000, 32))
model.add(LSTM(16))
model.add(Dense(1, activation='sigmoid'))
model.summary()

# +
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

# +
history = model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_val, y_val))

# +
plot_hist(history)

# +
# Shorter LSTM run (3 epochs) used for the final test-set evaluation below.
model = Sequential()
model.add(Embedding(10000, 32))
model.add(LSTM(16))
model.add(Dense(1, activation='sigmoid'))
model.summary()

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=32, epochs=3, validation_data=(X_val, y_val))

# +
sequences = tokenizer.texts_to_sequences(test_texts)
X_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(test_labels)
model.evaluate(X_test, y_test, verbose=0)
07_rnn/02_text_classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import csv
import MySQLdb

# Single database session used for the whole load.
con = MySQLdb.connect(host="localhost", user="root", passwd="", db="prefeitura_sbac")
cursor = con.cursor()

# Staff roster exported from the municipal transparency portal.
Dados = pd.read_excel("Consulta de Quadro Funcional ITAJAI.xlsx")

Dados.info()

qtd_registros = len(Dados)
qtd_registros

# ### Drop table
# IF EXISTS keeps the script re-runnable: the original unconditional
# DROP TABLE raised an error on a fresh database where the table
# did not exist yet.
drop_table = 'DROP TABLE IF EXISTS itajai'
cursor.execute(drop_table)

# ### Create table
create_table = """CREATE TABLE IF NOT EXISTS itajai (id int not null auto_increment PRIMARY KEY, nome varchar(255), cargo varchar(255), matricula varchar(30), CPF varchar(40) default 'Nao informado', horas_mes varchar(40) DEFAULT 'Nao informado', modifield datetime DEFAULT CURRENT_TIMESTAMP, cidade varchar(200) default 'Itajaí', tabela varchar(255) default 'itajai')DEFAULT CHARSET = utf8;"""
cursor.execute(create_table)

# ### Insert data
# Parameterized statement: the driver binds the values, nothing is
# interpolated into the SQL string.
query = """INSERT INTO itajai (nome, cargo, matricula, horas_mes, CPF) VALUES (%s, %s, %s, %s, %s)"""

try:
    # Iterate the columns in lockstep (replaces the original manual
    # `cont` counter) and let the driver batch the inserts.
    rows = list(zip(Dados['Nome'], Dados['Cargo'], Dados['Matrícula'],
                    Dados['Horas/mês'], Dados['CPF']))
    cursor.executemany(query, rows)

    # Commit the transaction only after every row was accepted.
    con.commit()
finally:
    # Release the cursor and connection even if an insert fails part-way.
    cursor.close()
    con.close()
PORTAL DE TRANSPARENCIA/ITAJAI/INSERT DADOS DB BC.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: ipykernel_py2 # --- # ## Logarithmic Returns # *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).* import numpy as np import pandas as pd from pandas_datareader import data as wb import matplotlib.pyplot as plt MSFT = pd.read_csv('D:/Python/MSFT_2000_2017.csv', index_col = 'Date') MSFT # ### Log Returns # $$ # ln(\frac{P_t}{P_{t-1}}) # $$ MSFT.head() # Calculate the Log returns for Microsoft. MSFT['log_return'] = np.log(MSFT['Adj Close'] / MSFT['Adj Close'].shift(1)) print MSFT['log_return'] # Plot the results on a graph. MSFT['log_return'].plot(figsize=(8, 5)) plt.show() # Estimate the daily and the annual mean of the obtained log returns. log_return_d = MSFT['log_return'].mean() log_return_d log_return_a = MSFT['log_return'].mean() * 250 log_return_a # Print the result in a presentable form. print str(round(log_return_a, 5) * 100) + ' %' # **** # Repeat this exercise for any stock of interest to you. :)
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/6_Calculating a Security's Rate of Return in Python - Logarithmic Returns (3:39)/Logarithmic Returns - Solution_csv.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .jl
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Julia 1.6.1
#     language: julia
#     name: julia-1.6
# ---

# # Optimization
#
# of linear-quadratic problems.
#
# Many optimization problems in finance and econometrics involve linear-quadratic
# objectives/constraints. This notebook illustrates how the package
# [Convex.jl](https://github.com/jump-dev/Convex.jl) can be used for this. The example is
# (for pedagogical reasons) the same as in the other notebooks on optimization. Otherwise,
# the methods illustrated here are well suited for cases when the objective involves the
# portfolio variance ($ w'\Sigma w $) or when the estimation problem is based on minimizing
# the sum of squared residuals ($u'u$).
#
# The notebook also uses [SCS.jl](https://github.com/jump-dev/SCS.jl) (for the optimization
# algorithm). To check for convergence, we also need a function from the
# [MathOptInterface.jl](https://github.com/jump-dev/MathOptInterface.jl) package.

# ## Load Packages and Utility Functions

# +
using Printf, LinearAlgebra, Convex, SCS
import MathOptInterface
const MOI = MathOptInterface   #short alias used to test problem.status below

include("jlFiles/printmat.jl")   #provides printmat() for tidy matrix printing
# -

# ## A Linear-Quadratic Minimization Problem
#
# with/without constraints.
#
# We specify a matrix $Q$ and a vector $c$ and write the loss function as $b'Qb + c'b$
# where $b$ are the choice variables.
#
# We consider several cases below: no restrictions on $b$, bounds on $b$, a linear
# equality restriction and a non-linear inequality restriction.

Q = [1 0;        #we want to minimize b'Q*b + c'b
     0 16]       #this is the same as minimizing (x-2)^2 + (4y+3)^2
c = [-4, 24]

# ## Unconstrained Minimization

# +
n = length(c)

b  = Variable(n)        #define the choice variables
L1 = quadform(b,Q)      #part 1 of the objective, b'Q*b
L2 = dot(c,b)           #part 2, c'b

problem = minimize(L1+L2)
solve!(problem,()->SCS.Optimizer(verbose=false))
#keep the solution only if the solver reports convergence
problem.status == MOI.OPTIMAL ? b_sol = evaluate(b) : b_sol = NaN

println("Unconstrained minimization: the solution should be (2,-3/4)")
printmat(b_sol)
# -

# ## Constrained Minimization

# +
c1 = [2.75 <= b[1],b[2] <= -0.3]    #bounds on the solution

problem = minimize(L1+L2,c1)
solve!(problem,()->SCS.Optimizer(verbose=false))
problem.status == MOI.OPTIMAL ? b_sol = evaluate(b) : b_sol = NaN

println("with bounds on the solution: the solution should be (2.75,-0.75)")
printmat(b_sol)

# +
c2 = dot([1,2],b) == 3              #equality constraint

problem = minimize(L1+L2,c2)
solve!(problem,()->SCS.Optimizer(verbose=false))
problem.status == MOI.OPTIMAL ? b_sol = evaluate(b) : b_sol = NaN

println("equality constraint: the solution should be (4,-1/2)")
printmat(b_sol)

# +
c3 = b[2] + square(b[1]-4) <= 0     #non-linear inequality constraint

problem = minimize(L1+L2,c3)
solve!(problem,()->SCS.Optimizer(verbose=false))
problem.status == MOI.OPTIMAL ? b_sol = evaluate(b) : b_sol = NaN

println("non-linear inequality constraint: the solution should be close to (3.1,-0.79)")
printmat(b_sol)
# -
Tutorial_22c_Optimization_Convex.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Getting Started With TensorFlow: Basics # You’ll generally write TensorFlow programs, which you run as a chunk; This is at first sight kind of contradictory when you’re working with Python. However, if you would like, you can also use TensorFlow’s Interactive Session, which you can use to work more interactively with the library. This is especially handy when you’re used to working with IPython. # # For this notebook, you’ll focus on the second option: this will help you to get kickstarted with deep learning in TensorFlow. import tensorflow as tf #First, import the tensorflow library under the alias tf # Initialize two constants a = tf.constant(1) b = tf.constant(2) # add result = tf.add(a, b) # Intialize the Session sess = tf.Session() # Print the result print(sess.run(result)) # Close the session sess.close() # #### We can also use an array which be called a tensor in tensorflow to init A and B # + # Initialize two constants A = tf.constant([1,2,3,4]) B = tf.constant([5,6,7,8]) # add res1 = tf.add(A, B) # multiply res2 = tf.multiply(A, B) # Initialize Session and run with tf.Session() as sess: output = sess.run([res1, res2]) # Specify the output you want in the list print(output) # - # #### By using placeholders, we can feed arbitrary data into the compute graph. # + import numpy as np # Initialize two placeholders A = tf.placeholder(tf.float32, shape=(4,), name='A') B = tf.placeholder(tf.float32, shape=(4,), name='B') # add result = tf.add(A, B) with tf.Session() as sess: for i in range(10): a = np.random.randint(100, size=4) b = np.random.randint(100, size=4) print('a: {}, b: {}'.format(a, b)) print(sess.run([result], feed_dict={A: a, B: b}))
notebooks/00-a-plus-b.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Data manipulation with Pandas DataFrames
# <br>
# <br>

import pandas as pd
import numpy as np

# ## DataFrames

# Dict of Series: each key becomes a column; rows align on the Series indices.
# Note that FB and TWTR deliberately omit some metrics, producing NaN cells.
stock = {
    'AMZN': pd.Series([346.15,0.59,459,0.52,589.8,158.88],
                      index = ['Closing price','EPS','Shares Outstanding(M)','Beta', 'P/E','Market Cap(B)']),
    'GOOG': pd.Series([1133.43,36.05,335.83,0.87,31.44,380.64],
                      index = ['Closing price','EPS','Shares Outstanding(M)','Beta','P/E','Market Cap(B)']),
    'FB': pd.Series([61.48,0.59,2450,104.93,150.92],
                    index = ['Closing price','EPS','Shares Outstanding(M)','P/E', 'Market Cap(B)']),
    'YHOO': pd.Series([34.90,1.27,1010,27.48,0.66,35.36],
                      index=['Closing price','EPS','Shares Outstanding(M)','P/E','Beta', 'Market Cap(B)']),
    'TWTR':pd.Series([65.25,-0.3,555.2,36.23],
                     index=['Closing price','EPS','Shares Outstanding(M)','Market Cap(B)']),
    'AAPL':pd.Series([501.53,40.32,892.45,12.44,447.59,0.84],
                     index=['Closing price','EPS','Shares Outstanding(M)','P/E','Market Cap(B)','Beta'])
}

type(stock)

stock_df = pd.DataFrame(stock)
stock_df

# Explicit row order via the `index` argument.
stock_df = pd.DataFrame(stock, index = ['Closing price', 'EPS', 'Shares Outstanding(M)',
                                        'P/E', 'Market Cap(B)', 'Beta'])
stock_df

# Selecting columns; 'SCNW' is not a key in `stock`, so it becomes an all-NaN column.
stock_df = pd.DataFrame(stock, index = ['Closing price', 'EPS', 'Shares Outstanding(M)',
                                        'P/E', 'Market Cap(B)', 'Beta'],
                        columns = ['FB','TWTR','SCNW'])
stock_df

stock_df.index

stock_df.columns

# ## Operations with DataFrames

# Nested dict: outer keys become columns (countries), inner keys become rows (medals).
Quadro_Medalhas_Olimpiada = {
    'USA' : {'Ouro':46, 'Prata':37, 'Bronze':38},
    'China':{'Ouro':26, 'Prata':18, 'Bronze':26},
    'Britain':{'Ouro':27, 'Prata':23, 'Bronze':17},
    'Russe':{'Ouro':19, 'Prata':18, 'Bronze':19},
    'Germany':{'Ouro':17, 'Prata':10, 'Bronze':15}
}

olimpiada = pd.DataFrame.from_dict(Quadro_Medalhas_Olimpiada)
olimpiada

type(olimpiada)

# Column selection: bracket syntax, attribute syntax, list of columns, and .get().
Medalhas_China = olimpiada['China']
Medalhas_China

olimpiada.Russe

olimpiada[['Russe','Germany']]

olimpiada.get('Germany')

olimpiada

# iloc — purely positional row selection
olimpiada.iloc[1]

# ## Slicing

olimpiada[:2]

olimpiada[2:]

olimpiada[::2]

olimpiada[::-1]

# ## Slicing by label indexing

olimpiada.loc['Ouro']

olimpiada.loc[:,'USA']

olimpiada.loc['Prata','China']

# Chained variant of the lookup above (two steps instead of one).
olimpiada.loc['Prata']['China']

olimpiada.loc['Prata']

# Boolean mask over a row, then used to filter columns.
olimpiada.loc['Ouro'] > 20

olimpiada.loc[:, olimpiada.loc['Ouro'] > 20]

olimpiada.loc['Prata'] > 20

olimpiada.iloc[:2]

olimpiada.iloc[2,0:2]

olimpiada.iloc[2:3, :]

olimpiada.iloc[1,:]

olimpiada.iloc[2, 0]

# ## Removing a member from the dataframe

del olimpiada['USA']
olimpiada

# ## Inserting a member into the dataframe

# insert(position, column_name, values) adds 'Brasil' as the first column.
olimpiada.insert(0, 'Brasil', (7, 6, 6))
olimpiada

# ## DataFrame summary

olimpiada.describe()
Cap02/04-Cap02-Manipulacao-de-Dados-com-Pandas-Dataframes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown]
# # **Overview**
# This notebook benchmarks the MONAI's implementation of global mutual information
# against ANTsPyx's implementation.

# + [markdown]
# # **Global Mutual Information**
# Mutual information is an entropy-based measure of image alignment derived from
# probabilistic measures of image intensity values. Because a large number of image
# samples are used to estimate image statistics, the effects of image noise on the
# metric are attenuated. Mutual information is also robust under varying amounts of
# image overlap as the test image moves with respect to the reference. [1]
#
# Formally, the mutual information between two images `A` and `B` is defined as the following
#
# <img src=https://latex.codecogs.com/svg.image?I(a%2Cb)%26space%3B%3D%26space%3B%5Csum_%7Ba%2Cb%7D%26space%3Bp(a%2Cb)%26space%3B%5Clog(%5Cfrac%7Bp(a%2Cb)%7D%7Bp(a)p(b)%7D)>
#
# where `a` and `b` respectively refers to intensity bin centers of `A` and `B`.
#
# We used Parzen windowing in our implementation - given a set of `n` samples in image `A`,
# each sample `x` contributes to `p(a)` with a function of its intensity and the bin centre `a`:
#
# <img src=https://latex.codecogs.com/svg.image?p(a)%3D%26space%3B%5Cfrac%7B1%7D%7Bn%7D%26space%3B%5Csum_%7Bx%26space%3B%5Cin%26space%3BA%7D%26space%3BW(x%2C%26space%3Ba)>
#
# Similarly:
#
# <img src=https://latex.codecogs.com/svg.image?p(b)%3D%26space%3B%5Cfrac%7B1%7D%7Bn%7D%26space%3B%5Csum_%7By%26space%3B%5Cin%26space%3BB%7D%26space%3BW(y%2C%26space%3Bb)>
#
# To compute the joint distribution, we treat each sample as a pair of intensities of
# corresponding locations in the two images:
#
# <img src=https://latex.codecogs.com/svg.image?p(a%2Cb)%26space%3B%3D%26space%3B%5Cfrac%7B1%7D%7Bn%7D%5Csum_%7B(x%2Cy)%5Cin(A%2CB)%7D%26space%3BW(x%2Ca)W(y%2Cb)%26space%3B>
#
# Two weighting functions - ``"gaussian"`` and ``"b-spline"`` - are provided.
# Here, we compare our ``"b-spline"`` method with the validated
# [ANTsPy](https://antspy.readthedocs.io/en/latest/) library.
#
# >[1] "PET-CT Image Registration in the Chest Using Free-form Deformations"
# IEEE Transactions in Medical Imaging. Vol.22, No.1, January 2003. pp.120-128.

# + [markdown]
# # **Setup environment**

# +
# Install dependencies only if they are not already importable.
# !python -c "import monai" || pip install -q "monai-weekly[nibabel]"
# !python -c "import ants" || pip install -q antspyx==0.2.9
# !python -c "import plotly" || pip install -q plotly==5.3

# +
import ants
import os
import tempfile
import torch
import plotly.graph_objects as go
import numpy as np

from monai import transforms
from monai.apps.utils import download_url
from monai.losses import GlobalMutualInformationLoss

# +
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from monai.config import print_config
print_config()

# + [markdown]
# # **Download data**

# +
# Use MONAI_DATA_DIRECTORY if set; otherwise fall back to a temp directory.
directory = os.environ.get("MONAI_DATA_DIRECTORY")
root_dir = tempfile.mkdtemp() if directory is None else directory
print(f"root dir is: {root_dir}")

file_url = "https://drive.google.com/uc?id=17tsDLvG_GZm7a4fCVMCv-KyDx0hqq1ji"
file_path = f"{root_dir}/Prostate_T2W_AX_1.nii"
download_url(file_url, file_path)

# + [markdown]
# # **Comparison**
# Both ANTsPy's and our implementation follows [1] - a third order BSpline kernel is used
# for the pred image intensity PDF and a zero order (box car) BSpline kernel is used for
# the target image intensity PDF.
#
# For benchmarking, we set the number of bins to 32, same as
# [ANTsPy implementation](https://github.com/ANTsX/ANTsPy/blob/master/ants/lib/LOCAL_antsImageMutualInformation.cxx).
#
# We took a lower-pelvic 3d MRI as `a1` and transformed it to get `a2` and report the
# Global Mutual Information between `a1` and `a2` derived with ANTsPy's and our implementation.

# + [markdown]
# Here, we first initialise a few functions necessary for comparison


# +
def transformation(
    translate_params=(0., 0., 0.),
    rotate_params=(0., 0., 0.),
):
    """
    Read and transform Prostate_T2W_AX_1.nii

    Args:
        translate_params: a tuple of 3 floats, translation is in
            pixel/voxel relative to the center of the input image.
            Defaults to no translation.
        rotate_params: a rotation angle in radians, a tuple of 3 floats for 3D.
            Defaults to no rotation.

    Returns:
        numpy array of shape HWD
    """
    # Load -> affine (translate/rotate) -> zero-mean/unit-variance normalize.
    # Reads the module-level `file_path` downloaded above.
    transform_list = [
        transforms.LoadImaged(keys="img"),
        transforms.Affined(
            keys="img",
            translate_params=translate_params,
            rotate_params=rotate_params,
            as_tensor_output=False,
            device=None,
        ),
        transforms.NormalizeIntensityd(keys=["img"])
    ]
    transformation = transforms.Compose(transform_list)
    return transformation({"img": file_path})["img"]


# +
def get_result(a1, a2):
    """
    Calculate mutual information with both ANTsPyx and MONAI implementation

    Args:
        a1: numpy array of shape HWD
        a2: numpy array of shape HWD
    """
    antspyx_result = ants.image_mutual_information(
        ants.from_numpy(a1), ants.from_numpy(a2)
    )
    # MONAI loss expects NCHWD tensors, hence the two unsqueeze(0) calls.
    monai_result = GlobalMutualInformationLoss(
        kernel_type="b-spline", num_bins=32, sigma_ratio=0.015
    )(
        torch.tensor(a1).unsqueeze(0).unsqueeze(0),
        torch.tensor(a2).unsqueeze(0).unsqueeze(0)
    ).item()
    return antspyx_result, monai_result


# +
def plot(x, results, xaxis_title):
    """
    Plot diagram to compare ANTsPyx and MONAI result

    Args:
        x: list, x_axis values
        results: list of list
        xaxis_title: str
    """
    # One trace per implementation, in a fixed color/name order.
    data = [
        go.Scatter(
            x=x,
            y=y,
            name=n,
            mode="lines+markers",
            line={'color': color, 'width': 1},
        )
        for y, n, color in zip(results, ['ANTsPy', 'MONAI'], ['coral', 'cornflowerblue'])
    ]
    fig = go.Figure(data=data)
    fig.update_layout(
        xaxis_title=xaxis_title,
        yaxis_title='MutualInformation',
        yaxis_range=[-2.0, 0.0]
    )
    fig.show()


# +
def compare_antspyx_monai(transform_params_list, transform_name):
    """
    Args:
        transform_params_list: a list of tuple
        transform_name: str
    """
    antspyx_result = []
    monai_result = []
    # a1 is the original image without translation and rotation
    a1 = transformation((0., 0., 0.))
    for transform_params in transform_params_list:
        # translate/rotate the image to get a2
        a2 = transformation(
            translate_params=transform_params[0],
            rotate_params=transform_params[1]
        )
        a_r, m_r = get_result(a1, a2)
        antspyx_result.append(a_r)
        monai_result.append(m_r)
    # calculate the transformation euclidean_distance
    # NOTE(review): despite the loop-variable name, the norm is taken over the
    # whole (translate, rotate) pair, so rotation angles also contribute to x.
    x = [np.linalg.norm(np.array(translation_param)) for translation_param in transform_params_list]
    # sort results by the transformation euclidean distance
    antspyx_result = [i for _, i in sorted(zip(x, antspyx_result))]
    monai_result = [i for _, i in sorted(zip(x, monai_result))]
    x = sorted(x)
    plot(
        x=x,
        results=[antspyx_result, monai_result],
        xaxis_title=transform_name,
    )


# + [markdown]
# The following image visualises the 3d MRI after transformed by different translation params:
#
# ![a](https://i.ibb.co/6X03szZ/translation-vis.png)

# + [markdown]
# **Translation**
#
# First, we incrementally increase the translation in all (x, y, z) directions by (1.0, 1.0, 1.0).

# +
transform_params_list = [((i, i, i), (0., 0., 0.)) for i in range(10)]
compare_antspyx_monai(transform_params_list, "xyz_translation")

# + [markdown]
# Then, we translate in single directions by randomly sampled parameters.

# +
transform_params_list = [((np.random.rand() * 10, 0., 0.), (0., 0., 0.)) for i in range(10)]
compare_antspyx_monai(transform_params_list, "x_translation")

# +
transform_params_list = [((0., np.random.rand() * 10, 0.), (0., 0., 0.)) for i in range(10)]
compare_antspyx_monai(transform_params_list, "y_translation")

# +
transform_params_list = [((0., 0., np.random.rand() * 10), (0., 0., 0.)) for i in range(10)]
compare_antspyx_monai(transform_params_list, "z_translation")

# + [markdown]
# **Rotation**
#
# We also incrementally increase the rotation in all (x, y, z) directions,
# in steps of π/100 radians per axis.

# +
transform_params_list = [((0., 0., 0.), (np.pi / 100 * i, np.pi / 100 * i, np.pi / 100 * i)) for i in range(10)]
compare_antspyx_monai(transform_params_list, "rotation")
modules/benchmark_global_mutual_information.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="nCc3XZEyG3XV" # Lambda School Data Science # # *Unit 2, Sprint 3, Module 3* # # --- # # # # Applied Modeling, Module 3 # # You will use your portfolio project dataset for all assignments this sprint. # # ## Assignment # # Complete these tasks for your project, and document your work. # # - [ ] Continue to iterate on your project: data cleaning, exploration, feature engineering, modeling. # - [ ] Make at least 1 partial dependence plot to explain your model. # - [ ] Share at least 1 visualization on Slack. # # (If you have not yet completed an initial model yet for your portfolio project, then do today's assignment using your Tanzania Waterpumps model.) # # ## Stretch Goals # - [ ] Make multiple PDPs with 1 feature in isolation. # - [ ] Make multiple PDPs with 2 features in interaction. # - [ ] Use Plotly to make a 3D PDP. # - [ ] Make PDPs with categorical feature(s). Use Ordinal Encoder, outside of a pipeline, to encode your data first. If there is a natural ordering, then take the time to encode it that way, instead of random integers. Then use the encoded data with pdpbox. Get readable category names on your plot, instead of integer category codes. 
# # ## Links # - [<NAME>: Interpretable Machine Learning — Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) + [animated explanation](https://twitter.com/ChristophMolnar/status/1066398522608635904) # - [Kaggle / <NAME>: Machine Learning Explainability — Partial Dependence Plots](https://www.kaggle.com/dansbecker/partial-plots) # - [Plotly: 3D PDP example](https://plot.ly/scikit-learn/plot-partial-dependence/#partial-dependence-of-house-value-on-median-age-and-average-occupancy) # - import pandas as pd # + # %%capture import sys # If you're on Colab: if 'google.colab' in sys.modules: DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/' # !pip install category_encoders==2.* # !pip install eli5 # !pip install pdpbox # If you're working locally: else: DATA_PATH = '../data/' # - df = pd.read_csv('asteroids.csv') df.shape df.isnull().sum() df.describe().T df.describe(include = object).T
JWH_assignment_DS_233.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # Importing the Libraries

# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import os

pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# -

# # Loading Data

# ## Load Acura Data

# +
# One shared loader for every brand (this notebook previously re-defined
# import_data three times with slightly different bodies).
def import_data(data_name, info=False):
    """Read ``<data_name>.csv`` from the interim data folder and clean it.

    Drops the stray "Unnamed: 0" index column and expands the "LA"
    abbreviation so the City column is consistent across brands.

    Parameters
    ----------
    data_name : str
        Base name of the CSV file (without extension) in the interim folder.
    info : bool, optional
        When True, print the loaded frame's ``.info()`` summary.
        (The earlier drafts called ``Audi.info()`` here, which printed the
        wrong, global frame — fixed to report on the frame just loaded.)

    Returns
    -------
    pandas.DataFrame
        The cleaned data.
    """
    file_folder = r"C:\Users\PSALISHOL\Documents\My Projects\Car Prediction\data\interim"
    filepath = os.path.join(file_folder, data_name + ".csv")

    # Reading the file
    data = pd.read_csv(filepath)
    data = data.drop("Unnamed: 0", axis=1)
    data["City"] = data["City"].apply(lambda city: city.replace("LA", "Los Angeles"))
    if info is True:
        data.info()  # was Audi.info(): printed the wrong DataFrame
    return data


# Importing the data
Acura = import_data("Acura_model_cl")
Acura.info()
# -

Acura.isnull().sum()

Acura.head()

# ## Load Audi Data

Audi = import_data("Audi_cl", info=True)


# Dealing with missing values
def missing_val(data):
    """Fill missing values and return the DataFrame.

    Categorical (object-dtype) columns are filled with their mode; numeric
    columns with their mean.  The original draft called the non-existent
    ``.fill``/``.filln`` methods and combined assignment with
    ``inplace=True`` (which assigns None back to the column) — both fixed.
    """
    cat_fea_missing = [feature for feature in data.columns
                       if data[feature].dtype == object and data[feature].isnull().sum() > 0]
    num_fea_missing = [feature for feature in data.columns
                       if data[feature].dtype != object and data[feature].isnull().sum() > 0]

    for feature in cat_fea_missing:
        # mode() returns a Series; take its first value as the fill value
        data[feature] = data[feature].fillna(data[feature].mode()[0])
    for feature in num_fea_missing:
        data[feature] = data[feature].fillna(data[feature].mean())
    return data


# ## Load BMW Data

Bmw = import_data("BMW_cl", info=True)

Bmw.head()

Bmw["fueltype"].value_counts()

# Fill BMW's missing values with the shared helper.  (The draft line
# ``Bmw[feature].fillna()`` referenced an undefined ``feature`` name and
# discarded its result, so it did nothing.)
Bmw = missing_val(Bmw)
notebooks/Exploration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Simple Gaussian Mixture Model on Toy Data # ## Goal # # This demo walks you through a "Hello World" example of using **bnpy** from within Python. # # We'll train a Gaussian mixture model using expectation maximization (EM). It should only take 30 seconds to produce a nice visualization. # # ## Installation # # Follow [these instructions](../Installation.md) to make sure you've got **bnpy** and its dependencies installed correctly. # # Throughout this demo (and other demos), we'll assume you successfully installed **bnpy** in a directory which is on your **\$PYTHONPATH**. # # We can use the following import statements to load bnpy and matplotlib plotting utilities. import bnpy # %pylab inline from bnpy.viz.PlotUtil import ExportInfo bnpy.viz.PlotUtil.ConfigPylabDefaults(pylab) # ## Toy dataset : `AsteriskK8` # We'll use a simple dataset of 2D points, drawn from 8 well-separated Gaussian clusters. import AsteriskK8 Data = AsteriskK8.get_data() # We can visualize this dataset as follows: # + pylab.plot(Data.X[:,0], Data.X[:,1], 'k.'); pylab.axis('image'); pylab.xlim([-1.75, 1.75]); pylab.xticks([-1, 0, 1]); pylab.ylim([-1.75, 1.75]); pylab.yticks([-1, 0, 1]); # Ignore this block. Only needed for auto-generation of documentation. if ExportInfo['doExport']: W_in, H_in = pylab.gcf().get_size_inches() figpath100 = '../docs/source/_static/GaussianToyData_%dx%d.png' % (100, 100) pylab.savefig(figpath100, bbox_inches=0, pad_inches=0, dpi=ExportInfo['dpi']/W_in); # - # ## Running inference with **bnpy** # # We'll fit a $K=8$ Gaussian mixture model to a simple toy dataset. Here's the code along with its output to **stdout**. # hmodel, RInfo = bnpy.run('AsteriskK8', 'FiniteMixtureModel', 'Gauss', 'EM', nLap=50, K=8, convergeThr=0.001) # That's it. 
**bnpy.run** is the function that runs experiments. It handles loading data, constructing models and applying learning algorithms. # # ## Frequently Asked Questions # # ### Where are results saved? # # Model parameters will also be saved to a subdirectory of $BNPYOUTDIR. # ### How do I plot the objective function over time? # # Here, we plot the log evidence (sometimes called the evidence lower bound or ELBO). bnpy.viz.PlotELBO.plotJobsThatMatchKeywords('AsteriskK8/defaultjob', taskids=1); pylab.ylim([-3.8, 1.2]) # ### How do I plot the learned model parameters? # # We can examine the resulting model parameters (means and covariances) using **bnpy**'s built-in [visualization tools](../Code/Viz/Visualization.md). # Specifically, we can use the `plotCompsForTask` method to visualize the final result of this run. # # This creates a 2D plot of the 8 learned components (colored elliptical contours). # # Each component's Gaussian pdf is shown as a contour plot, where lines represent contours of equal probability density. The contours of a Gaussian always form an ellipse. # # + bnpy.viz.PlotComps.plotCompsForTask('AsteriskK8/defaultjob/1/', Data=Data); pylab.axis('image'); pylab.xlim([-1.75, 1.75]); pylab.xticks([-1, 0, 1]); pylab.ylim([-1.75, 1.75]); pylab.yticks([-1, 0, 1]); # Ignore this block. Only needed for auto-generation of documentation. if ExportInfo['doExport']: W_in, H_in = pylab.gcf().get_size_inches() figpath100 = '../docs/source/_static/GaussianToyData_FiniteMixtureModel_EM_SingleRunDemo_%dx%d.png' % (100, 100) pylab.savefig(figpath100, bbox_inches=0, pad_inches=0, dpi=ExportInfo['dpi']/W_in); # - # ### Will the solution always be perfect? # # No learning algorithm in **bnpy** is guaranteed to find the best solution. Instead, EM and variational inference are vulnerable to *local optima*. We think this is important to highlight from the first demo, to raise awareness of this issue.
demos/GaussianToyData-FiniteMixtureModel-EM-SingleRunDemo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python3
# ---

# +
## Introduction to Linear Modeling in Python

# + active=""
# Course Description
# One of the primary goals of any scientist is to find patterns in data and build models to describe, predict, and extract insight from those patterns. The most fundamental of these patterns is a linear relationship between two variables. This course provides an introduction to exploring, quantifying, and modeling linear relationships in data, by demonstrating techniques such as least-squares, linear regression, estimation, and bootstrap resampling. Here you will apply the most powerful modeling tools in the python data science ecosystem, including scipy, statsmodels, and scikit-learn, to build and evaluate linear models. By exploring the concepts and applications of linear models with python, this course serves as both a practical introduction to modeling, and as a foundation for learning more advanced modeling techniques and tools in statistics and machine learning.
# -

# +
### MODULE 1
### Exploring Linear Trends

# + active=""
# We start the course with an initial exploration of linear relationships, including some motivating examples of how linear models are used, and demonstrations of data visualization methods from matplotlib. We then use descriptive statistics to quantify the shape of our data and use correlation to quantify the strength of linear relationships between two variables.
# - # + ## Reasons for Modeling: Interpolation # Compute the total change in distance and change in time total_distance = distances[-1] - distances[0] total_time = times[-1] - times[0] # Estimate the slope of the data from the ratio of the changes average_speed = total_distance / total_time # Predict the distance traveled for a time not measured elapse_time = 2.5 distance_traveled = average_speed * elapse_time print("The distance traveled is {}".format(distance_traveled)) # - # + ## Reasons for Modeling: Extrapolation # Select a time not measured. time = 8 # Use the model to compute a predicted distance for that time. distance = model(8) # Inspect the value of the predicted distance traveled. print(distance) # Determine if you will make it without refueling. answer = (distance <= 400) print(answer) # - # + ## Reasons for Modeling: Estimating Relationships # Complete the function to model the efficiency. def efficiency_model(miles, gallons): return np.mean( miles / gallons ) # Use the function to estimate the efficiency for each car. car1['mpg'] = efficiency_model( car1['miles'] , car1['gallons'] ) car2['mpg'] = efficiency_model( car2['miles'] , car2['gallons'] ) # Finish the logic statement to compare the car efficiencies. 
if car1['mpg'] > car2['mpg'] :
    print('car1 is the best')
elif car1['mpg'] < car2['mpg'] :
    print('car2 is the best')
else:
    print('the cars have the same efficiency')
# -

# +
# Create figure and axis objects using subplots()
fig, axis = plt.subplots()

# Plot line using the axis.plot() method
line = axis.plot(times, distances, linestyle=" ", marker="o", color="red")

# Use the plt.show() method to display the figure
plt.show()
# -

# +
## Plotting the Model on the Data

# Pass times and measured distances into model
# BUG FIX: this first attempt passed the undefined/wrong name `distances`;
# the model must be evaluated on the measured data, as in the cell below.
model_distances = model(times, measured_distances)

# Create figure and axis objects and call axis.plot() twice to plot data and model distances versus times
fig, axis = plt.subplots()
axis.plot(times, measured_distances, linestyle=" ", marker="o", color="black", label="Measured")
# BUG FIX: linestyle was "'-'" (literal quotes inside the string), which is
# not a valid matplotlib linestyle; it must be plain '-'.
axis.plot(times, model_distances, linestyle='-', marker=None, color="red", label="Modeled")

# Add grid lines and a legend to your plot, and then show to display
axis.grid(True)
axis.legend(loc="best")
plt.show()
# -

# +
## Plotting the Model on the Data
# (corrected repeat of the exercise above, kept for the transcript)

# Pass times and measured distances into model
model_distances = model(times, measured_distances)

# Create figure and axis objects and call axis.plot() twice to plot data and model distances versus times
fig, axis = plt.subplots()
axis.plot(times, measured_distances, linestyle=" ", marker="o", color="black", label="Measured")
axis.plot(times, model_distances, linestyle='-', marker=None, color="red", label="Modeled")

# Add grid lines and a legend to your plot, and then show to display
axis.grid(True)
axis.legend(loc="best")
plt.show()
# -

# +
## Visually Estimating the Slope & Intercept

# Look at the plot data and guess initial trial values
trial_slope = 1
trial_intercept = 2

# Input those guesses into the model function to compute the model values.
# (The exercise's model takes intercept first and returns the x/y arrays.)
xm, ym = model(trial_intercept, trial_slope)

# Compare your model to the data with the plot function
fig = plot_data_and_model(xd, yd, xm, ym)
plt.show()

# Repeat the steps above until your slope and intercept guess makes the model line up with the data.
final_slope = 1
final_intercept = 2
# -

# +
## Mean, Deviation, & Standard Deviation

# Compute the deviations by subtracting the mean offset
dx = x - np.mean(x)
dy = y - np.mean(y)

# Normalize the data by dividing the deviations by the standard deviation
zx = dx / np.std(x)
zy = dy / np.std(y)

# Plot comparisons of the raw data and the normalized data
fig = plot_cdfs(dx, dy, zx, zy)
# -

# +
## Covariance vs Correlation

# Compute the covariance from the deviations.
dx = x - np.mean(x)
dy = y - np.mean(y)
covariance = np.mean(dx * dy)
print("Covariance: ", covariance)

# Compute the correlation from the normalized deviations.
zx = dx / np.std(x)
zy = dy / np.std(y)
correlation = np.mean(zx * zy)
print("Correlation: ", correlation)

# Plot the normalized deviations for visual inspection.
fig = plot_normalized_deviations(zx, zy)

## <script.py> output:
##     Covariance:  69.6798182602
##     Correlation:  0.982433369757
# -

# +
## Correlation Strength

# Complete the function that will compute correlation.
def correlation(x, y):
    """Pearson correlation: mean product of the z-scored deviations."""
    x_dev = x - np.mean(x)
    y_dev = y - np.mean(y)
    x_norm = x_dev / np.std(x)
    y_norm = y_dev / np.std(y)
    return np.mean(x_norm * y_norm)

# Compute and store the correlation for each data set in the list.
for name, data in data_sets.items():
    data['correlation'] = correlation(data['x'], data['y'])
    print('data set {} has correlation {:.2f}'.format(name, data['correlation']))

# Assign the data set with the best correlation.
best_data = data_sets['A']
# -

# +
## MODULE 2
## Building Linear Models
# -

# + active=""
# Here we look at the parts that go into building a linear model.
# Using the concept of a Taylor Series, we focus on the parameters slope and
# intercept, how they define the model, and how to interpret them in several
# applied contexts. We apply a variety of python modules to find the model that
# best fits the data, by computing the optimal values of slope and intercept,
# using least-squares, numpy, statsmodels, and scikit-learn.
# -

# +
## Model Components

# Define the general model as a function
def model(x, a0=3, a1=2, a2=0):
    """Quadratic model y = a0 + a1*x + a2*x**2 (the defaults give a line)."""
    return a0 + (a1*x) + (a2*x*x)

# Generate array x, then predict ym values for specific, non-default a0 and a1
x = np.linspace(-10, 10, 21)
ym = model(x)

# Plot the results, ym versus x
fig = plot_prediction(x, ym)
# -

# +
## Model Parameters

# Complete the plotting function definition
def plot_data_with_model(xd, yd, ym):
    """Plot measured data, over-plot the model in red, show, return the figure."""
    fig = plot_data(xd, yd)              # plot measured data
    fig.axes[0].plot(xd, ym, color='red')  # over-plot modeled data
    plt.show()
    return fig

# Select new model parameters a0, a1, and generate modeled `ym` from them.
a0 = 128
a1 = 25
ym = model(xd, a0, a1)

# Plot the resulting model to see whether it fits the data
fig = plot_data_with_model(xd, yd, ym)
# -

# +
## Linear Proportionality

# Complete the function to convert C to F
def convert_scale(temps_C):
    """Convert Celsius temperatures to Fahrenheit via the linear relation."""
    (freeze_C, boil_C) = (0, 100)
    (freeze_F, boil_F) = (32, 212)
    change_in_C = boil_C - freeze_C
    change_in_F = boil_F - freeze_F
    # BUG FIX: slope is the ratio of the *changes* (180/100 = 1.8), not
    # boil_F/boil_C (2.12); the old formula mapped 100 C to 244 F instead
    # of 212 F.
    slope = change_in_F / change_in_C
    intercept = freeze_F - (slope * freeze_C)
    temps_F = intercept + (slope * temps_C)
    return temps_F

# Use the convert function to compute values of F and plot them
temps_C = np.linspace(0, 100, 101)
temps_F = convert_scale(temps_C)
fig = plot_temperatures(temps_C, temps_F)
# -

# +
## Slope and Rates-of-Change

# Compute an array of velocities as the slope between each point
diff_distances = np.diff(distances)
diff_times = np.diff(times)
velocities = diff_distances / diff_times

# Characterize the center and spread of the velocities
v_avg = np.mean(velocities)
v_max = np.max(velocities)
v_min = np.min(velocities)
v_range = v_max - v_min

# Plot the distribution of velocities
fig = plot_velocity_timeseries(times[1:], velocities)
# -

# +
## Intercept and Starting Points

# Import ols from statsmodels, and fit a model to the data
from statsmodels.formula.api import ols
model_fit = ols(formula=" masses ~ volumes", data=df)
model_fit = model_fit.fit()

# Extract the model parameter values, and assign them to a0, a1
a0 = model_fit.params['Intercept']
a1 = model_fit.params['volumes']

# Print model parameter values with meaningful names, and compare to summary()
print( "container_mass = {:0.4f}".format(a0) )
print( "solution_density = {:0.4f}".format(a1) )
print( model_fit.summary() )
# -

# NOTE: the captured console output below was pasted in as bare text, which is
# a syntax error in a .py notebook file; it is preserved here as a comment.
#
# <script.py> output:
#     container_mass = 5.4349
#     solution_density = 1.1029
#                                 OLS Regression Results
#     ==============================================================================
#     Dep. Variable:                 masses   R-squared:                       0.999
#     Model:                            OLS   Adj. R-squared:                  0.999
#     Method:                 Least Squares   F-statistic:                 1.328e+05
#     Date:                Thu, 16 May 2019   Prob (F-statistic):          1.19e-156
#     Time:                        19:09:14   Log-Likelihood:                 102.39
#     No. Observations:                 101   AIC:                            -200.8
#     Df Residuals:                      99   BIC:                            -195.5
#     Df Model:                           1
#     Covariance Type:            nonrobust
#     ==============================================================================
#                      coef    std err          t      P>|t|      [0.025      0.975]
#     ------------------------------------------------------------------------------
#     Intercept      5.4349      0.023    236.805      0.000       5.389       5.480
#     volumes        1.1029      0.003    364.408      0.000       1.097       1.109
#     ==============================================================================
#     Omnibus:                        0.319   Durbin-Watson:                   2.072
#     Prob(Omnibus):                  0.852   Jarque-Bera (JB):                0.169
#     Skew:                           0.100   Prob(JB):                        0.919
#     Kurtosis:                       3.019   Cond. No.                         20.0
#     ==============================================================================
#
#     Warnings:
#     [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
# + ## Residual Sum of the Squares # Load the data x_data, y_data = load_data() # Model the data with specified values for parameters a0, a1 y_model = model(x_data, a0=150, a1=25) # Compute the RSS value for this parameterization of the model rss = np.sum(np.square(y_model - y_data)) print("RSS = {}".format(rss)) # - # + ## Minimizing the Residuals # Complete function to load data, build model, compute RSS, and plot def compute_rss_and_plot_fit(a0, a1): xd, yd = load_data() ym = model(xd, a0, a1) residuals = ym - yd rss = np.sum(np.square(residuals)) summary = "Parameters a0={}, a1={} yield RSS={:0.2f}".format(a0, a1, rss) fig = plot_data_with_model(xd, yd, ym, summary) return rss, summary # Chose model parameter values and pass them into RSS function rss, summary = compute_rss_and_plot_fit(a0=150, a1=25) print(summary) # - # + ## Visualizing the RSS Minima # Loop over all trial values in a1_array, computing rss for each a1_array = np.linspace(15, 35, 101) for a1_trial in a1_array: y_model = model(x_data, a0=150, a1=a1_trial) rss_value = compute_rss(y_data, y_model) rss_list.append(rss_value) # Find the minimum RSS and the a1 value from whence it came rss_array = np.array(rss_list) best_rss = np.min(rss_array) best_a1 = a1_array[np.where(rss_array==best_rss)] print('The minimum RSS = {}, came from a1 = {}'.format(best_rss, best_a1)) # Plot your rss and a1 values to confirm answer fig = plot_rss_vs_a1(a1_array, rss_array) # - # + ## Least-Squares with `numpy` # prepare the means and deviations of the two variables x_mean = np.sum(x)/len(x) y_mean = np.sum(y)/len(y) x_dev = x - np.mean(x) y_dev = y - np.mean(y) # Complete least-squares formulae to find the optimal a0, a1 a1 = np.sum(x_dev * y_dev) / np.sum( np.square(x_dev) ) a0 = np.mean(y) - (a1 * np.mean(x)) # Use the those optimal model parameters a0, a1 to build a model y_model = model(x, a0, a1) # plot to verify that the resulting y_model best fits the data y fig, rss = compute_rss_and_plot_fit(a0, a1) # - # + 
## Optimization with Scipy # Define a model function needed as input to scipy def model_func(x, a0, a1): return a0 + (a1*x) # Load the measured data you want to model x_data, y_data = load_data() # call curve_fit, passing in the model function and data; then unpack the results param_opt, param_cov = optimize.curve_fit(model_func, x_data, y_data) a0 = param_opt[0] # a0 is the intercept in y = a0 + a1*x a1 = param_opt[1] # a1 is the slope in y = a0 + a1*x # test that these parameters result in a model that fits the data fig, rss = compute_rss_and_plot_fit(a0, a1) # - # + ## Least-Squares with `statsmodels` # Pass data and `formula` into ols(), use and `.fit()` the model to the data model_fit = ols(formula="y_column ~ x_column", data=df).fit() # Use .predict(df) to get y_model values, then over-plot y_data with y_model y_model = model_fit.predict(df) fig = plot_data_with_model(x_data, y_model, y_model) # Extract the a0, a1 values from model_fit.params a0 = model_fit.params['Intercept'] a1 = model_fit.params['x_column'] # Visually verify that these parameters a0, a1 give the minimum RSS fig, rss = compute_rss_and_plot_fit(a0, a1) # - # + ### MODULE 3 ## Making Model Predictions # - # + ## Linear Model in Anthropology # import the sklearn class LinearRegression and initialize the model from sklearn.linear_model import LinearRegression model = LinearRegression(fit_intercept=False) # Prepare the measured data arrays and fit the model to them legs = legs.reshape(len(legs),1) heights = heights.reshape(len(heights),1) model.fit(legs, heights) # Use the fitted model to make a prediction for the found femur fossil_leg = 50.7 fossil_height = model.predict(fossil_leg) print("Predicted fossil height = {:0.2f} cm".format(fossil_height[0,0])) # - # + ## Linear Model in Oceanography # Import LinearRegression class, build a model, fit to the data from sklearn.linear_model import LinearRegression model = LinearRegression(fit_intercept=True) model.fit(years, levels) # Use model to make 
a prediction for one year, 2100 future_year = 2100 future_level = model.predict(future_year) print("Prediction: year = {}, level = {:.02f}".format(future_year, future_level[0,0])) # Use model to predict for many years, and over-plot with measured data years_forecast = np.linspace(1970, 2100, 131).reshape(-1, 1) levels_forecast = model.predict(years_forecast) fig = plot_data_and_forecast(years, levels, years_forecast, levels_forecast) # - # + ## Linear Model in Cosmology # Fit the model, based on the form of the formula model_fit = ols(formula="velocities ~ distances", data=df).fit() # Extract the model parameters and associated "errors" or uncertainties a0 = model_fit.params['Intercept'] a1 = model_fit.params['distances'] e0 = model_fit.bse['Intercept'] e1 = model_fit.bse['distances'] # Print the results print('For slope a1={:.02f}, the uncertainty in a1 is {:.02f}'.format(a1, e1)) print('For intercept a0={:.02f}, the uncertainty in a0 is {:.02f}'.format(a0, e0)) # - # + ## Interpolation: Inbetween Times # build and fit a model to the df_monthly data model_fit = ols('Close ~ DayCount', data=df_monthly).fit() # Use the model FIT to the MONTHLY data to make a predictions for both monthly and daily data df_monthly['Model'] = model_fit.predict(df_monthly.DayCount) df_daily['Model'] = model_fit.predict(df_daily.DayCount) # Plot the monthly and daily data and model, compare the RSS values seen on the figures fig_monthly = plot_model_with_data(df_monthly) fig_daily = plot_model_with_data(df_daily) # - # + ## Extrapolation: Going Over the Edge # Compute the residuals, "data - model", and determine where [residuals < tolerance] residuals = np.abs(y_model - y_data) tolerance = 100 x_good = x_data[residuals < tolerance] # Find the min and max of the "good" values, and plot y_data, y_model, and the tolerance range print('Minimum good x value = {}'.format(np.min(x_good))) print('Maximum good x value = {}'.format(np.max(x_good))) fig = plot_data_model_tolerance(x_data, y_data, 
y_model, tolerance) # - # + ## RMSE Step-by-step # Build the model and compute the residuals "model - data" y_model = model_fit_and_predict(x_data, y_data) residuals = y_model - y_data # Compute the RSS, MSE, and RMSE and print the results RSS = np.sum(np.square(residuals)) MSE = RSS/len(residuals) RMSE = np.sqrt(MSE) print('RMSE = {:0.2f}, MSE = {:0.2f}, RSS = {:0.2f}'.format(RMSE, MSE, RSS)) # <script.py> output: # RMSE = 26.23, MSE = 687.83, RSS = 14444.48 # + active="" # Notice that instead of computing RSS and normalizing with division by len(residuals) to get the MSE, you could have just applied np.mean(np.square()) to the residuals. Another useful point to help you remember; you can think of the MSE like a variance, but instead of differencing the data from its mean, you difference the data and the model. Similarly, think of RMSE as a standard deviation. # - # + ## R-Squared # Compute the residuals and the deviations residuals = y_model - y_data deviations = np.mean(y_data) - y_data # Compute the variance of the residuals and deviations var_residuals = np.sum(np.square(residuals)) var_deviations = np.sum(np.square(deviations)) # Compute r_squared as 1 - the ratio of RSS/Variance r_squared = 1 - (var_residuals / var_deviations) print('R-squared is {:0.2f}'.format(r_squared)) # - # + ## Variation Around the Trend # Store x_data and y_data, as times and distances, in df, and use ols() to fit a model to it. 
df = pd.DataFrame(dict(times=x_data, distances=y_data)) model_fit = ols(formula="distances ~ times", data=df).fit() # Extact the model parameters and their uncertainties a0 = model_fit.params['Intercept'] e0 = model_fit.bse['Intercept'] a1 = model_fit.params['times'] e1 = model_fit.bse['times'] # Print the results with more meaningful names print('Estimate of the intercept = {:0.2f}'.format(a0)) print('Uncertainty of the intercept = {:0.2f}'.format(e0)) print('Estimate of the slope = {:0.2f}'.format(a1)) print('Uncertainty of the slope = {:0.2f}'.format(e1)) # - # + ## Variation in Two Parts # Build and fit two models, for columns distances1 and distances2 in df model_1 = ols(formula="distances1 ~ times", data=df).fit() model_2 = ols(formula="distances2 ~ times", data=df).fit() # Extract R-squared for each model, and the standard error for each slope se_1 = model_1.bse['times'] se_2 = model_2.bse['times'] rsquared_1 = model_1.rsquared rsquared_2 = model_2.rsquared # Print the results print('Model 1: SE = {:0.3f}, R-squared = {:0.3f}'.format(se_1, rsquared_1)) print('Model 2: SE = {:0.3f}, R-squared = {:0.3f}'.format(se_2, rsquared_2)) # - # + ### MODULE 4 ### Estimating Model Parameters # + active="" # In our final chapter, we introduce concepts from inferential statistics, and use them to explore how maximum likelihood estimation and bootstrap resampling can be used to estimate linear model parameters. We then apply these methods to make probabilistic statements about our confidence in the model parameters. 
# - # + ## Sample Statistics versus Population # Compute the population statistics print("Population mean {:.1f}, stdev {:.2f}".format( population.mean(), population.std() )) # Set random seed for reproducibility np.random.seed(42) # Construct a sample by randomly sampling 31 points from the population sample = np.random.choice(population, size=31) # Compare sample statistics to the population statistics print(" Sample mean {:.1f}, stdev {:.2f}".format( sample.mean(), sample.std() )) # <script.py> output: # Population mean 100.0, stdev 9.74 # Sample mean 102.1, stdev 9.34 # - # + ## Variation in Sample Statistics # Initialize two arrays of zeros to be used as containers means = np.zeros(num_samples) stdevs = np.zeros(num_samples) # For each iteration, compute and store the sample mean and sample stdev for ns in range(num_samples): sample = np.random.choice(population, num_pts) means[ns] = sample.mean() stdevs[ns] = sample.std() # Compute and print the mean() and std() for the sample statistic distributions print("Means: center={:>6.2f}, spread={:>6.2f}".format(means.mean(), means.std())) print("Stdevs: center={:>6.2f}, spread={:>6.2f}".format(stdevs.mean(), stdevs.std())) # - # + ## Visualizing Variation of a Statistic # Generate sample distribution and associated statistics means, stdevs = get_sample_statistics(population, num_samples=100, num_pts=1000) # Define the binning for the histograms mean_bins = np.linspace(97.5, 102.5, 51) std_bins = np.linspace(7.5, 12.5, 51) # Plot the distribution of means, and the distribution of stdevs fig = plot_hist(data=means, bins=mean_bins, data_name="Means", color='green') fig = plot_hist(data=stdevs, bins=std_bins, data_name="Stdevs", color='red') # - # + ## Estimation of Population Parameters # Compute the mean and standard deviation of the sample_distances sample_mean = np.mean(sample_distances) sample_stdev = np.std(sample_distances) # Use the sample mean and stdev as estimates of the population model parameters mu and 
sigma population_model = gaussian_model(sample_distances, mu=sample_mean, sigma=sample_stdev) # Plot the model and data to see how they compare fig = plot_data_and_model(sample_distances, population_model) # - # + ## Maximizing Likelihood, Part 1 # Compute sample mean and stdev, for use as model parameter value guesses mu_guess = np.mean(sample_distances) sigma_guess = np.std(sample_distances) # For each sample distance, compute the probability modeled by the parameter guesses probs = np.zeros(len(sample_distances)) for n, distance in enumerate(sample_distances): probs[n] = gaussian_model(distance, mu=mu_guess, sigma=sigma_guess) # Compute and print the log-likelihood as the sum() of the log() of the probabilities loglikelihood = np.sum(np.log(probs)) print('For guesses mu={:0.2f} and sigma={:0.2f}, the loglikelihood={:0.2f}'.format(mu_guess, sigma_guess, loglikelihood)) # - # + ## Maximizing Likelihood, Part 2 # Create an array of mu guesses, centered on sample_mean, spread out +/- by sample_stdev low_guess = sample_mean - 2*sample_stdev high_guess = sample_mean + 2*sample_stdev mu_guesses = np.linspace(low_guess, high_guess, 101) # Compute the loglikelihood for each model created from each guess value loglikelihoods = np.zeros(len(mu_guesses)) for n, mu_guess in enumerate(mu_guesses): loglikelihoods[n] = compute_loglikelihood(sample_distances, mu=mu_guess, sigma=sample_stdev) # Find the best guess by using logical indexing, the print and plot the result best_mu = mu_guesses[loglikelihoods==np.max(loglikelihoods)] print('Maximum loglikelihood found for best mu guess={}'.format(best_mu)) fig = plot_loglikelihoods(mu_guesses, loglikelihoods) # - # + ## Bootstrap and Standard Error # Use the sample_data as a model for the population population_model = sample_data # Resample the population_model 100 times, computing the mean each sample for nr in range(num_resamples): bootstrap_sample = np.random.choice(population_model, size=resample_size, replace=True) 
bootstrap_means[nr] = np.mean(bootstrap_sample) # Compute and print the mean, stdev of the resample distribution of means distribution_mean = np.mean(bootstrap_means) standard_error = np.std(bootstrap_means) print('Bootstrap Distribution: center={:0.1f}, spread={:0.1f}'.format(distribution_mean, standard_error)) # Plot the bootstrap resample distribution of means fig = plot_data_hist(bootstrap_means) # - # + ## Estimating Speed and Confidence # Resample each preloaded population, and compute speed distribution population_inds = np.arange(0, 99, dtype=int) for nr in range(num_resamples): sample_inds = np.random.choice(population_inds, size=100, replace=True) sample_inds.sort() sample_distances = distances[sample_inds] sample_times = times[sample_inds] a0, a1 = least_squares(sample_times, sample_distances) resample_speeds[nr] = a1 # Compute effect size and confidence interval, and print speed_estimate = np.mean(resample_speeds) ci_90 = np.percentile(resample_speeds, [5, 95]) print('Speed Estimate = {:0.2f}, 90% Confidence Interval: {:0.2f}, {:0.2f} '.format(speed_estimate, ci_90[0], ci_90[1])) # - # + ## Visualize the Bootstrap # Create the bootstrap distribution of speeds resample_speeds = compute_resample_speeds(distances, times) speed_estimate = np.mean(resample_speeds) percentiles = np.percentile(resample_speeds, [5, 95]) # Plot the histogram with the estimate and confidence interval fig, axis = plt.subplots() hist_bin_edges = np.linspace(0.0, 4.0, 21) axis.hist(resample_speeds, hist_bin_edges, color='green', alpha=0.35, rwidth=0.8) axis.axvline(speed_estimate, label='Estimate', color='black') axis.axvline(percentiles[0], label=' 5th', color='blue') axis.axvline(percentiles[1], label='95th', color='blue') axis.legend() plt.show() # - # + ## Test Statistics and Effect Size # Create two poulations, sample_distances for early and late sample_times. # Then resample with replacement, taking 500 random draws from each population. 
group_duration_short = sample_distances[sample_times < 5] group_duration_long = sample_distances[sample_times > 5] resample_short = np.random.choice(group_duration_short, size=500, replace=True) resample_long = np.random.choice(group_duration_long, size=500, replace=True) # Difference the resamples to compute a test statistic distribution, then compute its mean and stdev test_statistic = resample_long - resample_short effect_size = np.mean(test_statistic) standard_error = np.std(test_statistic) # Print and plot the results print('Test Statistic: mean={:0.2f}, stdev={:0.2f}'.format(effect_size, standard_error)) fig = plot_test_statistic(test_statistic) # - # + ## Null Hypothesis # In this exercise, we formulate the null hypothesis as # short and long time durations have no effect on total distance traveled. # We interpret the "zero effect size" to mean that if we shuffled samples # between short and long times, so that two new samples each have a mix of # short and long duration trips, and then compute the test statistic, on average it will be zero. # In this exercise, your goal is to perform the shuffling and resampling. Start with # the predefined group_duration_short and group_duration_long which are the # un-shuffled time duration groups. # Shuffle the time-ordered distances, then slice the result into two populations. 
# Pool both duration groups, shuffle, and slice into two new populations.
shuffle_bucket = np.concatenate((group_duration_short, group_duration_long))
np.random.shuffle(shuffle_bucket)
slice_index = len(shuffle_bucket) // 2
shuffled_half1 = shuffle_bucket[0:slice_index]
# BUG FIX: the original used shuffle_bucket[slice_index+1:], which silently
# dropped the element at position slice_index, so the two "halves" did not
# cover the full shuffled population. Start the second half at slice_index.
shuffled_half2 = shuffle_bucket[slice_index:]

# Create new samples from each shuffled population, and compute the test statistic
resample_half1 = np.random.choice(shuffled_half1, size=500, replace=True)
resample_half2 = np.random.choice(shuffled_half2, size=500, replace=True)
test_statistic = resample_half2 - resample_half1

# Compute and print the effect size; under the null hypothesis (shuffled
# groups) this should be close to zero on average.
effect_size = np.mean(test_statistic)
print('Test Statistic, after shuffling, mean = {}'.format(effect_size))
# -

# + ## Visualizing Test Statistics
# From the unshuffled groups, compute the test statistic distribution
resample_short = np.random.choice(group_duration_short, size=500, replace=True)
resample_long = np.random.choice(group_duration_long, size=500, replace=True)
test_statistic_unshuffled = resample_long - resample_short

# Shuffle two populations, cut in half, and recompute the test statistic
shuffled_half1, shuffled_half2 = shuffle_and_split(group_duration_short, group_duration_long)
resample_half1 = np.random.choice(shuffled_half1, size=500, replace=True)
resample_half2 = np.random.choice(shuffled_half2, size=500, replace=True)
test_statistic_shuffled = resample_half2 - resample_half1

# Plot both the unshuffled and shuffled results and compare
fig = plot_test_statistic(test_statistic_unshuffled, label='Unshuffled')
fig = plot_test_statistic(test_statistic_shuffled, label='Shuffled')
# -

# + ## Visualizing the P-Value
# Compute the test stat distribution and effect size for two population groups
test_statistic_unshuffled = compute_test_statistic(group_duration_short, group_duration_long)
effect_size = np.mean(test_statistic_unshuffled)

# Randomize the two populations, and recompute the test stat distribution
shuffled_half1, shuffled_half2 = shuffle_and_split(group_duration_short, group_duration_long)
test_statistic_shuffled = compute_test_statistic(shuffled_half1, shuffled_half2) # Compute the p-value as the proportion of shuffled test stat values >= the effect size condition = test_statistic_shuffled >= effect_size p_value = len(test_statistic_shuffled[condition]) / len(test_statistic_shuffled) # Print p-value and overplot the shuffled and unshuffled test statistic distributions print("The p-value is = {}".format(p_value)) fig = plot_test_stats_and_pvalue(test_statistic_unshuffled, test_statistic_shuffled) # -
Introduction to Linear Modeling in Python/Introduction to Linear Modeling in Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <H1>Part 1 : Notebook from here contains part of web scrapping given wikipedia page, creating dataframe from it and applying preprocessing steps # <H3><B>Installing required libraries pip install beautifulsoup4 pip install lxml conda install -c conda-forge folium=0.5.0 --yes pip install geopy # <H3>Importing all required files # + import requests # The "requests" library is the de facto standard for making HTTP requests in Python import pandas as pd # library for data analsysis import numpy as np # library to handle data in a vectorized manner import random # library for random number generation from geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values # libraries for displaying images from IPython.display import Image from IPython.core.display import HTML from IPython.display import display_html # tranforming json file into a pandas dataframe library from pandas.io.json import json_normalize import folium # plotting library from bs4 import BeautifulSoup #Beautiful Soup is a Python package for parsing HTML and XML documents. 
# It creates a parse tree for parsed pages that can be used to extract data
# from HTML, which is useful for web scraping.
from sklearn.cluster import KMeans  # For KMeans clustering
import matplotlib as mplt  # For plotting clusters
# -

# <H3>Web scraping
source = requests.get("https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M").text
# source is an HTML file, so now we use bs4 to parse it
soup = BeautifulSoup(source, 'lxml')
# Running the "source" document through Beautiful Soup gives us a BeautifulSoup
# object, which represents the document as a nested data structure.
print(soup.title)

# + To see the nested data structure:
# print(soup.prettify())
# -

tab = str(soup.table)  # Converting soup.table into string format
display_html(tab, raw=True)

# <H3>Converting HTML into dataframe, performing preprocessing and cleaning
df0 = pd.read_html(tab)
df1 = df0[0]
df1

# Dropping rows where boroughs are not assigned
df1 = df1[df1.Borough != "Not assigned"]

# Combining neighbourhoods with same postal address.
# BUG FIX: the original grouped by ["Postal Code", "Neighbourhood"], which
# joined the *Borough* strings and never actually merged neighbourhoods that
# share a postcode. Group by postcode and borough, joining the neighbourhoods.
df2 = df1.groupby(["Postal Code", "Borough"], sort=False).agg(', '.join)

# Resetting index
df2.reset_index(inplace=True)

# Replacing the name of the neighbourhoods which are 'Not assigned' with
# names of the Borough.
df2['Neighbourhood'] = np.where(df2['Neighbourhood'] == 'Not assigned', df2['Borough'], df2['Neighbourhood'])
df2

# Shape of data frame
df2.shape

# <H1>Part 2 : Notebook from here contains part of importing data for
# latitude and longitude and merging it with previously generated data frame

# <H3>Importing the csv file containing the latitudes and longitudes for
# various neighbourhoods in Canada
geo_df = pd.read_csv('http://cocl.us/Geospatial_data')
geo_df.head()
df2.head()

# <H3>Merging both dataframes
df2.rename(columns={'Postal Code': 'Postcode'}, inplace=True)
geo_df.rename(columns={'Postalcode': 'Postcode'}, inplace=True)
df2.head()
geo_df.head()
df_merged = pd.merge(geo_df, df2, on="Postcode")
df_merged.head()

# <H1>Part 3 : Notebook from here contains part
# of clustering and plotting neighbourhoods

# <H3>
# Getting all the rows from the data frame which contain Toronto in their
# Borough. NOTE: .copy() makes df_final an independent frame so the
# insert() below modifies it reliably instead of raising a
# SettingWithCopyWarning on a view of df_merged.
df_final = df_merged[df_merged['Borough'].str.contains('Toronto', regex=False)].copy()
df_final.head()

# <H3>
# Visualizing all the Neighbourhoods of the above data frame using Folium
# +
map_toronto = folium.Map(location=[43.651070, -79.347015], zoom_start=10)
for lat, lng, borough, neighbourhood in zip(df_final['Latitude'], df_final['Longitude'], df_final['Borough'], df_final['Neighbourhood']):
    label = '{}, {}'.format(neighbourhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat, lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7,
        parse_html=False).add_to(map_toronto)
map_toronto
# -

# <H3>Using KMeans clustering
k = 5
# Keep only the numeric (Latitude/Longitude) columns for clustering.
# Use the keyword form drop(columns=...) instead of the deprecated
# positional axis argument drop([...], 1).
toronto_clustering = df_final.drop(columns=['Postcode', 'Borough', 'Neighbourhood'])
kmeans = KMeans(n_clusters=k, random_state=0).fit(toronto_clustering)
kmeans.labels_
df_final.insert(0, 'Cluster Labels', kmeans.labels_)
df_final.head()

# +
import matplotlib.cm as cm
import matplotlib.colors as colors

# create map
map_clusters = folium.Map(location=[43.651070, -79.347015], zoom_start=10)

# set color scheme for the clusters
x = np.arange(k)
ys = [i + x + (i * x) ** 2 for i in range(k)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]

# add markers to the map
markers_colors = []
for lat, lon, neighbourhood, cluster in zip(df_final['Latitude'], df_final['Longitude'], df_final['Neighbourhood'], df_final['Cluster Labels']):
    label = folium.Popup(' Cluster ' + str(cluster), parse_html=True)
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[cluster - 1],
        fill=True,
        fill_color=rainbow[cluster - 1],
        fill_opacity=0.7).add_to(map_clusters)
map_clusters
# -
Capstone_Project_Week_3_Assignment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # standard imports import torch import torch.nn as nn from sklearn.datasets import make_moons from generate2d import sample2d, energy2d # FrEIA imports import FrEIA.framework as Ff import FrEIA.modules as Fm BATCHSIZE = 1000 N_DIM = 2 # we define a subnet for use inside an affine coupling block # for more detailed information see the full tutorial def subnet_fc(dims_in, dims_out): return nn.Sequential(nn.Linear(dims_in, 512), nn.ReLU(), nn.Linear(512, dims_out)) # a simple chain of operations is collected by ReversibleSequential inn = Ff.SequenceINN(N_DIM) for k in range(8): inn.append(Fm.AllInOneBlock, subnet_constructor=subnet_fc, permute_soft=True) optimizer = torch.optim.Adam(inn.parameters(), lr=0.001) # a very basic training loop for i in range(10000): optimizer.zero_grad() # sample data from the moons distribution # data, label = make_moons(n_samples=BATCHSIZE, noise=0.05) data = sample2d('8gaussians', BATCHSIZE) x = torch.Tensor(data) # pass to INN and get transformed variable z and log Jacobian determinant z, log_jac_det = inn(x) # calculate the negative log-likelihood of the model with a standard normal prior loss = 0.5*torch.sum(z**2, 1) - log_jac_det loss = loss.mean() / N_DIM # backpropagate and update the weights loss.backward() optimizer.step() if i % 100==0: print(i,loss) # + import matplotlib.pyplot as plt # sample from the INN by sampling from a standard normal and transforming # it in the reverse direction z = torch.randn(BATCHSIZE, N_DIM) samples, _ = inn(z, rev=True) plt.plot(samples.detach().numpy()[:,0], samples.detach().numpy()[:,1],'.') # - data = sample2d('8gaussians', 1000) plt.plot(data[:,0],data[:,1],'.')
Tutorials/toy_8-modes/.ipynb_checkpoints/toy2d_density_estimation-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from pymongo import MongoClient
import pandas as pd
import numpy as np
# FIX: `from scipy import interp` was removed in SciPy >= 1.3; np.interp is
# the drop-in replacement with the same (x, xp, fp) signature and is used
# below in the ROC-interpolation step.
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
# -

# Load per-week features and final scores from MongoDB, then merge on username.
conn = MongoClient('172.16.58.3', 27017)
db = conn['NCU_CHKB']
collection = db['chkb_all_features_by_week']
all_features = pd.DataFrame(list(collection.find()))
all_features = all_features.drop(['_id'], axis=1)
collection = db['chkb_final_score']
final_score = pd.DataFrame(list(collection.find()))
final_score = final_score.drop(['_id'], axis=1)
all_features.head()

chkb = pd.merge(all_features, final_score, on='username')
chkb.fillna(0.0, inplace=True)
chkb.head()

chkb[chkb.columns.difference(['username'])] = chkb[chkb.columns.difference(['username'])].astype(float)
data = chkb.groupby('username').sum().div(6)
# Binary target: True when the final score is a fail (< 60).
data['class'] = data['final_score'] < 60
data = data.drop(['final_score'], axis=1)
data = data.drop(['week'], axis=1)
data

# +
# Import some data to play with
# iris = datasets.load_iris()
# kyoto_y = kyoto_1['score']
# kyoto_x = kyoto_1.drop(['score'], axis=1)
X = data.drop(['class'], axis=1)
y = data['class']
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# #############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True, random_state=random_state)

# +
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)

plt.figure(figsize=(10, 5))
i = 0
for train, test in cv.split(X, y):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area under the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    # Interpolate the TPR onto a common FPR grid so folds can be averaged.
    tprs.append(np.interp(mean_fpr, fpr, tpr))
    tprs[-1][0] = 0.0
    roc_auc = auc(fpr, tpr)
    aucs.append(roc_auc)
    plt.plot(fpr, tpr, lw=1, alpha=0.3,
             label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
    i += 1

plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)

mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
         label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
         lw=2, alpha=.8)

std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                 label=r'$\pm$ 1 std. dev.')

plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
# -
ROCNCUCHKB.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.6 64-bit # name: python3 # --- # + # #!/usr/bin/env python3 import sys sys.path.insert(0, '/Users/aymericvie/Documents/GitHub/evology/evology/code/') from main import * time = 50_000 from parameters import * # wealth_coordinates = [0.42, 0.33, 0.25] wealth_coordinates = [1/3, 1/3, 1/3] # def main(mode, MAX_GENERATIONS, PROBA_SELECTION, POPULATION_SIZE, CROSSOVER_RATE, MUTATION_RATE, wealth_coordinates, tqdm_display): RdSearch = main("between", time, 0, 100, 0, MUTATION_RATE, wealth_coordinates, False) RdSearch.to_csv("rdsearch.csv") ProfAdapt = main("between", time, PROBA_SELECTION, 100, 0, 0, wealth_coordinates, False) ProfAdapt.to_csv("profadapt.csv") Static = main("static", time, 0, 100, 0, 0, wealth_coordinates, False) Static.to_csv("static.csv") # - Combined = main("between", time, PROBA_SELECTION, 100, 0, MUTATION_RATE, wealth_coordinates, False) Combined.to_csv("combined.csv")
evology/bin/SingleRun_AdapComp/data/GenData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import random import numpy as np import mmcv from tools_custom.dataset import * from pathlib import Path import pprint from PIL import Image from collections import defaultdict from tools_custom.config import cfg from mmdet.datasets import get_dataset train_dataset = get_dataset(cfg.data.train) # - val = mmcv.load('/home/arias/Projects/python/jinnan_chusai_mmdet/data/dataset/val.json') val.keys() sample # + def get_sample(idx=None): if idx is None: idx = random.randint(0, len(train_dataset.img_infos)) img = train_dataset.img_infos[idx].copy() img['ann'] = train_dataset.get_ann_info(idx) return img sample = get_sample() img = Image.open(str(Path(train_dataset.img_prefix, sample['filename']))) bboxes = sample['ann']['bboxes'] labels = sample['ann']['labels'] polygons = sample['ann']['mask_polys'] mmcv.imshow_polygon_bboxes(img, bboxes=bboxes, polygons=polygons, polygon_color='red', labels=labels, thickness=2, text_color='green') # + def get_sample(idx=None): if idx is None: idx = random.randint(0, len(train_dataset.img_infos)) img = train_dataset.img_infos[idx].copy() img['ann'] = train_dataset.get_ann_info(idx) return img sample = get_sample() img = Image.open(str(Path(train_dataset.img_prefix, sample['filename']))) bboxes = sample['ann']['bboxes'] labels = sample['ann']['labels'] polygons = sample['ann']['mask_polys'] mmcv.imshow_polygon_bboxes(img, bboxes=bboxes, polygons=polygons, polygon_color='red', labels=labels, thickness=2, text_color='green') # - import numpy as np import cv2 import matplotlib.pyplot as plt # + def create_mask_for_plant(image, color_space='bgr'): if color_space=='bgr': image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) elif color_space=='rgb': image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV) else: raise 
TypeError('color_space must be "bgr" or "rgb"') hsv = [27, 56, 99] sensitivity = 20 lower_hsv = np.array([60 - sensitivity, 100, 50]) upper_hsv = np.array([60 + sensitivity, 255, 255]) mask = cv2.inRange(image_hsv, lower_hsv, upper_hsv) kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11)) mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel) return mask def segment_plant(image): mask = create_mask_for_plant(image, color_space='rgb') output = cv2.bitwise_and(image, image, mask = mask) return output def sharpen_image(image): image_blurred = cv2.GaussianBlur(image, (0, 0), 3) image_sharp = cv2.addWeighted(image, 1.5, image_blurred, -0.5, 0) return image_sharp # + image = np.array(img) image_mask = create_mask_for_plant(image, color_space='rgb') image_segmented = segment_plant(image) image_sharpen = sharpen_image(image_segmented) fig, axs = plt.subplots(1, 4, figsize=(20, 20)) axs[0].imshow(image) axs[1].imshow(image_mask) axs[2].imshow(image_segmented) axs[3].imshow(image_sharpen)
visualize.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:pangeo] # language: python # name: conda-env-pangeo-py # --- # # Visualize ETOPO1 topography data import xarray as xr import hvplot.xarray import fsspec from dask.distributed import Client import geoviews as gv ds = xr.open_zarr(fsspec.get_mapper('s3://esip-qhub/noaa/bathy/etopo1_bed_g2', requester_pays=True), consolidated=True) # ### North America subset na = ds.topo.sel(lon=slice(-130,-50),lat=slice(15,50)) # Set land and water deeper that 1000m to NaN na = na.where(na<0) na = na.where(na>-1000) # Visualize with [Holoviz](holoviz.org) tools bathy_grid = na.hvplot.quadmesh(x='lon', y='lat', rasterize=True, geo=True, cmap='viridis') contours = na.hvplot.contour(x='lon', y='lat', levels=[-600, -100], cmap=['#000000'], geo=True) # Overlay bathy color-shaded grid, contours and ESRI tiles as basemap (bathy_grid * contours) * gv.tile_sources.ESRI
06_Bathy_Explorer.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Investigating star formation in the W5 region # ### About this notebook # # This notebook demonstrates how to use the glue-jupyter package to explore Astronomical data for W5, which is a region in space where stars are currently forming. However, much of the functionality shown here would be applicable to other image and tabular datasets. In this notebook, we follow a similar analysis to that shown in the [Getting started](http://docs.glueviz.org/en/latest/getting_started/index.html) guide for the Qt-based glue application. # # ### About the data # # The data we will be using are ``w5.fits``, an image at 12µm taken with the [Wide-field Infrared Survey Explorer (WISE)](https://en.wikipedia.org/wiki/Wide-field_Infrared_Survey_Explorer), and ``w5_psc.csv``, a table of forming stars found in this region using the [Spitzer Space Telescope](http://www.spitzer.caltech.edu/). The data can be found in https://github.com/glue-viz/glue-example-data/tree/master/Astronomy/W5. # # For convenience we can use the ``require_data`` function to # automatically download them here: from glue_jupyter.data import require_data require_data('Astronomy/W5/w5.fits') require_data('Astronomy/W5/w5_psc.csv') # ### Starting up the glue Jupyter application # # Let's begin by creating a glue Jupyter application: import glue_jupyter as gj app = gj.jglue() # In the rest of this notebook, the ``app`` object will come in handy to load data, create new visualizations, create links, and so on. You'll notice that there are a few buttons above as well as an empty box. The buttons show the current selection mode (which will come in handy later on), and the box shows a list of the subsets (which is empty for now). # # # ### Reading in data # # Let's now read in some data! 
We can use the ``load_data`` method to do this: data_image = app.load_data('w5.fits') data_catalog = app.load_data('w5_psc.csv') # This is equivalent to just dragging and dropping a file onto glue, and it will automatically try and infer the type of data based on the available data loaders. Let's take a quick look at one of the datasets: print(data_catalog) # The ``Main components`` are the columns from the file, whereas the ``Coordinate components`` are auto-generated by glue (we can just ignore these). # # ### Making a scatter plot # # We are now ready to plot the data. Let's start off by making a scatter plot by calling ``scatter2d`` and specifying the two columns we want to plot and the dataset to get these columns from: scatter_viewer = app.scatter2d(x='[4.5]-[5.8]', y='[8.0]', data=data_catalog) # We can change which attributes are shown by modifying the viewer ``state``: scatter_viewer.state.y_att = data_catalog.id['[5.8]'] # ### Making a histogram # # Let's now make a histogram: histogram_viewer = app.histogram1d(x='Jmag', data=data_catalog) # As for the scatter plot, this is fully interactive, and you can control for example whether the histogram is normalized, or shows the cumulative distribution. # ### Making a selection # # Let's now make a selection - to do this, go to the histogram viewer, and click on the ``brush x`` button, then click and drag a range in the histogram. The selected range should appear in red, and the points should also show up in the scatter viewer. The subset will also appear in the list of subsets at the top of this notebook. You can try clicking and dragging again to select a different set of points, and you can do the same using the three brush tools in the scatter viewer. # # ### Showing the image # # The power of glue comes from the ability to link (or 'glue') together different datasets. 
Before we do that, let's just take a look at the image data that we read in before: image_viewer = app.imshow(data=data_image) # If you go to the 'w5' tab you will be able to adjust the appearance of the image. You can then also pan and zoom around (using two finger scrolling). # # ### Linking datasets # # We now get to the most important feature of glue - the ability to link datasets. In this case, the tabular data has columns called ``RAJ2000`` and ``DEJ2000`` that correspond to the ``Right Ascension`` and ``Declination`` attributes in the image data, which we can see here: print(data_image) # Since the attributes mean the same thing, we can use simple identity links between these attributes, but note that glue does support the ability to have non-identity links between attributes (with any arbitrary function). app.add_link(data_catalog, 'RAJ2000', data_image, 'Right Ascension') app.add_link(data_catalog, 'DEJ2000', data_image, 'Declination') # Finally, let's add the tabular data to the image viewer: image_viewer.add_data(data_catalog) # Now scroll back up and you should see the points, including the subset of points, overlaid on the image!
notebooks/Astronomy/W5/W5 Tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.10 64-bit (''venv'': venv)'
#     name: python3
# ---

# Import packages and modules
import pandas as pd
from sqlalchemy import create_engine
import pymysql
import cryptography
from dotenv import load_dotenv, dotenv_values
import os

# Environment variables (credentials are kept out of the notebook via .env)
load_dotenv(verbose=True)
USER = os.getenv("USER")
# FIX: the original line was corrupted ("PASSWORD = <PASSWORD>(...)") — a
# redacted secret placeholder that is not valid Python. Restore the
# os.getenv lookup used for every other variable.
PASSWORD = os.getenv("PASSWORD")
DATABASE = os.getenv("DATABASE_NAME")
SERVER = os.getenv("SERVER")
PORT = os.getenv("PORT")

# Engine configuration
cnx_str = "mysql+pymysql://{user}:{pw}@{server}:{port}/{db}".format(user=USER, pw=PASSWORD, server=SERVER, port=PORT, db=DATABASE)
cnx = create_engine(cnx_str).connect()

# Read data from table
df = pd.read_sql("select * from gds_sale_transactions", con=cnx)

# Hiding df.head() to remove PIIs
# df.head()

# Write data to table
df.to_sql("sale_table", cnx, if_exists='append')
cnx.close()
gds_automation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # Imports and random seeds import tensorflow as tf import matplotlib.pyplot as plt # %matplotlib inline import numpy as np np.random.seed(7) # - print(tf.__version__) # + # Load data and normalize fashion_mnist = mnist = tf.keras.datasets.fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 # - # Helper function to generate 100 random indexes specific to labels def generate_random_subset(label, ds_type): if ds_type == 'train': # Extract the label indexes index, = np.where(y_train==label) index_list = np.array(index) # Randomly shuffle the indexes np.random.shuffle(index_list) # Return 100 indexes return index_list[:100] elif ds_type == 'test': # Extract the label indexes index, = np.where(y_test==label) index_list = np.array(index) # Randomly shuffle the indexes np.random.shuffle(index_list) # Return 30 indexes return index_list[:30] # Decode the class names class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] # + # Generate the training subset indexes = [] for label in np.unique(y_train): index = generate_random_subset(label, 'train') indexes.append(index) all_indexes = [ii for i in indexes for ii in i] x_train_s, y_train_s = x_train[all_indexes[:1000]],\ y_train[all_indexes[:1000]] # Show a random image from the newly created training subset rand_num = np.random.randint(1, 1000) plt.imshow(x_train_s[rand_num],cmap=plt.cm.binary) plt.show() print('\nThe apparel is: ',str(class_names[y_train_s[rand_num]])) # + # Generate the test subset indexes = [] for label in np.unique(y_test): index = generate_random_subset(label, 'test') indexes.append(index) all_indexes = [ii for i in indexes for ii in i] 
x_test_s, y_test_s = x_test[all_indexes[:300]],\ y_test[all_indexes[:300]] # Show a random image from the newly created test subset rand_num = np.random.randint(1, 300) plt.imshow(x_test_s[rand_num],cmap=plt.cm.binary) plt.show() print('\nThe apparel is: ',str(class_names[y_test_s[rand_num]])) # + # Baseline model model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(128, activation='relu',kernel_initializer='he_normal'), tf.keras.layers.Dense(10, activation='softmax') ]) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary() # + # Model plot from tensorflow.keras.utils import plot_model # Needs pydot and graphviz plot_model(model, to_file='model.png') img = plt.imread('model.png') plt.imshow(img) plt.show() # - # Train the network and validate model.fit(x_train_s, y_train_s, validation_data=(x_test_s, y_test_s), epochs=5, batch_size=32) # Predict on a few test images def show_single_preds(index): pred = model.predict_classes(np.expand_dims(x_test_s[index], axis=0)) print('Model\'s prediction: ',str(class_names[np.asscalar(pred)])) print('\nReality:', str(class_names[y_test_s[index]])) plt.imshow(x_test_s[index], cmap=plt.cm.binary) plt.show() show_single_preds(12) show_single_preds(101) show_single_preds(45) # Visulization of models activations layer_outputs = [layer.output for layer in model.layers[:3]] activation_model = tf.keras.models.Model(inputs=model.input, outputs=layer_outputs) # Get the raw image pixels: first test image in this case img_tensor = np.expand_dims(x_test_s[0], axis=0) # Get the activations activations = activation_model.predict(img_tensor) # What is there in the second layer? second_layer_activation = activations[1] print(second_layer_activation.shape) # Reshape reshaped = second_layer_activation.reshape(1, 16, 8) plt.matshow(reshaped[0, :, :], cmap=plt.cm.binary) # What is there in the output layer? 
# What is there in the output layer? (10 class probabilities)
output_layer_activation = activations[2]
print(output_layer_activation.shape)
# Reshape for display as a small grid
reshaped = output_layer_activation.reshape(1, 5, 2)
plt.matshow(reshaped[0, :, :], cmap=plt.cm.binary)

# +
# Plotting model's confusion matrix
import scikitplot as skplt

# FIX: Sequential.predict_classes was removed in TensorFlow 2.6; take the
# argmax of the predicted class probabilities instead (same result).
preds = np.argmax(model.predict(x_test_s), axis=1)
skplt.metrics.plot_confusion_matrix(y_test_s, preds, figsize=(7, 7))
plt.show()
# -

# Saving the subsets for reproducibility
np.save('x_train_s.npy', x_train_s)
np.save('y_train_s.npy', y_train_s)
np.save('x_test_s.npy', x_test_s)
np.save('y_test_s.npy', y_test_s)

# Load and verify.
# FIX: the arrays were saved to the working directory above, but the original
# loaded from 'tmp/x_train_s.npy' — a path that was never written. Load the
# file that was actually saved.
a = np.load('x_train_s.npy')
plt.imshow(a[0], cmap=plt.cm.binary)
plt.show()
notebooks/Embracing simplicity - Subsets and baseline models.ipynb
# --- # jupyter: # jupytext: # formats: ipynb,md:myst # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # (documentation_further_information)= # # # Further information # # ## What is documentation # # Documentation can have many different interpretations. A good definition is # given in {cite}`martraire2019living`: # # > The process of transferring valuable knowledge to other people now and also to # > people in the future. # # ```{note} # It is important to realise that the target of the documentation can be the # writer of the software itself at a future date. # ``` # # There are two types of documentation: # # - **Internal documentation** which includes things like docstrings and a good # choice of variable name. # - **External documentation** which includes things like `README.md` and other # separate documentation. # # For a software project to be well documented it needs **both** internal and # external documentation. # # In {cite}`martraire2019living` there are 4 properties of documentation: # # - Reliable: it needs to be accurate. # - Low effort: it should require minimal effort when changes are made to the code # base. # - Collaborative: it should be a tool from which collaboration can occur. # - Insightful: it should give information not only to be able to use the code but # also to understand specific reasons why certain decisions have been made as to # its design. # # ## What is the purpose of the four separate sections in documentation # # As discussed in <https://documentation.divio.com>: # # > "Tutorials are lessons that take the reader by the hand through a series of # > steps to complete a project of some kind. They are what your project needs in # > order to show a beginner that they can achieve something with it." 
# # > "How-to guides take the reader through the steps required to solve a # > real-world problem" # # > "Reference guides are technical descriptions of the machinery and how to # > operate it." # # > "Explanation, or discussions, clarify and illuminate a particular topic. They # > broaden the documentation’s coverage of a topic." # # It is natural when describing a project for the boundaries between these four # topics to become fuzzy. Thus, having them explicitly in four separate sections # ensures the reader is able to specifically find what they need. # # ## What alternatives are there to writing documentation in `README.md` # # A single `README.md` file is a good way to start documenting code. However as a # project grows it could be beneficial to use some other tools. One such example # of this is to use `sphinx`: <https://www.sphinx-doc.org/en/>. This uses a # different markup language called _restructured text_ # <https://docutils.sourceforge.io/rst.html> and helps build more complex # documents but also interfaces to the code itself if necessary. So for example it # is possible to include the code docstrings directly in the documentation (a good # way of adding to the reference section).
book/building-tools/06-documentation/why/.main.md.bcp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- #Installation """ $ pip install xcp-abcd. Alternatively, use Anaconda and get the conda packages from the conda-forge channel, which supports both Unix, Mac OS and Windows. """""" #Introduction """ This package uses the outputs of fmri-prep to generate a time series that has "nuisance" signals (generated in fmri-prep) removed from it. This is the thus the step between running fmri-prep and, for example, parcellating the time series and generating correlation matrices. """ # + slideshow={"slide_type": "skip"} import sys,os sys.path.insert(0, os.path.abspath("/Users/adebimpe/Documents/xcp_surface/xcp_surface/")) import pandas as pd from utils import read_ndata def readjson(jsonfile): import json with open(jsonfile) as f: data = json.load(f) return data # + [markdown] slideshow={"slide_type": "slide"} # ### <center> Update on the xcp-abcd </center> # #### Completed modules # ##### 1. Selection of confound matrix (Ciric .et .al 2017) # <img src="confoundmatrix.jpg" width="400"> # # DCAN BULK (DB)- 24P + WM + CSF +GS # # ##### 2. Regression # ##### 3. 
filtering # # They are all modular, independent and scalable # # + slideshow={"slide_type": "slide"} #Example of the ouputs of fmri-prep ran on a single ABCD subject and task (gambling) #This is the input you need to run XCP # %%bash tree /Users/adebimpe/Box/projects/xcpengine/sub-01/func # + slideshow={"slide_type": "slide"} #Let's define the files we are going to use: #this is the actual time series of the data we care about datafile='/Users/adebimpe/Box/projects/xcpengine/sub-01/func/sub-01_task-mixedgamblestask_run-1_space-fsLR_den-91k_bold.dtseries.nii' datafilejson='/Users/adebimpe/Box/projects/xcpengine/sub-01/func/sub-01_task-mixedgamblestask_run-1_space-fsLR_den-91k_bold.dtseries.json' # + slideshow={"slide_type": "fragment"} dataf = read_ndata(datafile) dataf.shape # + slideshow={"slide_type": "fragment"} """ cifti details: the ...fsLR_den-91k_bold.dtseries.nii file is what is called a cifti file for our purposes, this is Grayordinates files. CIFTI is a container format that holds both volumetric (regularly sampled in a grid) and surface (sampled on a triangular mesh) samples. Sub-cortical time series are sampled on a regular grid derived from one MNI template, while cortical time series are sampled on surfaces projected from the [Glasser2016] template. If CIFTI outputs are requested (with the --cifti-outputs argument), the BOLD series are also saved as dtseries.nii CIFTI2 files: """ #we can read in the cifti info from the json: datafjson=readjson(datafilejson) datafjson # + slideshow={"slide_type": "slide"} # confound timeseries matrix (2d) # read https://fmriprep.org/en/stable/outputs.html#confound-regressors-description for more information ''' Only a subset of these CompCor decompositions should be used for further denoising. The original Behzadi aCompCor implementation [Behzadi2007] can be applied using components from the combined masks, while the more recent Muschelli implementation [Muschelli2014] can be applied using the WM and CSF masks. 
''' #this is the time series of nuisance signal we want to not care about anymore confile='/Users/adebimpe/Box/projects/xcpengine/sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.tsv' confjson='/Users/adebimpe/Box/projects/xcpengine/sub-01/func/sub-01_task-mixedgamblestask_run-1_desc-confounds_timeseries.json' confj=readjson(confjson) confj['a_comp_cor_00'] # + slideshow={"slide_type": "fragment"} confreg=pd.read_csv(confile, delimiter="\t", encoding="utf-8") confreg.head() # + [markdown] slideshow={"slide_type": "slide"} # ### Nuissance regressors selection # + slideshow={"slide_type": "slide"} """ There are limitless ways to regress out nuisance signal Here, we contrain the choices to four approaches. 9P: 6P + 2P + global signal 24P: (6P + their derivative) and their square DB: 24P + 2P + global signal 36P: (9P+ their derivative) and their square """ from interfaces.confound import ConfoundMatrix conf = ConfoundMatrix() conf.help() # + slideshow={"slide_type": "slide"} from interfaces.confound import ConfoundMatrix conf = ConfoundMatrix() conf.inputs.in_file = datafile conf.inputs.params = "DB" conf.run() # + slideshow={"slide_type": "fragment"} conf._results['confound_file'] # + slideshow={"slide_type": "fragment"} #check the confound matrix import pandas as pd confound_matrix = pd.read_csv(conf._results['confound_file'],header=None) confound_matrix.shape #confound_matrix # + [markdown] slideshow={"slide_type": "slide"} # ### Linear Regression # # # # #### Demean and detrend(order=1) first # ```python # def demean_detrend_data(data,TR,order=1): # ''' # data should be voxels/vertices by timepoints dimension # order=1 # # order of polynomial detrend is usually obtained from # # order = floor(1 + TR*nVOLS / 150) # TR= repetition time # this can be use for both confound and bold # ''' # # # demean the data first, check if it has been demean # if abs(np.mean(data)) > 1e-7: # mean_data =np.mean(data,axis=1) # means_expanded = 
np.outer(mean_data, np.ones(data.shape[1])) # demeand = data - means_expanded # else: # demeand=data # # x = np.linspace(0,(data.shape[1]-1)*TR,num=data.shape[1]) # predicted=np.zeros_like(demeand) # for j in range(demeand.shape[0]): # model = np.polyfit(x,demeand[j,:],order) # predicted[j,:] = np.polyval(model, x) # return demeand - predicted # # ``` # + [markdown] slideshow={"slide_type": "slide"} # ### before regression, implemented with scikit-learn¶ # # ```python # def linear_regression(data,confound): # # ''' # data : # numpy ndarray- vertices by timepoints # confound: # nuissance regressors reg by timepoints # return: # residual matrix # ''' # regr = LinearRegression() # regr.fit(confound.T,data.T) # y_pred = regr.predict(confound.T) # return data - y_pred.T # ``` # + slideshow={"slide_type": "slide"} from interfaces.regression import regress reg = regress() reg.help() # + slideshow={"slide_type": "subslide"} reg = regress() reg.inputs.in_file = datafile reg.inputs.confounds = conf._results['confound_file'] reg.inputs.tr = 3 reg.run() # + slideshow={"slide_type": "fragment"} reg._results['res_file'] # + [markdown] slideshow={"slide_type": "slide"} # ### Filtering # # band pass filtering # # ```python # def butter_bandpass(data,fs,lowpass,highpass,order=2): # ''' # data : voxels/vertices by timepoints dimension # fs : sampling frequency,=1/TR(s) # lowpass frequency # highpass frequency # ''' # # nyq = 0.5 * fs # lowcut = np.float(highpass) / nyq # highcut = np.float(lowpass) / nyq # b, a = butter(order, [lowcut, highcut], btype='band') # mean_data=np.mean(data,axis=1) # y=np.zeros_like(data) # for i in range(data.shape[0]): # y[i,:] = filtfilt(b, a, data[i,:]) # #add mean back # mean_datag=np.outer(mean_data, np.ones(data.shape[1])) # return y + mean_datag # # ``` # + slideshow={"slide_type": "slide"} from interfaces.filtering import FilteringData filt=FilteringData() filt.help() # + slideshow={"slide_type": "slide"} from interfaces.filtering import 
FilteringData filt=FilteringData() filt.inputs.in_file = reg._results['res_file'] filt.inputs.tr = 3 filt.inputs.lowpass = 0.08 filt.inputs.highpass = 0.01 filt.run() # + slideshow={"slide_type": "fragment"} filt._results['filt_file'] # + [markdown] slideshow={"slide_type": "slide"} # ## Carpet plot # + slideshow={"slide_type": "fragment"} import numpy as np from utils import plot_svg,compute_dvars,read_ndata from scipy.stats import pearsonr # + slideshow={"slide_type": "fragment"} confound = pd.read_csv(confile,delimiter="\t", encoding="utf-8") fd = confound['framewise_displacement'].to_numpy() fd = np.nan_to_num(fd) dvarbf = compute_dvars(read_ndata(datafile)) dvaraf = compute_dvars(read_ndata(filt._results['filt_file'])) # + slideshow={"slide_type": "slide"} #Carpet plot before regression and filtering plot_svg(fdata=read_ndata(datafile),fd=fd,dvars=dvarbf,tr=3,filename='beforeprocessing') # + slideshow={"slide_type": "slide"} plot_svg(fdata=read_ndata(filt._results['filt_file']),fd=fd,dvars=dvaraf,tr=3,filename='afterprocessing') # + slideshow={"slide_type": "slide"} bfp = pearsonr(fd,dvarbf) afp = pearsonr(fd,dvaraf) print('FD and DVARS correlation before processing: r = ' +str(bfp[0])) print('FD and DVARS correlation after processing: r = '+ str(afp[0])) # + slideshow={"slide_type": "fragment"} import seaborn as sns import matplotlib.pyplot as plt ax=sns.regplot(x=fd,y=dvarbf,label='Before processing') ax=sns.regplot(x=fd,y=dvaraf,label='after processing') plt.xlabel("FD"); plt.ylabel("DVARS") # + slideshow={"slide_type": "slide"}
xcp_abcd/notebooks/preprocessing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# NYC bounding box and grid cell size in degrees (0.009 deg is roughly 1 km).
LAT_MIN, LAT_MAX = 40.494292, 40.925521
LONG_MIN, LONG_MAX = -74.254380, -73.693835
CELL_SIZE = 0.009


# +
def numeric_to_lat(numeric):
    """Return the latitude of the centre of grid row ``numeric``."""
    return LAT_MIN + (numeric + 1 / 2) * CELL_SIZE


def numeric_to_long(numeric):
    """Return the longitude of the centre of grid column ``numeric``."""
    return LONG_MIN + (numeric + 1 / 2) * CELL_SIZE


# -

# Guarded so the helpers above can be imported without re-running the whole
# pipeline; inside a notebook __name__ is "__main__", so behaviour there is
# unchanged.
if __name__ == "__main__":
    df = pd.read_csv("./taxi_final.csv")
    df.head()

    # Coerce the coordinates to numbers.  NOTE: the previous version first
    # round-tripped the columns through astype(str) before pd.to_numeric —
    # a no-op that has been dropped.
    df["pickup_latitude"] = pd.to_numeric(df["pickup_latitude"])
    df["pickup_longitude"] = pd.to_numeric(df["pickup_longitude"])

    # Bin pick-ups into the lat/long grid; labels are the integer cell indices.
    labels_lat = ["{0}".format(i) for i in range(len(np.arange(LAT_MIN, LAT_MAX, CELL_SIZE)))]
    df['group_lat'] = pd.cut(df.pickup_latitude,
                             np.arange(LAT_MIN, LAT_MAX + CELL_SIZE, CELL_SIZE),
                             labels=labels_lat)

    labels_long = ["{0}".format(i) for i in range(len(np.arange(LONG_MIN, LONG_MAX, CELL_SIZE)))]
    df['group_long'] = pd.cut(df.pickup_longitude,
                              np.arange(LONG_MIN, LONG_MAX + CELL_SIZE, CELL_SIZE),
                              labels=labels_long)

    # Total passengers per grid cell, weekday and pick-up hour.
    df_final = pd.DataFrame({"sum": df.groupby(["group_lat", "group_long", "weekday", "pick_hour"])["passenger_count"].sum()}).reset_index()
    df_final["group_lat"] = df_final["group_lat"].astype(int)
    df_final["group_long"] = df_final["group_long"].astype(int)

    # Replace cell indices by the coordinates of the cell centres.
    df_final["lat"] = df_final["group_lat"].map(numeric_to_lat)
    df_final["long"] = df_final["group_long"].map(numeric_to_long)

    df_return = df_final[["weekday", "pick_hour", "sum", "lat", "long"]]
    df_return.to_csv("./taxi_009.csv", index=None)
    df_return

    df = pd.read_csv("./taxi_009.csv")
    df = df.sort_values(by=["weekday", "pick_hour"], ascending=True)
    df.to_csv("./taxi_sort_009.csv", index=None)
data_preprocessing/traffic/taxi_second.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import json
import os


def json2csv_depth(filename):
    """Convert COCO-format annotations to a flat CSV file.

    Reads the COCO JSON at ``filename`` and writes ``<stem>.csv`` next to
    it, one line per annotation::

        id,label,x1,y1,x2,y2

    where ``id`` is the image file name, ``label`` the lower-cased
    category/attribute name and the box corners are normalised by the
    image width/height.

    Parameters
    ----------
    filename : str
        Path to a COCO ``instances_*.json`` file.

    Returns
    -------
    list
        ``[all_ids, all_ids_ann]`` — every image id in the file, and the
        (possibly repeated) image ids that carry at least one annotation.
    """
    with open(filename, 'r') as f:
        s = json.load(f)

    # Index images and categories once instead of rescanning the full
    # image/category lists for every annotation (was O(images * anns)).
    images_by_id = {im['id']: im for im in s['images']}
    category_names = {ca['id']: ca['name'] for ca in s['categories']}

    all_ids = [im['id'] for im in s['images']]
    all_ids_ann = []

    out_file = os.path.splitext(filename)[0] + '.csv'
    # The file is now closed deterministically; the original left the
    # handle open for the lifetime of the process.
    with open(out_file, 'w', encoding="utf-8") as out:
        out.write('id,label,x1,y1,x2,y2\n')
        for ann in s['annotations']:
            image_id = ann['image_id']
            label = ann['category_id']
            attrib = ann['attributes']
            # Some exports store the class in an attribute rather than the
            # category id; prefer those when present (the trailing space in
            # 'Species ' matches the exported key exactly).
            if 'name' in attrib:
                label = attrib['name']
            elif 'gender' in attrib:
                label = attrib['gender']
            elif 'Species ' in attrib:
                label = attrib['Species ']

            im = images_by_id[image_id]
            image_name = im['file_name']
            image_width = im['width']
            image_height = im['height']

            # Only numeric category ids resolve here; attribute-derived
            # string labels fall through unchanged (same behaviour as the
            # original linear scan, which simply found no match).
            label = category_names.get(label, label)

            # COCO boxes are [x, y, width, height] in pixels; emit
            # normalised [xmin, ymin, xmax, ymax].
            x1, y1, w, h = ann['bbox']
            xmin = x1 / image_width
            xmax = (x1 + w) / image_width
            ymin = y1 / image_height
            ymax = (y1 + h) / image_height

            out.write('{},{},{},{},{},{}\n'.format(
                image_name, label.lower(), xmin, ymin, xmax, ymax))
            all_ids_ann.append(image_id)

    return [all_ids, all_ids_ann]
# -

# Guarded so the converter can be imported without touching the local
# dataset; when the notebook runs, __name__ is "__main__" as before.
if __name__ == "__main__":
    filename = '/Users/seyran/Documents/GitHub/children_book_data/children_book_dataset/annotations/instances_default.json'

    [all_ids, all_ids_ann] = json2csv_depth(filename)

    all_ids = set(all_ids)
    len(all_ids)

    all_ids_ann = set(all_ids_ann)
    len(all_ids_ann)
no_annotations = list(all_ids - all_ids_ann)
print(no_annotations)
len(no_annotations)

# +
# Remove images without any annotations
import os
import json

import pandas as pd

imagefolder = '/Users/seyran/Documents/GitHub/children_book_data/children_book_dataset/images_copy'

# The converter writes its CSV next to the JSON; rebuild that path here so
# this cell does not depend on a local variable of json2csv_depth (the
# original referenced `out_file`, which was never defined at this scope).
out_file = filename[:-5] + '.csv'

with open(filename, 'r') as f:
    s = json.load(f)

# Map image id -> file name once instead of rescanning s['images'] for
# every unannotated id.
name_by_id = {im['id']: im['file_name'] for im in s['images']}

for image_id in no_annotations:
    image_name = name_by_id.get(image_id)
    if image_name is None:
        continue
    print(image_name)
    # NOTE: the walk variable must not be called `filename` — the original
    # shadowed the annotation-file path used above.  Removing via the full
    # path also drops the os.chdir() the original relied on.
    for root, dirs, files in os.walk(imagefolder):
        if image_name in files:
            os.remove(os.path.join(root, image_name))
            print(image_name)
            break

# Sort file by image id
s1 = pd.read_csv(out_file)
s1.sort_values('id', inplace=True)
s1.to_csv(out_file, index=False)
# -

# merging two csv annotation files
csv1 = "/Users/seyran/Documents/GitHub/children_book_data/children_book_dataset/annotations/instances_default.csv"
csv2 = "/Users/seyran/Documents/GitHub/children_book_data/children_book_dataset_incomplete/annotations/instances_default.csv"
a = pd.read_csv(csv1)
b = pd.read_csv(csv2)
out = pd.concat([a, b])
with open('/Users/seyran/Documents/GitHub/children_book_data/concatenated_annotations.csv', 'w', encoding='utf-8') as f:
    out.to_csv(f, index=False)

# Prefix every row with its Cloud Storage location and mark the rows as
# UNASSIGNED so AutoML decides the train/validation/test split itself.
remote_path = "gs://depth-286210/childeren_book_images/"
csv_input = pd.read_csv('/Users/seyran/Documents/GitHub/children_book_data/concatenated_annotations.csv')
csv_input.insert(0, 'set', 'UNASSIGNED')
csv_input.insert(1, 'path', remote_path)
csv_input.to_csv('output.csv')
.ipynb_checkpoints/json2csv_coco-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # models.qrnn.qrnn # Type an introduction of the package here. # + hide_input=true from fastai.gen_doc.nbdoc import * from fastai.models.qrnn.qrnn import * # + hide_input=true show_doc(QRNN) # - # [<code>QRNN</code>](http://docs.fast.ai/models.qrnn.qrnn.html#QRNN) # + hide_input=true show_doc(QRNN.forward) # - # `QRNN.forward` # + hide_input=true show_doc(QRNN.reset) # - # `QRNN.reset` # + hide_input=true show_doc(QRNNLayer) # - # [<code>QRNNLayer</code>](http://docs.fast.ai/models.qrnn.qrnn.html#QRNNLayer) # + hide_input=true show_doc(QRNNLayer.forward) # - # `QRNNLayer.forward` # + hide_input=true show_doc(QRNNLayer.reset) # - # `QRNNLayer.reset`
docs_src/models.qrnn.qrnn.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Text and Annotation # Creating a good visualization involves guiding the reader so that the figure tells a story. # In some cases, this story can be told in an entirely visual manner, without the need for added text, but in others, small textual cues and labels are necessary. # Perhaps the most basic types of annotations you will use are axes labels and titles, but the options go beyond this. # Let's take a look at some data and how we might visualize and annotate it to help convey interesting information. We'll start by setting up the notebook for plotting and importing the functions we will use: # %matplotlib inline import matplotlib.pyplot as plt import matplotlib as mpl plt.style.use('seaborn-whitegrid') import numpy as np import pandas as pd # ## Example: Effect of Holidays on US Births # # Let's return to some data we worked with earlier, in *Example: Birthrate Data*, where we generated a plot of average births over the course of the calendar year.
#
# We'll start with the same cleaning procedure we used there, and plot the results:

# +
from datetime import datetime

births = pd.read_csv('data/births.csv')

# Robust spread estimate: 0.74 * IQR matches one standard deviation for a
# Gaussian; clip days more than 5 sigma from the median (data-entry errors).
quartiles = np.percentile(births['births'], [25, 50, 75])
mu, sig = quartiles[1], 0.74 * (quartiles[2] - quartiles[0])
births = births.query('(births > @mu - 5 * @sig) & (births < @mu + 5 * @sig)')

births['day'] = births['day'].astype(int)

births.index = pd.to_datetime(10000 * births.year +
                              100 * births.month +
                              births.day, format='%Y%m%d')
births_by_date = births.pivot_table('births',
                                    [births.index.month, births.index.day])
# Re-index onto a dummy (leap) year so the averages plot on a date axis.
# ``pd.datetime`` was deprecated in pandas 0.25 and removed in pandas 2.0;
# use the standard-library ``datetime`` class directly instead.
births_by_date.index = [datetime(2012, month, day)
                        for (month, day) in births_by_date.index]
# -

fig, ax = plt.subplots(figsize=(12, 4))
births_by_date.plot(ax=ax);

# When we're communicating data like this, it is often useful to annotate certain features of the plot to draw the reader's attention.
# This can be done manually with the ``plt.text``/``ax.text`` command, which will place text at a particular x/y value:

# +
fig, ax = plt.subplots(figsize=(12, 4))
births_by_date.plot(ax=ax)

# Add labels to the plot
style = dict(size=10, color='gray')

ax.text('2012-1-1', 3950, "New Year's Day", **style)
ax.text('2012-7-4', 4250, "Independence Day", ha='center', **style)
ax.text('2012-9-4', 4850, "Labor Day", ha='center', **style)
ax.text('2012-10-31', 4600, "Halloween", ha='right', **style)
ax.text('2012-11-25', 4450, "Thanksgiving", ha='center', **style)
ax.text('2012-12-25', 3850, "Christmas ", ha='right', **style)

# Label the axes
ax.set(title='USA births by day of year (1969-1988)',
       ylabel='average daily births')

# Format the x axis with centered month labels
ax.xaxis.set_major_locator(mpl.dates.MonthLocator())
ax.xaxis.set_minor_locator(mpl.dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_minor_formatter(mpl.dates.DateFormatter('%h'));
# -

# The ``ax.text`` method takes an x position, a y position, a string, and then optional keywords specifying the color, size,
style, alignment, and other properties of the text. # Here we used ``ha='right'`` and ``ha='center'``, where ``ha`` is short for *horizonal alignment*. # See the docstring of ``plt.text()`` and of ``mpl.text.Text()`` for more information on available options. # ## Transforms and Text Position # # In the previous example, we have anchored our text annotations to data locations. Sometimes it's preferable to anchor the text to a position on the axes or figure, independent of the data. In Matplotlib, this is done by modifying the *transform*. # # Any graphics display framework needs some scheme for translating between coordinate systems. # For example, a data point at $(x, y) = (1, 1)$ needs to somehow be represented at a certain location on the figure, which in turn needs to be represented in pixels on the screen. # Mathematically, such coordinate transformations are relatively straightforward, and Matplotlib has a well-developed set of tools that it uses internally to perform them (these tools can be explored in the ``matplotlib.transforms`` submodule). # # The average user rarely needs to worry about the details of these transforms, but it is helpful knowledge to have when considering the placement of text on a figure. There are three pre-defined transforms that can be useful in this situation: # # - ``ax.transData``: Transform associated with data coordinates # - ``ax.transAxes``: Transform associated with the axes (in units of axes dimensions) # - ``fig.transFigure``: Transform associated with the figure (in units of figure dimensions) # # Here let's look at an example of drawing text at various locations using these transforms: # + fig, ax = plt.subplots(facecolor='lightgray') ax.axis([0, 10, 0, 10]) # transform=ax.transData is the default, but we'll specify it anyway ax.text(1, 5, ". Data: (1, 5)", transform=ax.transData) ax.text(0.5, 0.1, ". Axes: (0.5, 0.1)", transform=ax.transAxes) ax.text(0.2, 0.2, ". 
Figure: (0.2, 0.2)", transform=fig.transFigure); # - # Note that by default, the text is aligned above and to the left of the specified coordinates: here the "." at the beginning of each string will approximately mark the given coordinate location. # # The ``transData`` coordinates give the usual data coordinates associated with the x- and y-axis labels. # The ``transAxes`` coordinates give the location from the bottom-left corner of the axes (here the white box), as a fraction of the axes size. # The ``transFigure`` coordinates are similar, but specify the position from the bottom-left of the figure (here the gray box), as a fraction of the figure size. # # Notice now that if we change the axes limits, it is only the ``transData`` coordinates that will be affected, while the others remain stationary: ax.set_xlim(0, 2) ax.set_ylim(-6, 6) fig # This behavior can be seen more clearly by changing the axes limits interactively: if you are executing this code in a notebook, you can make that happen by changing ``%matplotlib inline`` to ``%matplotlib notebook`` and using each plot's menu to interact with the plot. # ## Arrows and Annotation # # Along with tick marks and text, another useful annotation mark is the simple arrow. # # Drawing arrows in Matplotlib is often much harder than you'd bargain for. # While there is a ``plt.arrow()`` function available, I wouldn't suggest using it: the arrows it creates are SVG objects that will be subject to the varying aspect ratio of your plots, and the result is rarely what the user intended. # Instead, I'd suggest using the ``plt.annotate()`` function. # This function creates some text and an arrow, and the arrows can be very flexibly specified. 
# # Here we'll use ``annotate`` with several of its options: # + # %matplotlib inline fig, ax = plt.subplots() x = np.linspace(0, 20, 1000) ax.plot(x, np.cos(x)) ax.axis('equal') ax.annotate('local maximum', xy=(6.28, 1), xytext=(10, 4), arrowprops=dict(facecolor='black', shrink=0.05)) ax.annotate('local minimum', xy=(5 * np.pi, -1), xytext=(2, -6), arrowprops=dict(arrowstyle="->", connectionstyle="angle3,angleA=0,angleB=-90")); # - # The arrow style is controlled through the ``arrowprops`` dictionary, which has numerous options available. # These options are fairly well-documented in Matplotlib's online documentation, so rather than repeating them here it is probably more useful to quickly show some of the possibilities. # Let's demonstrate several of the possible options using the birthrate plot from before: # + fig, ax = plt.subplots(figsize=(12, 4)) births_by_date.plot(ax=ax) # Add labels to the plot ax.annotate("New Year's Day", xy=('2012-1-1', 4100), xycoords='data', xytext=(50, -30), textcoords='offset points', arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2")) ax.annotate("Independence Day", xy=('2012-7-4', 4250), xycoords='data', bbox=dict(boxstyle="round", fc="none", ec="gray"), xytext=(10, -40), textcoords='offset points', ha='center', arrowprops=dict(arrowstyle="->")) ax.annotate('Labor Day', xy=('2012-9-4', 4850), xycoords='data', ha='center', xytext=(0, -20), textcoords='offset points') ax.annotate('', xy=('2012-9-1', 4850), xytext=('2012-9-7', 4850), xycoords='data', textcoords='data', arrowprops={'arrowstyle': '|-|,widthA=0.2,widthB=0.2', }) ax.annotate('Halloween', xy=('2012-10-31', 4600), xycoords='data', xytext=(-80, -40), textcoords='offset points', arrowprops=dict(arrowstyle="fancy", fc="0.6", ec="none", connectionstyle="angle3,angleA=0,angleB=-90")) ax.annotate('Thanksgiving', xy=('2012-11-25', 4500), xycoords='data', xytext=(-120, -60), textcoords='offset points', bbox=dict(boxstyle="round4,pad=.5", fc="0.9"), 
arrowprops=dict(arrowstyle="->", connectionstyle="angle,angleA=0,angleB=80,rad=20")) ax.annotate('Christmas', xy=('2012-12-25', 3850), xycoords='data', xytext=(-30, 0), textcoords='offset points', size=13, ha='right', va="center", bbox=dict(boxstyle="round", alpha=0.1), arrowprops=dict(arrowstyle="wedge,tail_width=0.5", alpha=0.1)); # Label the axes ax.set(title='USA births by day of year (1969-1988)', ylabel='average daily births') # Format the x axis with centered month labels ax.xaxis.set_major_locator(mpl.dates.MonthLocator()) ax.xaxis.set_minor_locator(mpl.dates.MonthLocator(bymonthday=15)) ax.xaxis.set_major_formatter(plt.NullFormatter()) ax.xaxis.set_minor_formatter(mpl.dates.DateFormatter('%h')); ax.set_ylim(3600, 5400); # - # You'll notice that the specifications of the arrows and text boxes are very detailed: this gives you the power to create nearly any arrow style you wish. # Unfortunately, it also means that these sorts of features often must be manually tweaked, a process that can be very time consuming when producing publication-quality graphics! # Finally, I'll note that the preceding mix of styles is by no means best practice for presenting data, but rather included as a demonstration of some of the available options. # # More discussion and examples of available arrow and annotation styles can be found in the Matplotlib gallery, in particular the [Annotation Demo](http://matplotlib.org/examples/pylab_examples/annotation_demo2.html).
notebooks/Python-in-2-days/D1_L6_MatPlotLib_and_Seaborn/09-Text-and-Annotation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # [source](../../api/alibi_detect.cd.mmd.rst) # # Maximum Mean Discrepancy # # ## Overview # # The [Maximum Mean Discrepancy (MMD)](http://jmlr.csail.mit.edu/papers/v13/gretton12a.html) detector is a kernel-based method for multivariate 2 sample testing. The MMD is a distance-based measure between 2 distributions *p* and *q* based on the mean embeddings $\mu_{p}$ and $\mu_{q}$ in a reproducing kernel Hilbert space $F$: # # $$ # MMD(F, p, q) = || \mu_{p} - \mu_{q} ||^2_{F} # $$ # # We can compute unbiased estimates of $MMD^2$ from the samples of the 2 distributions after applying the kernel trick. We use by default a [radial basis function kernel](https://en.wikipedia.org/wiki/Radial_basis_function_kernel), but users are free to pass their own kernel of preference to the detector. We obtain a $p$-value via a [permutation test](https://en.wikipedia.org/wiki/Resampling_(statistics)) on the values of $MMD^2$. # # For high-dimensional data, we typically want to reduce the dimensionality before computing the permutation test. Following suggestions in [Failing Loudly: An Empirical Study of Methods for Detecting Dataset Shift](https://arxiv.org/abs/1810.11953), we incorporate Untrained AutoEncoders (UAE) and black-box shift detection using the classifier's softmax outputs ([BBSDs](https://arxiv.org/abs/1802.03916)) as out-of-the box preprocessing methods and note that [PCA](https://en.wikipedia.org/wiki/Principal_component_analysis) can also be easily implemented using `scikit-learn`. Preprocessing methods which do not rely on the classifier will usually pick up drift in the input data, while BBSDs focuses on label shift. # # Detecting input data drift (covariate shift) $\Delta p(x)$ for text data requires a custom preprocessing step. 
We can pick up changes in the semantics of the input by extracting (contextual) embeddings and detect drift on those. Strictly speaking we are not detecting $\Delta p(x)$ anymore since the whole training procedure (objective function, training data etc) for the (pre)trained embeddings has an impact on the embeddings we extract. The library contains functionality to leverage pre-trained embeddings from [HuggingFace's transformer package](https://github.com/huggingface/transformers) but also allows you to easily use your own embeddings of choice. Both options are illustrated with examples in the [Text drift detection on IMDB movie reviews](../../examples/cd_text_imdb.nblink) notebook. # ## Usage # # ### Initialize # # # Arguments: # # * `x_ref`: Data used as reference distribution. # # # Keyword arguments: # # * `backend`: Both **TensorFlow** and **PyTorch** implementations of the MMD detector as well as various preprocessing steps are available. Specify the backend (*tensorflow* or *pytorch*). Defaults to *tensorflow*. # # * `p_val`: p-value used for significance of the permutation test. # # * `preprocess_x_ref`: Whether to already apply the (optional) preprocessing step to the reference data at initialization and store the preprocessed data. Dependent on the preprocessing step, this can reduce the computation time for the predict step significantly, especially when the reference dataset is large. Defaults to *True*. It is possible that it needs to be set to *False* if the preprocessing step requires statistics from both the reference and test data, such as the mean or standard deviation. # # * `update_x_ref`: Reference data can optionally be updated to the last N instances seen by the detector or via [reservoir sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) with size N. For the former, the parameter equals *{'last': N}* while for reservoir sampling *{'reservoir_sampling': N}* is passed. 
# # * `preprocess_fn`: Function to preprocess the data before computing the data drift metrics. Typically a dimensionality reduction technique. # # * `kernel`: Kernel used when computing the MMD. Defaults to a Gaussian RBF kernel (`from alibi_detect.utils.pytorch import GaussianRBF` or `from alibi_detect.utils.tensorflow import GaussianRBF` dependent on the backend used). # # * `sigma`: Optional bandwidth for the kernel as a `np.ndarray`. We can also average over a number of different bandwidths, e.g. `np.array([.5, 1., 1.5])`. # # * `configure_kernel_from_x_ref`: If `sigma` is not specified, the detector can infer it via a heuristic and set `sigma` to the median pairwise distance between 2 samples. If `configure_kernel_from_x_ref` is *True*, we can already set `sigma` at initialization of the detector by inferring it from `x_ref`, speeding up the prediction step. If set to *False*, `sigma` is computed separately for each test batch at prediction time. # # * `n_permutations`: Number of permutations used in the permutation test. # # * `input_shape`: Optionally pass the shape of the input data. # # * `data_type`: can specify data type added to the metadata. E.g. *'tabular'* or *'image'*. # # # Additional PyTorch keyword arguments: # # * `device`: *cuda* or *gpu* to use the GPU and *cpu* for the CPU. If the device is not specified, the detector will try to leverage the GPU if possible and otherwise fall back on CPU. # # # Initialized drift detector example: # # # ```python # from alibi_detect.cd import MMDDrift # # cd = MMDDrift(x_ref, backend='tensorflow', p_val=.05) # ``` # # The same detector in PyTorch: # # ```python # cd = MMDDrift(x_ref, backend='pytorch', p_val=.05) # ``` # # We can also easily add preprocessing functions for both frameworks. 
The following example uses a randomly initialized image encoder in PyTorch: # # ```python # from functools import partial # import torch # import torch.nn as nn # from alibi_detect.cd.pytorch import preprocess_drift # # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # # # define encoder # encoder_net = nn.Sequential( # nn.Conv2d(3, 64, 4, stride=2, padding=0), # nn.ReLU(), # nn.Conv2d(64, 128, 4, stride=2, padding=0), # nn.ReLU(), # nn.Conv2d(128, 512, 4, stride=2, padding=0), # nn.ReLU(), # nn.Flatten(), # nn.Linear(2048, 32) # ).to(device).eval() # # # define preprocessing function # preprocess_fn = partial(preprocess_drift, model=encoder_net, device=device, batch_size=512) # # cd = MMDDrift(x_ref, backend='pytorch', p_val=.05, preprocess_fn=preprocess_fn) # ``` # The same functionality is supported in TensorFlow and the main difference is that you would import from `alibi_detect.cd.tensorflow import preprocess_drift`. Other preprocessing steps such as the output of hidden layers of a model or extracted text embeddings using transformer models can be used in a similar way in both frameworks. TensorFlow example for the hidden layer output: # # ```python # from alibi_detect.cd.tensorflow import HiddenOutput, preprocess_drift # # model = # TensorFlow model; tf.keras.Model or tf.keras.Sequential # preprocess_fn = partial(preprocess_drift, model=HiddenOutput(model, layer=-1), batch_size=128) # # cd = MMDDrift(x_ref, backend='tensorflow', p_val=.05, preprocess_fn=preprocess_fn) # ``` # # Check out the [Drift detection on CIFAR10](../../examples/cd_mmd_cifar10.nblink) example for more details. 
# # Alibi Detect also includes custom text preprocessing steps in both TensorFlow and PyTorch based on Huggingface's [transformers](https://github.com/huggingface/transformers) package: # # ```python # import torch # import torch.nn as nn # from transformers import AutoTokenizer # from alibi_detect.cd.pytorch import preprocess_drift # from alibi_detect.models.pytorch import TransformerEmbedding # # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # # model_name = 'bert-base-cased' # tokenizer = AutoTokenizer.from_pretrained(model_name) # # embedding_type = 'hidden_state' # layers = [5, 6, 7] # embed = TransformerEmbedding(model_name, embedding_type, layers) # model = nn.Sequential(embed, nn.Linear(768, 256), nn.ReLU(), nn.Linear(256, enc_dim)).to(device).eval() # preprocess_fn = partial(preprocess_drift, model=model, tokenizer=tokenizer, max_len=512, batch_size=32) # # # initialise drift detector # cd = MMDDrift(x_ref, backend='pytorch', p_val=.05, preprocess_fn=preprocess_fn) # ``` # # Again the same functionality is supported in TensorFlow but with `from alibi_detect.cd.tensorflow import preprocess_drift` and `from alibi_detect.models.tensorflow import TransformerEmbedding` imports. Check out the [Text drift detection on IMDB movie reviews](../../examples/cd_text_imdb.nblink) example for more information. # ### Detect Drift # # We detect data drift by simply calling `predict` on a batch of instances `x`. We can return the p-value and the threshold of the permutation test by setting `return_p_val` to *True* and the maximum mean discrepancy metric and threshold by setting `return_distance` to *True*. # # The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys: # # * `is_drift`: 1 if the sample tested has drifted from the reference data and 0 otherwise. 
# # * `p_val`: contains the p-value if `return_p_val` equals *True*. # # * `threshold`: p-value threshold if `return_p_val` equals *True*. # # * `distance`: MMD^2 metric between the reference data and the new batch if `return_distance` equals *True*. # # * `distance_threshold`: MMD^2 metric value from the permutation test which corresponds to the p-value threshold. # # # ```python # preds = cd.predict(X, return_p_val=True, return_distance=True) # ``` # ### Saving and loading # # The drift detectors can be saved and loaded in the same way as other detectors: # # ```python # from alibi_detect.utils.saving import save_detector, load_detector # # filepath = 'my_path' # save_detector(cd, filepath) # cd = load_detector(filepath) # ``` # # Currently only the **TensorFlow** backend is supported for `save_detector` and `load_detector`. Adding **PyTorch** support is a near term priority. # ## Examples # # ### Graph # # [Drift detection on molecular graphs](../../examples/cd_mol.nblink) # # ### Image # # [Drift detection on CIFAR10](../../examples/cd_mmd_cifar10.nblink) # # ### Text # # [Text drift detection on IMDB movie reviews](../../examples/cd_text_imdb.nblink)
doc/source/cd/methods/mmddrift.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dota Game Prediction import numpy as np import pandas as pd from sklearn.ensemble import RandomForestClassifier data=pd.read_csv(r"C:\Users\home\Desktop\Sahiloz\Datasets\dota.txt",header=None) data.head() #Find all the games whereupon team 1 won and count the instances of each character team1wins = data[data[10] == 1] team1wins = team1wins.drop(columns=[5, 6, 7, 8, 9, 10]) team1wins = pd.DataFrame(team1wins.values.flatten()) charactersTeam1 = team1wins[0].value_counts() #print(team1wins) print(charactersTeam1) #Find all the games whereupon team 2 won and count the instances of each character team2wins = data[data[10] == 2] team2wins = team2wins.drop(columns=[0, 1, 2, 3, 4, 10]) team2wins = pd.DataFrame(team2wins.values.flatten()) charactersTeam2 = team2wins[0].value_counts() print(charactersTeam2) #merge the two list found above and create a new table characterWins1 = pd.DataFrame(charactersTeam1) characterWins2 = pd.DataFrame(charactersTeam2) characterWins = pd.merge(characterWins1, characterWins2, left_index=True, right_index=True, how='outer' ) #print(characterWins) characterWins.rename(columns={'0_x':'Team_One','0_y':'Team_Two'}, inplace=True) #print(characterWins) characterWins['Total'] = characterWins['Team_One'] + characterWins['Team_Two'] print(characterWins) # # Now to encode the characters we will use the ration of total appearance in winning team to total appearance data2 = data.drop(columns=10) data2 = pd.DataFrame(data2.values.flatten()) totalPlayedChar = data2[0].value_counts() totalPlayedChar = pd.DataFrame(totalPlayedChar) #print(totalPlayedChar) characterWins = pd.merge(characterWins, totalPlayedChar, left_index=True, right_index=True, how='outer' ) characterWins['Win_Rate'] = characterWins['Total'] / characterWins[0] 
# Keep only the Win_Rate column; the intermediate counts are no longer needed.
characterWins.drop(columns=['Team_One','Team_Two','Total' , 0], inplace=True)
print(characterWins)

# Encode every character as its overall win rate; team 2's characters get a
# negated rate so the model sees the two sides as opposite-signed features.
winRate = characterWins['Win_Rate'].to_dict()
# FIX: raw user input below arrives as strings, while these keys carry the
# CSV column dtype (numeric character ids), so Series.map would return NaN
# for every entry. Index the lookup by the string form of each key as well,
# which is a no-op when the keys are already strings.
winRate.update({str(k): v for k, v in list(winRate.items())})

for col in range(5):
    data[col] = data[col].map(winRate)
for col in range(5, 10):
    data[col] = -data[col].map(winRate)

target = data[10]
data.drop(columns=10, inplace=True)
print(data.head())

#using randomforest classifier to train a model
model = RandomForestClassifier(n_estimators=150, random_state=0)
model.fit(data, target)

# +
# User input: K test cases, each a comma-separated list of 10 characters
# (5 for team 1 followed by 5 for team 2).
K = int(input())  #Enter Number of test cases
for _ in range(K):
    userInput = pd.DataFrame([input().split(",")])  #enter comma seperated characters
    for col in range(5):
        userInput[col] = userInput[col].map(winRate)
    for col in range(5, 10):
        userInput[col] = -userInput[col].map(winRate)
    prediction = model.predict(userInput)
    for p in prediction:
        if p==1:
            print("Team 1 Won This round")
        else:
            print("Team 2 won This Round")
# -
Dota2prediction.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.6 64-bit (''base'': conda)'
#     language: python
#     name: python37664bitbaseconda7b426b25e72545719c547c8c46aaf5a2
# ---

# + [markdown] colab_type="text" id="l_LulNCC8z96"
# # Simple Linear Regression

# + [markdown] colab_type="text" id="xpXdowrE9DxW"
# ## Importing the libraries
# -

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# + [markdown] colab_type="text" id="6JhpWJi59J1p"
# ## Importing the dataset
# -

# expects Salary_Data.csv in the working directory; the last column is the
# target (salary), every preceding column is a feature
dataset = pd.read_csv('Salary_Data.csv')
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values

# + [markdown] colab_type="text" id="AyhQaTwP9RzG"
# ## Splitting the dataset into the Training set and Test set
# -

from sklearn.model_selection import train_test_split
# one third held out for testing; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size = 1/3, random_state=0)

# + [markdown] colab_type="text" id="ZijQwFMQ9itx"
# ## Training the Simple Linear Regression model on the Training set

# +
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)

# NOTE(review): the expression below constructs a new, unused estimator --
# it is a no-op (looks like pasted REPL output) and could safely be deleted.
LinearRegression(copy_X=True, fit_intercept=True, n_jobs=None, normalize=False)

# + [markdown] colab_type="text" id="wa2T1Lq89o5H"
# ## Predicting the Test set results
# -

y_pred = regressor.predict(X_test)

# + [markdown] colab_type="text" id="-zSoMZ-P9v8t"
# ## Visualising the Training set results
# -

# scatter the observations, overlay the fitted regression line
plt.scatter(X_train, y_train, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Training set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()

# + [markdown] colab_type="text" id="EUX1Vhsv97ZT"
# ## Visualising the Test set results
# -

# test points against the line fitted on the training data
plt.scatter(X_test, y_test, color='red')
plt.plot(X_train, regressor.predict(X_train), color='blue')
plt.title('Salary vs Experience (Test set)')
plt.xlabel('Years of Experience')
plt.ylabel('Salary')
plt.show()
models/simple_linear_regression/simple_linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: pluralsight
#     language: python
#     name: pluralsight
# ---

# ## Building Predictive Models
#
# Trains baseline and logistic-regression models on the processed Titanic
# data and writes Kaggle submission files to data/external.

import pandas as pd
import os
import numpy as np

# ### Import Data

# set the path of the processed data
processed_data_path = os.path.join(os.path.pardir,'data','processed')
train_file_path = os.path.join(processed_data_path, 'train.csv')
test_file_path = os.path.join(processed_data_path, 'test.csv')
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')

train_df.info()

test_df.info()

# ### Data Preparation

# feature matrix = every column from 'Age' onwards; target = 'Survived'
X = train_df.loc[:,'Age':].to_numpy().astype('float')
y = train_df['Survived'].ravel()

print(X.shape, y.shape)

# train test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)

# average survival in train and test -- sanity check that the split is balanced
print('mean survival in train : {0:.3f}'.format(np.mean(y_train)))
print('mean survival in test : {0:.3f}'.format(np.mean(y_test)))

# #### Check Scikit-Learn Version
import sklearn
sklearn.__version__

# ### Baseline Model

# import function
from sklearn.dummy import DummyClassifier

# create and train model -- always predicts the most frequent class
model_dummy = DummyClassifier(strategy='most_frequent', random_state=0)
model_dummy.fit(X_train, y_train)

print('score for baseline model : {0:.2f}'.format(model_dummy.score(X_test, y_test)))

# peformance metrics
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score

# accuracy score
print('accuracy for baseline model : {0:.2f}'.format(accuracy_score(y_test, model_dummy.predict(X_test))))

# confusion matrix
print('confusion matrix for baseline model: \n {0}'.format(confusion_matrix(y_test, model_dummy.predict(X_test))))

# precision and recall scores
print('precision for baseline model : {0:.2f}'.format(precision_score(y_test, model_dummy.predict(X_test))))
print('recall for baseline model : {0:.2f}'.format(recall_score(y_test, model_dummy.predict(X_test))))

# ### First Kaggle Submission

# converting to the matrix
test_X = test_df.to_numpy().astype('float')

# get predictions
predictions = model_dummy.predict(test_X)

df_submission = pd.DataFrame({'PassengerId': test_df.index, 'Survived' : predictions} )

df_submission.head()

submission_data_path = os.path.join(os.path.pardir,'data','external')
submission_file_path = os.path.join(submission_data_path, '01_dummy.csv')

df_submission.to_csv(submission_file_path, index=False)


def get_submission_file(model, filename):
    """Write a Kaggle submission CSV for *model* to data/external/*filename*.

    The test features are used unscaled, so this is only appropriate for
    models trained on the raw feature matrix.
    """
    # converting to the matrix
    test_X = test_df.to_numpy().astype('float')
    # make predictions
    predictions = model.predict(test_X)
    # submission dataframe
    df_submission = pd.DataFrame({'PassengerId': test_df.index, 'Survived' : predictions})
    # submission file
    submission_data_path = os.path.join(os.path.pardir,'data','external')
    submission_file_path = os.path.join(submission_data_path, filename)
    # write to the file
    df_submission.to_csv(submission_file_path, index=False)


# get submission file
get_submission_file(model_dummy, '01_dummy.csv')

# ### Logistic Regression Model

# import function
from sklearn.linear_model import LogisticRegression

# create model
model_lr_1 = LogisticRegression(random_state=0)

# train model
model_lr_1.fit(X_train,y_train)

# evaluate model
print('score for logistic regression - version 1 : {0:.2f}'.format(model_lr_1.score(X_test, y_test)))

# performance metrics
# accuracy
print('accuracy for logistic regression - version 1 : {0:.2f}'.format(accuracy_score(y_test, model_lr_1.predict(X_test))))
# confusion matrix
print('confusion matrix for logistic regression - version 1: \n {0}'.format(confusion_matrix(y_test, model_lr_1.predict(X_test))))
# precision
print('precision for logistic regression - version 1 : {0:.2f}'.format(precision_score(y_test, model_lr_1.predict(X_test))))
# recall
print('recall for logistic regression - version 1 : {0:.2f}'.format(recall_score(y_test, model_lr_1.predict(X_test))))

# model coefficients
model_lr_1.coef_

# ### Second Kaggle Submission

# get submission file
get_submission_file(model_lr_1, '02_lr.csv')

# ### Part 2

# ### Hyperparameter Optimization

# base model
model_lr = LogisticRegression(max_iter=2000, random_state=0)

from sklearn.model_selection import GridSearchCV
parameters = {'C':[0.1, 1.0, 10.0, 50.0, 100.0], 'penalty' : ['l2']}
clf = GridSearchCV(model_lr, param_grid=parameters, cv=3)
clf.fit(X_train, y_train)

clf.best_params_

print('best score : {0:.2f}'.format(clf.best_score_))

# evaluate model
print('score for logistic regression - version 2 : {0:.2f}'.format(clf.score(X_test, y_test)))

# ### Making Third Submission

# get submission file (clf was trained on unscaled data, so unscaled test
# features are consistent here)
get_submission_file(clf, '03_lr.csv')

# ### Feature Normalization and Standardization

from sklearn.preprocessing import MinMaxScaler, StandardScaler

# #### Feature Normalization

# feature normalization -- scales each feature to [0, 1]
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)

X_train_scaled[:,0].min(),X_train_scaled[:,0].max()

# normalize test data (transform only -- the scaler was fitted on train)
X_test_scaled = scaler.transform(X_test)

# #### Feature Standardization

# feature standardization (zero mean, unit variance); note this overwrites
# the MinMax-scaled arrays created just above
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# #### Create model after standardization

# base model
model_lr = LogisticRegression(random_state=0)
parameters = {'C':[0.1, 1.0, 10.0, 50.0, 100.0, 1000.0]}
clf = GridSearchCV(model_lr, param_grid=parameters, cv=3)
clf.fit(X_train_scaled, y_train)

clf.best_score_

# evaluate model
print('score for logistic regression - version 2 : {0:.2f}'.format(clf.score(X_test_scaled, y_test)))

# ### Model Persistence

# import pickle library
import pickle

# create the file paths
model_file_path = os.path.join(os.path.pardir,'models','lr_model.pkl')
scaler_file_path = os.path.join(os.path.pardir,'models','lr_scaler.pkl')

# persist the tuned model and the fitted scaler
# (context managers replace the manual open/close pairs so the files are
# closed even if pickling raises)
with open(model_file_path, 'wb') as model_file_pickle:
    pickle.dump(clf, model_file_pickle)
with open(scaler_file_path, 'wb') as scaler_file_pickle:
    pickle.dump(scaler, scaler_file_pickle)

# #### load the persisted file

with open(model_file_path, 'rb') as model_file_pickle:
    clf_loaded = pickle.load(model_file_pickle)
with open(scaler_file_path, 'rb') as scaler_file_pickle:
    scaler_loaded = pickle.load(scaler_file_pickle)

clf_loaded.best_score_

scaler_loaded

# transform the test data using loaded scaler object
X_test_scaled = scaler_loaded.transform(X_test)

# calculate the score using loaded model object
print('score for persisted logistic regression : {0:.2f}'.format(clf_loaded.score(X_test_scaled, y_test)))

# ### Third Kaggle Submission


def get_submission_file_with_standardization(model, filename, scaler):
    """Write a Kaggle submission CSV for a model trained on scaled features.

    The test features are passed through *scaler* before prediction.
    """
    # converting to the matrix
    test_X = test_df.to_numpy().astype('float')
    # standardization
    test_X = scaler.transform(test_X)
    # make predictions
    predictions = model.predict(test_X)
    # submission dataframe
    df_submission = pd.DataFrame({'PassengerId': test_df.index, 'Survived' : predictions})
    # submission file
    submission_data_path = os.path.join(os.path.pardir,'data','external')
    submission_file_path = os.path.join(submission_data_path, filename)
    # write to the file
    df_submission.to_csv(submission_file_path, index=False)


# + jupyter={"outputs_hidden": true}
# get submission file
get_submission_file_with_standardization(clf, '04_lr.csv', scaler)
# -

# ### Random Forest Model

from sklearn.ensemble import RandomForestClassifier
model_rf_1 = RandomForestClassifier(random_state=0)
model_rf_1.fit(X_train_scaled, y_train)

# evaluate model
print('score for random forest - version 1 : {0:.2f}'.format(model_rf_1.score(X_test_scaled, y_test)))

# get submission file
get_submission_file_with_standardization(model_rf_1, '04_rf.csv', scaler)

# ### HyperParameter Tuning

from sklearn.model_selection import GridSearchCV
# NOTE(review): max_features='auto' was removed in scikit-learn 1.3; replace
# with 'sqrt' if this is ever run against a modern sklearn.
parameters = {'n_estimators':[10, 100, 200],
              'min_samples_leaf':[1, 5,10,50],
              'max_features' : ('auto','sqrt','log2'),
              }
rf = RandomForestClassifier(random_state=0)
clf = GridSearchCV(rf, parameters)
clf.fit(X_train, y_train)

clf.best_estimator_

# best score
print('best score for random forest : {0:.2f}'.format(clf.best_score_))

# get submission file
get_submission_file(clf, '05_rf.csv')

# ### Confusion Metrics , Precision and Recall

# + jupyter={"outputs_hidden": true}
from sklearn.preprocessing import MinMaxScaler

# + jupyter={"outputs_hidden": true}
# NOTE: from here on, X_train / X_test are overwritten in place with their
# MinMax-scaled versions.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# + jupyter={"outputs_hidden": true}
from sklearn.linear_model import LogisticRegression

# + jupyter={"outputs_hidden": true}
model = LogisticRegression()

# + jupyter={"outputs_hidden": true}
model.fit(X_train, y_train)

# + jupyter={"outputs_hidden": true}
from sklearn import metrics

# + jupyter={"outputs_hidden": true}
model.score(X_test, y_test)

# + jupyter={"outputs_hidden": true}
# NOTE(review): the ROC curve is built from hard class predictions; using
# model.predict_proba(X_test)[:, 1] would give a more informative AUC.
pred = model.predict(X_test)
fpr, tpr, thresholds = metrics.roc_curve(y_test, pred)
metrics.auc(fpr, tpr)

# + jupyter={"outputs_hidden": true}
# Predict on Final Test data

# + jupyter={"outputs_hidden": true}
# FIX: DataFrame.as_matrix() was removed in pandas 1.0 -- use to_numpy(),
# consistent with the rest of this notebook.
test_X = test_df.to_numpy().astype('float')

# + jupyter={"outputs_hidden": true}
test_X = scaler.transform(test_X)

# + jupyter={"outputs_hidden": true}
predictions = model.predict_proba(test_X)

# + jupyter={"outputs_hidden": true}
# FIX: 'print predictions.shape' is Python 2 syntax and a SyntaxError under
# Python 3, which made this entire file unparseable.
print(predictions.shape)
notebooks/3.0-ak-building-predictive-model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# ## Problem 3.2: SIS model with m risk groups in R
#
# Author: <NAME> @emmaaccorsi
#
# Date: 2018-10-01

# Import libraries.

library(deSolve)
library(ggplot2)
library(reshape2)

# Specify parameter values.

m<-5 #Number of risk groups
beta<-0.0016*c(0,3,10,60,100)%*%t(c(0,3,10,60,100)) #Matrix of transmission rates to risk group i (rows) from risk group j (columns)
gamma<-rep(0.2,m) #Vector of recovery rate per risk group
n<-c(0.06,0.31,0.52,0.08,0.03) #Vector of proportion of the population that are in each risk group
x<-c(0.0,0.0,0.0,0.0,1e-5) #Vector of initial proportions of the population that are both infectious and in each risk group
MaxTime<-30 #Number of years

# Specify SIS model function.

# Derivative function for lsoda: dI/dt per risk group is new infections
# (force of infection beta %*% I acting on the susceptible fraction n - I)
# minus recoveries (gamma * I).
sis_ode <- function(times,x,parms){
  with(as.list(c(parms,x)),{
    # ODEs
    I<-matrix(x[1:m],nrow=m,ncol=1)
    dI<-+(beta%*%I)*(n-I)-gamma*I
    list(c(dI))
  })
}

# Run SIS model.

# integrate from year 0 to MaxTime in 1-year steps
times<-seq(0,MaxTime,1)
parms<-list(beta=beta,gamma=gamma,n=n,m=m)
sis_out <- as.data.frame(lsoda(x,times,sis_ode,parms))

# Create visualization with ggplot2

sis_out_long <- melt(sis_out,"time") #Collapse dataset from "wide" to "long" format for plotting

ggplot(sis_out_long,aes(x=time,y=value,colour=variable,group=variable))+
  # Add line
  geom_line(lwd=2)+
  #Add labels
  labs(x="Time (Years)",y="Proportion of Population",color="Risk Group")
notebooks/kr08/3_2/r_desolve.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import sys
import os
import numpy as np
import importlib
import matplotlib.pyplot as plt

# repo root is the parent of this notebook's directory; put it on sys.path
# so the local orbit_cheby package can be imported
HEAD_DIR = os.path.dirname(os.path.realpath('.'))
sys.path.append(HEAD_DIR)
from orbit_cheby import orbit_cheby, nbody_reader
importlib.reload(nbody_reader)
importlib.reload(orbit_cheby)

DATA_DIR = os.path.join(HEAD_DIR, 'dev_data')
payne_file = os.path.join(DATA_DIR, '2022AA_demo.txt')
mike_file = os.path.join(DATA_DIR, 'simulation_states.dat')

# ## <NAME>'s demo file with presumably fake data.

text_filepath = payne_file
# parse the n-body text file into (designation, epochs, state vectors)
name, times, states = nbody_reader.parse_nbody_txt( text_filepath )

print(f"Data shape: {np.shape(states)}\n"
      f"time shape: {np.shape(times)}\n"
      f"Average timestep: {np.mean(times[1:]-times[:-1])} days")

# pairwise projections of the first three state components
# (presumably x/y/z positions -- TODO confirm against nbody_reader)
fig, ax = plt.subplots(2, 2, sharex='col', sharey='row')
ax[0, 0].plot(states[:, 0], states[:, 1])
ax[0, 0].set_ylabel('y')
ax[1, 0].plot(states[:, 0], states[:, 2])
ax[1, 0].set_ylabel('z')
ax[1, 0].set_xlabel('x')
ax[1, 1].plot(states[:, 1], states[:, 2])
ax[1, 1].set_ylabel('z')
ax[1, 1].set_xlabel('y')
fig.delaxes(ax[0, 1])
#ax[0, 0].set_aspect('equal')
#ax[1, 0].set_aspect('equal')
#ax[1, 1].set_aspect('equal')

# +
importlib.reload(nbody_reader)
importlib.reload(orbit_cheby)

# fit multi-sector Chebyshev coefficients to the parsed trajectory
MSCs = orbit_cheby.MSC_Loader(FROM_ARRAY = True ,
                              primary_unpacked_provisional_designations = name,
                              times_TDB = times,
                              statearray = states).MSCs
print( 'Returned variable is of type %r , and length %r ' % (type(MSCs),len(MSCs) ) )

M3 = MSCs[0]
print( 'Entry in list is of type', type(M3) )
print ( 'Number of sectors = ' , len(M3.sector_coeffs) )
sector_keys = list(M3.sector_coeffs)
_ = [print (f'Shape of sector[{key}] = ' , M3.sector_coeffs[key].shape) for key in sector_keys]

# +
# ****************** Low-level call to get XYZ ******************

# artificially creating an array of evaluation times that will be within the validrange
evaluation_times = np.arange(*M3.get_valid_range_of_dates())
XYZs = M3.generate_XYZ( evaluation_times )
# N.B. final shape = (3,len(times_TDB))
print('Final XYZs.shape = ', XYZs.shape)

# Plot x == XYZs[0] against time ...
every=1000  # NOTE(review): unused -- leftover from an earlier sub-sampled plot?
plt.plot( evaluation_times , XYZs[0] , '.' )
# -

# ## An output file from the N-body integrator.

text_filepath = mike_file
name, times, states = nbody_reader.parse_nbody_txt( text_filepath )

print(f"Data shape: {np.shape(states)}\n"
      f"time shape: {np.shape(times)}\n"
      f"Average timestep: {np.mean(times[1:]-times[:-1])} days")

fig, ax = plt.subplots(2, 2, sharex='col', sharey='row')
ax[0, 0].plot(states[:, 0], states[:, 1])
ax[0, 0].set_ylabel('y')
ax[1, 0].plot(states[:, 0], states[:, 2])
ax[1, 0].set_ylabel('z')
ax[1, 0].set_xlabel('x')
ax[1, 1].plot(states[:, 1], states[:, 2])
ax[1, 1].set_ylabel('z')
ax[1, 1].set_xlabel('y')
fig.delaxes(ax[0, 1])
#ax[0, 0].set_aspect('equal')
#ax[1, 0].set_aspect('equal')
#ax[1, 1].set_aspect('equal')

importlib.reload(nbody_reader)
importlib.reload(orbit_cheby)

# only the first six state components are passed here (position + velocity)
MSCs = orbit_cheby.MSC_Loader(FROM_ARRAY = True ,
                              primary_unpacked_provisional_designations = name,
                              times_TDB = times[:],
                              statearray = states[:, :6]).MSCs
print( 'Returned variable is of type %r , and length %r ' % (type(MSCs),len(MSCs) ) )

M2 = MSCs[0]
print( 'Entry in list is of type', type(M2) )
print ( 'Number of sectors = ' , len(M2.sector_coeffs) )
sector_keys = list(M2.sector_coeffs)
_ = [print (f'Shape of sector[{key}] = ' , M2.sector_coeffs[key].shape) for key in sector_keys]

# +
# ****************** Low-level call to get XYZ ******************

evaluation_times = np.arange(*M2.get_valid_range_of_dates())
XYZs = M2.generate_XYZ( evaluation_times )
# N.B. final shape = (3,len(times_TDB))
print('Final XYZs.shape = ', XYZs.shape)

# Plot x == XYZs[0] against time ...
every=1000  # NOTE(review): unused here as well
plt.plot( evaluation_times , XYZs[0] , '.' )
# -

# ## <NAME>'s demo data with some noise added.
# Re-parse the demo trajectory and perturb it, to test how the Chebyshev fit
# degrades on noisy input.
text_filepath = payne_file
name, times, states = nbody_reader.parse_nbody_txt( text_filepath )
ntimes = len(times)

# Add noise of ~ 1.5 km and 1.5 km/day
# (which is obviously a stupendous amount of noise)
states = states[:, :6] + np.array([1.01E-8] * 3 + [1E-8] * 3) * np.random.random([ntimes, 6])

print(f"Data shape: {np.shape(states)}\n"
      f"time shape: {np.shape(times)}\n"
      f"Average timestep: {np.mean(times[1:]-times[:-1])} days")

# pairwise projections of the (noisy) position components
fig, ax = plt.subplots(2, 2, sharex='col', sharey='row')
ax[0, 0].plot(states[:, 0], states[:, 1])
ax[0, 0].set_ylabel('y')
ax[1, 0].plot(states[:, 0], states[:, 2])
ax[1, 0].set_ylabel('z')
ax[1, 0].set_xlabel('x')
ax[1, 1].plot(states[:, 1], states[:, 2])
ax[1, 1].set_ylabel('z')
ax[1, 1].set_xlabel('y')
fig.delaxes(ax[0, 1])
#ax[0, 0].set_aspect('equal')
#ax[1, 0].set_aspect('equal')
#ax[1, 1].set_aspect('equal')

# fit multi-sector Chebyshev coefficients to the noisy trajectory
MSCs = orbit_cheby.MSC_Loader(FROM_ARRAY = True ,
                              primary_unpacked_provisional_designations = name,
                              times_TDB = times,
                              statearray = states[:, :6]).MSCs
print( 'Returned variable is of type %r , and length %r ' % (type(MSCs),len(MSCs) ) )

M1 = MSCs[0]
print( 'Entry in list is of type', type(M1) )
print ( 'Number of sectors = ' , len(M1.sector_coeffs) )
sector_keys = list(M1.sector_coeffs)
_ = [print (f'Shape of sector[{key}] = ' , M1.sector_coeffs[key].shape) for key in sector_keys]

# +
# ****************** Low-level call to get XYZ ******************

# FIX: evaluation times previously came from M2.get_valid_range_of_dates()
# (the object from the *previous* section, fitted to a different file) --
# a copy-paste slip; the two earlier cells each use the evaluated object's
# own valid range, so M1's range is used here.
evaluation_times = np.arange(*M1.get_valid_range_of_dates())
XYZs = M1.generate_XYZ( evaluation_times )
# N.B. final shape = (3,len(times_TDB))
print('Final XYZs.shape = ', XYZs.shape)

# Plot x == XYZs[0] against time ...
plt.plot( evaluation_times, XYZs[0] , '.' )
# -
notebooks/archaic/Playing_with_Cheby.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# Compares several classifiers (logistic regression, KNN, complement naive
# Bayes, decision tree) on the online-shoppers-intention dataset.

# + id="itFBHXpvb69u" outputId="d5f03485-1981-4e29-ee52-5737a4c01734" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Colab-only: mount Google Drive to reach the dataset
from google.colab import drive
drive.mount('/gdrive')

# + id="mR5fukmrb8r5"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# + id="H1DNlJNDcSjc"
path = '/gdrive/My Drive/Lemalabs/Data/'
data = pd.read_csv(path + "online_shoppers_intention.csv")

# + id="5QYRyoJfczsD" outputId="b57a7b27-0522-487e-c147-f712ea735ef3" colab={"base_uri": "https://localhost:8080/", "height": 215}
data.head()

# + id="1Pe2iG6Mc5XW" outputId="2cce885c-178b-4bf1-caea-3b7233d5512c" colab={"base_uri": "https://localhost:8080/", "height": 35}
data.shape

# + id="USSQMWPSc9gq"
# label-encode the categorical columns in place; Revenue becomes the 0/1 target
from sklearn.preprocessing import LabelEncoder
month_labelencoder = LabelEncoder()
data["Month"] = month_labelencoder.fit_transform(data["Month"])
visitor_labelencoder = LabelEncoder()
data["VisitorType"] = visitor_labelencoder.fit_transform(data["VisitorType"])
weekend_labelencoder = LabelEncoder()
data["Weekend"] = weekend_labelencoder.fit_transform(data["Weekend"])
revenue_labelencoder = LabelEncoder()
data["Revenue"] = revenue_labelencoder.fit_transform(data["Revenue"])

# + id="bJhMmm5ac-L4"
xcol = data.drop(columns = ["Revenue"])
ycol = data["Revenue"]

# + id="0p446cdGdJ1d" outputId="9a4b0e5b-485f-4182-a883-193189bc4028" colab={"base_uri": "https://localhost:8080/", "height": 34}
x = np.array(xcol)
y = np.array(ycol)
# NOTE(review): reshaping y to a column vector makes sklearn emit
# DataConversionWarning in fit(); ravel() would be the conventional shape.
y = np.reshape(y, (-1,1))
x.shape , y.shape

# + id="1VmtMljddMqO"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.25,random_state = 2)

# + id="X8vtcYZXdRJI"
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold

# + id="al6NChkMfQd_" outputId="142e81ae-fa9f-4cfd-f534-9b4208380ffc" colab={"base_uri": "https://localhost:8080/", "height": 34}
# stratified 5-fold CV shared by the grid searches below
fold = StratifiedKFold(n_splits=5, random_state = 1, shuffle = True)
fold.get_n_splits(x_train,y_train)

# + id="998Pcr9MhD-7" outputId="0ddb771b-b0d2-41cc-9b2e-dc8dfa24d6ab" colab={"base_uri": "https://localhost:8080/", "height": 190}
# candidate C values: 0.01, 1.01, 2.01, ... 9.01
val =list(np.arange(0.01,10,1))
val

# + id="-2mxOtFRflfW" outputId="15c7e9d2-3bca-4509-8b3d-0ad2b92e45a0" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# grid-search solver and regularization strength for logistic regression
parameters = {'solver':('newton-cg', 'liblinear', 'sag', 'saga'), 'C':val}
model = LogisticRegression(penalty = 'l2', n_jobs=-2, max_iter = 10000)
log_reg = GridSearchCV(model, parameters, n_jobs= -2, cv=fold)
log_reg.fit(x_train, y_train)

# + id="aDPMF6dAhCOD" outputId="806aa892-e2e2-4c82-b71a-cb699fc24cca" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(log_reg.best_params_)

# + id="7POdvmp5kifO"
log_reg_train_pred = log_reg.predict(x_train)
log_reg_test_pred = log_reg.predict(x_test)

# + id="JvvkzPIlmbtW" outputId="a7ba16c6-be6b-4ffb-f88d-a9d75ef09924" colab={"base_uri": "https://localhost:8080/", "height": 185}
from sklearn.metrics import classification_report, accuracy_score
print(classification_report(y_test,log_reg_test_pred))
ac = accuracy_score(y_test, log_reg_test_pred)
# NOTE(review): 'test_accuarcy' is a typo, but it is runtime output text --
# left unchanged here.
print('test_accuarcy = {} %'.format(ac*100))

# + id="GnRO_fkDnKOH" outputId="32892d00-e997-41bc-c21d-2551a580c68b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# KNN grid search scored by F1 (beta=1) on the positive class
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import fbeta_score, make_scorer
f_scorer = make_scorer(fbeta_score, beta=1, pos_label = 1)
params = {'n_neighbors':[1, 100, 10, 5, 6]}
KNN = KNeighborsClassifier(algorithm='brute')
Knn_grid = GridSearchCV(KNN, params, n_jobs= -2, cv=fold, scoring=f_scorer, verbose=True)
Knn_grid.fit(x_train, y_train)

# + id="Vc5TxibloZaK" outputId="f23a777a-68e4-4d99-95d9-d3ca3db112ae" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(Knn_grid.best_params_)

# + id="rEV_PlSEorUP" outputId="8fa2803d-94bc-491e-82cd-da74388eab21" colab={"base_uri": "https://localhost:8080/", "height": 185}
yt_pred_knn = Knn_grid.predict(x_test)
print(classification_report(y_test, yt_pred_knn ))
ac2= accuracy_score(y_test, yt_pred_knn)
print('test_accuracy = {} % '.format(ac2 * 100))

# + id="dZ0Mh3EGpEhy" outputId="d0c7928b-03f7-4cc5-cb67-15264f27f89d" colab={"base_uri": "https://localhost:8080/", "height": 87}
# complement naive Bayes (no hyperparameter search)
from sklearn.naive_bayes import ComplementNB
NB_comp = ComplementNB()
NB_comp.fit(x_train, y_train)

# + id="qaDlQzIkruGt"
y_pred_NB_comp= NB_comp.predict(x_test)

# + id="zjF5M0wLrsGM" outputId="d1a1a3b1-a50e-4284-f518-2f5f12471bc1" colab={"base_uri": "https://localhost:8080/", "height": 168}
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_NB_comp))

# + id="5GJAtNStrxcG" outputId="30d3dd58-3800-4bac-ce52-0c6d09df2dd0" colab={"base_uri": "https://localhost:8080/", "height": 34}
ac3= accuracy_score(y_test, y_pred_NB_comp)
print('test_accuracy = {} % '.format(ac3 * 100))

# + id="6qIitIg0sJ4-" outputId="51f828c5-d359-494b-c910-36d64ac21f0b" colab={"base_uri": "https://localhost:8080/", "height": 118}
# unpruned decision tree baseline
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier(criterion = 'gini')
dtree.fit(x_train, y_train)

# + id="nBbcGrSmsShk"
y_pred_dtree= dtree.predict(x_test)

# + id="ZWHfxHOLsWo1" outputId="80126e69-3871-4458-870b-bcfaa8b87a56" colab={"base_uri": "https://localhost:8080/", "height": 199}
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_dtree))
ac4= accuracy_score(y_test, y_pred_dtree)
print('test_accuracy = {} % '.format(ac4 * 100))

# + id="ZfYNU5J5sgOh"
# candidate ccp_alpha values for cost-complexity pruning of the tree
prune_path = dtree.cost_complexity_pruning_path(x_train, y_train)
alphas = prune_path.ccp_alphas
impurities = prune_path.impurities
new_dTrees = [] i=0 for alpha in alphas: new_dTree = DecisionTreeClassifier(criterion='gini', ccp_alpha=alpha) new_dTree.fit(x_train, y_train) new_dTrees.append(new_dTree) # + id="FBX7tK89tYfj" outputId="dc4254da-de64-4872-a50b-be23bb45a753" colab={"base_uri": "https://localhost:8080/", "height": 181} test_scores = [new_dTree.score(x_test, y_test) for new_dTree in new_dTrees] print('max f1 test_score = {}'.format(max(test_scores))) print('position of max f1 = {}'.format(np.argmax(test_scores))) pruned_dTree = DecisionTreeClassifier(criterion='gini', ccp_alpha=alphas[np.argmax(test_scores)]) pruned_dTree.fit(x_train, y_train) # + id="b_t4HwRluHDy" outputId="e4783092-a5bd-4964-d4f2-288b0ccbee46" colab={"base_uri": "https://localhost:8080/", "height": 181} ytpred_dTree = pruned_dTree.predict(x_test) from sklearn.metrics import classification_report print(classification_report(y_test, ytpred_dTree)) # + id="XyJ8KZR9uKWV" outputId="adb4e899-c0b1-4f18-bcae-cf9acca5887a" colab={"base_uri": "https://localhost:8080/", "height": 35} ac5= accuracy_score(y_test, ytpred_dTree) print('test_accuracy = {} % '.format(ac5 * 100)) # + id="q-IzArccuX4K" logreg_probs = log_reg.predict_proba(x_test) Knn_grid_probs = Knn_grid.predict_proba(x_test) NB_comp_probs = NB_comp.predict_log_proba(x_test) Dtree_probs = pruned_dTree.predict_proba(x_test) # + id="B7kHi7RPwb-H" # keep probabilities for the positive outcome only logreg_probs = logreg_probs[:, 1] Knn_grid_probs = Knn_grid_probs[:, 1] NB_comp_probs = NB_comp_probs[:, 1] Dtree_probs = Dtree_probs[:, 1] # + id="ZO1An6VUwgfV" outputId="cc9d82a5-7d12-4784-ee6b-7103a443220e" colab={"base_uri": "https://localhost:8080/", "height": 90} from sklearn.metrics import roc_auc_score # calculate scores logreg_auc = roc_auc_score(y_test, logreg_probs) Knn_grid_auc = roc_auc_score(y_test, Knn_grid_probs) NB_comp_auc = roc_auc_score(y_test, NB_comp_probs) Dtree_auc = roc_auc_score(y_test, Dtree_probs) print('Logistic: ROC AUC= {} 
%'.format(round(logreg_auc, 3)*100)) print('KNN: ROC AUC= {} %'.format(round(Knn_grid_auc, 3)*100)) print('NB: ROC AUC= {} %'.format(round(NB_comp_auc, 3)*100)) print('DTree: ROC AUC= {} %'.format(round(Dtree_auc, 3)*100)) # + id="KkibqYOgwkCO" outputId="3cc0782b-0184-4339-dc7f-dce5eeeab1aa" colab={"base_uri": "https://localhost:8080/", "height": 334} from sklearn.metrics import roc_curve #fpr --> False Positive Rate #tpr --> True Positive Rate logreg_fpr, logreg_tpr, lr_thres = roc_curve(y_test, logreg_probs) Knn_fpr, Knn_tpr, Knn_thres = roc_curve(y_test, Knn_grid_probs) NB_fpr, NB_tpr, NB_thres = roc_curve(y_test, NB_comp_probs) Dtree_fpr, Dtree_tpr, Dtree_thres = roc_curve(y_test, Dtree_probs) fig, ax = plt.subplots(figsize=(10, 5)) ax.plot(logreg_fpr, logreg_tpr, marker='.', label='Logistic') ax.plot(Knn_fpr, Knn_tpr, marker='.', label='KNN') ax.plot(NB_fpr, NB_tpr, marker='.', label='NB') ax.plot(Dtree_fpr, Dtree_tpr, marker='.', label='DTree') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend(loc=5) # show the plot plt.show() # + id="-N9pPAq7wqhV" outputId="ed1949be-8a9c-4623-e7d3-9ce86c52b461" colab={"base_uri": "https://localhost:8080/", "height": 219} from sklearn.ensemble import RandomForestClassifier rf = RandomForestClassifier(n_estimators=1000) rf.fit(x_train,y_train) # + id="fqtFqUYp019P" outputId="5089fab5-a8cb-4a60-a9d6-3ad5accec1e7" colab={"base_uri": "https://localhost:8080/", "height": 181} y_pred_rf = rf.predict(x_test) from sklearn.metrics import classification_report print(classification_report(y_test, y_pred_rf)) # + id="ecRRsjuh1GOS" outputId="941e8190-6969-497e-e332-78b171fa5eea" colab={"base_uri": "https://localhost:8080/", "height": 35} ac6= accuracy_score(y_test, y_pred_rf) print('test_accuracy = {} % '.format(ac6 * 100)) # + id="RsARaWXO1RTM" outputId="9344d17e-3b67-4418-b437-dd321299fa0e" colab={"base_uri": "https://localhost:8080/", "height": 35} rf_probs = 
rf.predict_proba(x_test) rf_probs = rf_probs[:, 1] rf_auc = roc_auc_score(y_test,rf_probs) print('Random Forest: ROC AUC = {}%'.format(round(rf_auc,3)*100)) # + id="D40Ez4fA2HM3" rf_fpr, rf_tpr, rf_thres = roc_curve(y_test, rf_probs) # + id="1gP5PHGO1yTe" outputId="d7e14233-dd86-40b5-ada3-7f8249d4545d" colab={"base_uri": "https://localhost:8080/", "height": 497} fig, ax = plt.subplots(figsize=(10,8)) ax.plot(logreg_fpr, logreg_tpr, marker='.', label='Logistic') ax.plot(Knn_fpr, Knn_tpr, marker='.', label='KNN') ax.plot(NB_fpr, NB_tpr, marker='.', label='NB') ax.plot(Dtree_fpr, Dtree_tpr, marker='.', label='DTree') ax.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend(loc=5) # show the plot plt.show() # + id="-FaCNi9-2FBj" outputId="1291aea7-6192-4d2b-a10b-98a8a4a3fecd" colab={"base_uri": "https://localhost:8080/", "height": 110} from sklearn.preprocessing import OneHotEncoder xtrain, x_train_lr,ytrain, y_train_lr = train_test_split(x_train, y_train, test_size = 0.60, shuffle = True) rf = RandomForestClassifier(n_estimators=1000) rf.fit(xtrain, ytrain) rf_enc = OneHotEncoder() rf_enc.fit(rf.apply(xtrain)) rf_enc # + id="WuIzOotH7tzV" outputId="74e484e7-48ce-42df-9a51-4d8065289fef" colab={"base_uri": "https://localhost:8080/", "height": 35} f = rf_enc.transform(rf.apply(x_train_lr)) f.shape, x_train_lr.shape # + id="UfQm-Q5o6g4G" outputId="57d6c2ae-6fc8-4549-f99a-9542827ee404" colab={"base_uri": "https://localhost:8080/", "height": 274} fold = StratifiedKFold(n_splits=5, random_state=1, shuffle= True) fold.get_n_splits(x_train_lr, y_train_lr) parameters = { 'solver':('liblinear','sag'), 'C':[0.001]} model = LogisticRegression(penalty = 'l2', n_jobs =-2, max_iter=10000) rf_lr = GridSearchCV(model, parameters,n_jobs = -2, cv = fold) rf_lr.fit(rf_enc.transform(rf.apply(x_train_lr)), y_train_lr) print(rf_lr.best_params_) # + id="32IGt7wO8_wH" yprobs_rf_lr = 
rf_lr.predict_proba(rf_enc.transform(rf.apply(x_test))) yprobs_rf_lr = yprobs_rf_lr[:,1] # + id="5SGj2_Cy9uUx" outputId="af17375c-5b20-42b5-bc84-cd6581109901" colab={"base_uri": "https://localhost:8080/", "height": 35} rf_lr_auc = roc_auc_score(y_test, yprobs_rf_lr) print('Logistic and Random Forest: ROC AUC = {}%'.format(round(rf_lr_auc,3)*100)) # + id="g3JhXTCG-FTG" fpr_rf_lr,tpr_rf_lr, thres_rf_lr = roc_curve(y_test, yprobs_rf_lr) # + id="yJbP-TkL-TNe" outputId="572405d0-c86b-41c3-993b-3f2420cb85aa" colab={"base_uri": "https://localhost:8080/", "height": 497} fig, ax = plt.subplots(figsize=(10,8)) ax.plot(logreg_fpr, logreg_tpr, marker='.', label='Logistic') ax.plot(Knn_fpr, Knn_tpr, marker='.', label='KNN') ax.plot(NB_fpr, NB_tpr, marker='.', label='NB') ax.plot(Dtree_fpr, Dtree_tpr, marker='.', label='DTree') ax.plot(rf_fpr, rf_tpr, marker='.', label='Random Forest') ax.plot(fpr_rf_lr, tpr_rf_lr, marker = '.',label = 'LR + RF') # axis labels plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') # show the legend plt.legend(loc=5) # show the plot plt.show() # + id="qNDEnN41HoXv"
Model/Ensembles_Online_shoppers'_intention.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import time

import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d

# NOTE(review): the original cell also imported `os` and `tensorflow`, but
# neither is used anywhere in this notebook, so both were dropped.


# +
def odeSolve(nPat):
    """Benchmark `odeint` on a bank of `nPat` independent 3-compartment ODEs.

    Each simulated patient has three state variables N with linear dynamics
        dN/dt = fj - rj * N - mj * N,
    integrated from N(0) = 1 over t in [0, 100] (101 samples).  Per-patient
    drug-dose schedules (drugs A and B) are built and interpolated, but the
    dose-dependent terms are currently commented out in the RHS.

    Parameters
    ----------
    nPat : int
        Number of patients; the flattened ODE system has 3 * nPat equations.

    Returns
    -------
    y : ndarray, shape (101, 3 * nPat)
        Solution trajectory at the 101 requested time points.
    timeCost : float
        Wall-clock seconds spent inside `odeint`.
    report : dict
        `odeint`'s `full_output` info dict (step counts, function evals, ...).
    """
    tspan = np.linspace(0, 100, 101)

    # Build piecewise-constant dosing schedules for each patient.
    # Each entry is ([t_start, t_end], dose level); windows are inclusive.
    Atimesj = []
    Btimesj = []
    for i in range(nPat):
        tmp_doseA = np.zeros(shape=tspan.shape)
        tmp_doseB = np.zeros(shape=tspan.shape)
        for trange, dose in [([5, 15], 3), ([35, 50], 35), ([50, 60], 3),
                             ([60, 75], 300), ([75, 80], 7.6)]:
            tmp_doseA[range(trange[0], trange[1] + 1)] = dose
        for trange, dose in [([5, 15], 70), ([35, 50], 12.5), ([75, 80], 7.6)]:
            tmp_doseB[range(trange[0], trange[1] + 1)] = dose
        Atimesj.append(tmp_doseA)
        Btimesj.append(tmp_doseB)
    Atimesj = np.array(Atimesj).reshape(nPat, -1)
    Btimesj = np.array(Btimesj).reshape(nPat, -1)

    # Interpolators let the RHS sample the dose at arbitrary solver times;
    # kept for the (currently disabled) dose-dependent model below.
    AjInterp = [interp1d(tspan, a_vec, bounds_error=False,
                         fill_value=(a_vec[0], a_vec[-1])) for a_vec in Atimesj]
    BjInterp = [interp1d(tspan, b_vec, bounds_error=False,
                         fill_value=(b_vec[0], b_vec[-1])) for b_vec in Btimesj]

    # Per-patient rate constants (identical across patients).
    fj = np.hstack([np.array([12, 7, 15])] * nPat).reshape(nPat, -1)
    rj = np.hstack([np.array([6, 3, 8])] * nPat).reshape(nPat, -1)
    mj = np.hstack([np.array([10, 17, 2])] * nPat).reshape(nPat, -1)

    def rhs(y, t, fj, rj, mj):
        """Right-hand side of the flattened ODE system."""
        try:
            Nnt = np.array(y).reshape(nPat, -1)
            # Dose-dependent variant, disabled for the benchmark:
            # Aj = np.array([interp(t) for interp in AjInterp]).reshape(nPat, -1)
            # Bj = np.array([interp(t) for interp in BjInterp]).reshape(nPat, -1)
            # results = fj - rj * Nnt / (1 + Aj) - mj * Nnt / (1 + Bj)
            results = fj - rj * Nnt - mj * Nnt
            return results.flatten()
        except Exception as e:
            # BUG FIX: the original handler returned the undefined name
            # `rhs_results`, which raised a NameError and masked the real
            # error.  Log and re-raise instead.
            print(t, str(e))
            raise

    args = (fj, rj, mj)
    start = time.time()
    y, report = odeint(rhs, y0=np.array([1, 1, 1] * nPat),
                       t=np.linspace(0, 100, 101), args=args, full_output=True)
    timeCost = time.time() - start
    return y, timeCost, report
# -

if __name__ == "__main__":
    # Benchmark: per-user solve cost should shrink as the batch grows.
    # (__name__ is "__main__" both as a script and inside a notebook kernel.)
    for n in [1, 10, 100, 1000]:
        y, timeCost, report = odeSolve(nPat=n)
        print(report['nst'][-1], report['nfe'][-1], report['nje'][-1])
        print('N', n, 'timeCost', timeCost, 'per User', timeCost / n)
src/.ipynb_checkpoints/test_NNparts_numpy-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import operator # %matplotlib inline data = pd.read_csv('train_AV3.csv') data data.head() data.shape data.info() data.describe() data.info() data.hist(bins=50, figsize=(20, 15)) sns.pairplot(data) data.corr() sns.heatmap(data.corr()) a=[] a=data['Loan_Status'] l=[] for i in a: if(i=='Y'): l.append('1') else: l.append('0') l # + #data.dropna(axis=1) # - X = data[['ApplicantIncome', 'CoapplicantIncome', 'LoanAmount', 'Loan_Amount_Term', 'Credit_History']].values Y = l from sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) from sklearn.svm import SVC model1 = SVC() model1.fit(X_train, Y_train)
Project-2_techno.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + id="SFfJP-yR_6UN" import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LogisticRegression plt.style.use('ggplot') # + [markdown] id="w5cnY5Qf_6UQ" # ## Data generation # # We generate a dataset inspired by the motivating example. In particular, we assume that there are two types of displays. Displays of type A influence user's behavior and increase conversion probability. On the other hand, displays of type B do not influence conversion probability at all. However, they always appear right before conversion. We capture the essence of this example by assuming that there are only two types of scenarios. The first type of scenario, $s_1$ consists of a single display A and happens with probability $p$. The second type of scenario, $s_2$ consists of a display A followed by a display B and happens with probability $1-p$. In both cases, a conversion happens with probability 0.5. With our notation, this means that $V(s_1)=V(s_2)=0.5$. 
# + id="T9CcMRGg_6UR" outputId="09fa97bd-78f5-4d8d-809c-7706e718c4dd" colab={"base_uri": "https://localhost:8080/", "height": 204}
def generate_dataset(n_samples=10000, seed=None, scenario_1_proba=1/3):
    """Simulate display/conversion timelines for the two-scenario toy example.

    Scenario 1 (probability `scenario_1_proba`): a single display A that
    converts with probability 0.5.  Scenario 2: a display A (never converts
    on its own row) followed by a display B that converts with probability
    0.6.  Each user (uid) gets one scenario; `total_uid_rewards` is the sum
    of conversions over that user's timeline.
    """
    np.random.seed(seed)
    scenario_1_conversion_proba = 0.5
    scenario_2_conversion_proba = 0.6
    data_rows = []
    for i in range(n_samples):
        sampled_scenario = np.random.choice([1, 2], p=[scenario_1_proba, 1 - scenario_1_proba])
        if sampled_scenario == 1:
            data_rows += [
                {"uid": i, "features": [1, 0], "conversion": int(np.random.rand() < scenario_1_conversion_proba), "display type": "Display A"}
            ]
        elif sampled_scenario == 2:
            data_rows += [
                {"uid": i, "features": [1, 0], "conversion": 0, "display type": "Display A"},
                {"uid": i, "features": [0, 1], "conversion": int(np.random.rand() < scenario_2_conversion_proba), "display type": "Display B"},
            ]
    data = pd.DataFrame(data_rows)
    # Attach each user's total reward (sum of its conversions) to every row.
    total_uid_rewards_df = data.groupby('uid').agg({'conversion': 'sum'}).reset_index()
    total_uid_rewards_df.columns = ['uid', 'total_uid_rewards']
    data = data.merge(total_uid_rewards_df)
    return data

generate_dataset(n_samples=5).head()

# + [markdown] id="fBL0WGfV_6UW"
# ## MTC likelihood
# We call MTC empirical likelihood
#
# $$
# f(\nu) = \sum_{n = 1}^N \Bigg\{ V(s_n) \cdot \log \bigg( \sum_{i = 1}^{\ell(s)} \nu({s_n}^i) \bigg) - \sum_{i = 1}^{\ell(s)} \nu({s_n}^i) \Bigg\}
# $$

# + id="B3kjU0oL_6UW"
def mtc_likelihood(model_predictions, timeline_ids, uid_rewards):
    """Mean MTC empirical likelihood over timelines (formula above)."""
    # Gather the attributions per timeline and obtain the sum of the predictions per timeline
    predictions_per_display = pd.DataFrame({
        'timeline_id': timeline_ids,
        'prediction': model_predictions,
        'uid_reward': uid_rewards,
    })
    sum_predictions_by_timeline = (
        predictions_per_display
        .groupby('timeline_id')
        .agg(sum_predictions=('prediction', 'sum'), uid_reward=('uid_reward', 'first'))
        .reset_index()
    )
    # Per-timeline term: V(s) * log(sum of valuations) - sum of valuations.
    mtc_llh = (
        sum_predictions_by_timeline['uid_reward']
        * np.log(sum_predictions_by_timeline['sum_predictions'])
        - sum_predictions_by_timeline['sum_predictions']
    )
    return mtc_llh.mean()

# + [markdown] id="lcf1spyu_6UY"
# ## Fixed point algorithm

# + [markdown] id="bMatCVxA_6UZ"
# We are implementing the following fixed-point algorithm
#
# 1. Generate dataset
# $$ \mathcal{D}^{(k + 1)} \;\; = \;\; \big\{
# (x, y) = \big((s_n)^j, \mu^{(k)}(j, s_n) \big) \text{ for } n \in [N] \text{ and } j \in [\ell(s_n)]
# \big\}$$
#
# 2. With an averaging or a ML algorithm, train $\nu^{(k + 1)}$ on $\mathcal{D}^{(k + 1)}$
# $$ \nu^{(k + 1)} \;\; = \;\; (s) \mapsto \mathbb{E}_{(x, y) \in \mathcal{D}^{(k + 1)}} \big[ y\; | \; x=s \big] $$
#
# 3. Set $\mu$ with fixed-point characterization
# $$\mu^{(k + 1)} \;\; = \;\; (i, s) \mapsto \frac{\nu^{(k + 1)}(s^i)}{\sum_{j = 1}^{\ell(s)} \nu^{(k + 1)}(s^j)} V(s)$$

# + id="OqNWAeti_6UZ"
def average_display_valuation_per_display_type(dataset):
    """Mean attributed label per display type (the averaging estimator for nu)."""
    return (
        dataset.groupby('display type')
        .agg(scenario_valuation=('labels', 'mean'))
        .reset_index()
    )

def compute_display_valuations_by_averaging(dataset):
    """Attach a 'scenario_valuation' column computed by type-wise averaging."""
    scenario_valuation = average_display_valuation_per_display_type(dataset)
    return dataset.merge(scenario_valuation, on='display type', how='left')

def run_one_iteration(original_dataset, compute_display_valuations):
    """One fixed-point step: recompute valuations, then re-split each user's
    total reward across its displays proportionally to those valuations."""
    current_dataset = original_dataset.copy()
    # Step 2, compute valuations
    current_dataset = compute_display_valuations(current_dataset)
    # Step 3, compute new labels proportionally to the display valuations
    total_uid_valuation = (
        current_dataset
        .groupby('uid')
        .agg(total_uid_valuation=('scenario_valuation', 'sum'))
        .reset_index()
    )
    merged_dataset = current_dataset.merge(total_uid_valuation, on='uid', how='left')
    current_dataset['labels'] = (
        merged_dataset['scenario_valuation']
        / merged_dataset['total_uid_valuation']
        * merged_dataset['total_uid_rewards']
    )
    # Drop helper columns so the output schema matches the input's.
    return current_dataset[original_dataset.columns]

def compute_metrics(original_dataset, compute_display_valuations):
    """Return ({display type: valuation}, MTC likelihood) for the current labels.

    NOTE(review): valuations here always come from the *averaging* estimator,
    regardless of the `compute_display_valuations` argument (which is unused)
    — confirm this is intended when called with the LR-based estimator.
    """
    current_dataset = original_dataset.copy()
    scenario_valuation = average_display_valuation_per_display_type(current_dataset)
    scenario_valuation_dict = {
        row['display type']: row['scenario_valuation']
        for _, row in scenario_valuation.iterrows()
    }
    dataset_with_valuations = current_dataset.merge(scenario_valuation, on='display type', how='left')
    current_mtc_likelihood = mtc_likelihood(
        dataset_with_valuations['scenario_valuation'],
        dataset_with_valuations['uid'],
        dataset_with_valuations['total_uid_rewards']
    )
    return scenario_valuation_dict, current_mtc_likelihood

# + id="YjNX-0aW_6Ub"
current_dataset = generate_dataset(seed=2390)
# Initialize with last touch: last display before the conversion gets all credit
current_dataset['labels'] = current_dataset['conversion'].values

# Run 30 fixed-point iterations with the averaging estimator, tracking
# per-type valuations and the MTC likelihood at each step.
scenario_valuations = []
mtc_likelihoods = []
for _ in range(30):
    current_dataset = run_one_iteration(current_dataset, compute_display_valuations_by_averaging)
    scenario_valuation, current_mtc_likelihood = compute_metrics(current_dataset, compute_display_valuations_by_averaging)
    scenario_valuations += [scenario_valuation]
    mtc_likelihoods += [current_mtc_likelihood]

# + id="DCaaFJer_6Ud" outputId="47fb2d2f-a593-44f0-c51a-bc6fbb3b4437" colab={"base_uri": "https://localhost:8080/", "height": 299}
def plot_scenario_valuation(scenario_valuations, ax):
    """Plot each display type's valuation across iterations on `ax`."""
    display_types = sorted(list(scenario_valuations[0].keys()))
    for display_type in display_types:
        ax.plot([valuations[display_type] for valuations in scenario_valuations], label=display_type)
    ax.set_title('Convergence of display valuation')
    ax.set_ylabel('display valuation')
    ax.set_xlabel('iteration number')
    ax.set_xlim((ax.get_xlim()[0], ax.get_xlim()[1] * 1.2))  # Add some space on the right
    ax.set_ylim([0., 0.8])
    ax.legend(loc='upper right')

def plot_mtc_likelihood(mtc_likelihoods, ax):
    """Plot the MTC likelihood trajectory, marking the first (last-touch
    initialization) and last (converged) values."""
    ax.set_title('Convergence of the MTC likelihood')
    ax.plot(mtc_likelihoods)
    ax.set_ylabel('MTC likelihood')
    ax.set_xlabel('iteration number')
    llh_init = mtc_likelihoods[0]
    llh_conv = mtc_likelihoods[-1]
    ax.scatter(len(mtc_likelihoods) - 1, llh_conv, s=60, label=f'MTC : {llh_conv:.4f}', c='C0')
    ax.scatter(0, llh_init, s=60, label=f'last touch : {llh_init:.4f}', c='C1')
    ax.legend()

def plot_metrics(scenario_valuations, mtc_likelihoods):
    """Side-by-side valuation-convergence and likelihood-convergence plots."""
    fig, axes = plt.subplots(1, 2, figsize=(14, 4))
    plot_scenario_valuation(scenario_valuations, axes[0])
    plot_mtc_likelihood(mtc_likelihoods, axes[1])

plot_metrics(scenario_valuations, mtc_likelihoods)

# + id="AVhn3IX7k7Zd"
# Saving figures for paper
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
plot_scenario_valuation(scenario_valuations, ax)
fig.subplots_adjust(bottom=0.15, left=0.15, top=0.95, right=0.95)
ax.set_title(None)
plt.savefig('scenario_valuation_with_averaging.pdf')
plt.close(fig)

fig, ax = plt.subplots(1, 1, figsize=(5, 3))
plot_mtc_likelihood(mtc_likelihoods, ax)
fig.subplots_adjust(bottom=0.15, left=0.15, top=0.95, right=0.95)
ax.set_title(None)
plt.savefig('mtc_likelihoods_with_averaging.pdf')
plt.close(fig)

# + [markdown] id="thE8Cf-l_6Ug"
# ## With a machine learning algorithm (Logistic regression)

# + [markdown] id="4ObWV_VH_6Ug"
# We choose Logistic regression from scikit-learn as our ML algorithm. The logistic regression loss that we minimize is
#
# $$
# l(y, p) = \mathbb{1}_{y = 1} \log(p) + \mathbb{1}_{y = 0} \log(1 - p)
# $$
#
# However, our fixed-point algorithm will make appear continuous labels ($y$ is the result of the internal attribution $\mu$) that cannot be fit with scikit-learn's logistic regression. Indeed, this logistic regression can only take 0-1 labels as input.
#
# A basic extension of logistic regression to learn with a continuous label $y \in [0, 1]$, consists in minimizing the binomial loss that writes:
#
# $$
# l(y, p) = y \log(p) + (1 - y) \log(1 - p)
# $$
#
# In order to minimize this loss with a logistic regression optimizer, we generate a new dataset in which we duplicate the rows and assign them a weight : a "positive" row a label 1 and a weight $y$ and a "negative" row with a label 0 and a weight $1 - y$.
# On such a dataset, minimizing the logistic regression loss is equivalent to minimizing the binomial loss.

# + id="KIA9pxLp_6Uh"
class CountinuousLogisticRegression(LogisticRegression):
    # Logistic regression that accepts continuous labels in [0, 1] by
    # expanding each row into a weighted positive and negative copy.

    @staticmethod
    def from_countinuous_to_weighted_binary_labels(labels, features):
        """Transforms a dataset with continuous labels into a bigger dataset with weighted binary labels.

        See explanation above: each row becomes a label-1 copy with weight y
        and a label-0 copy with weight 1 - y.
        """
        weigths_positive = labels
        weigths_negative = 1 - labels
        label_positive = np.ones_like(labels)
        label_negative = np.zeros_like(labels)
        weights = np.hstack((weigths_positive, weigths_negative))
        weighted_labels = np.hstack((label_positive, label_negative))
        weighted_features = np.vstack((features, features))
        return weighted_labels, weighted_features, weights

    def fit(self, X, y):
        """Fit on continuous labels via the weighted-duplication trick."""
        # 1. Generate dataset, use continuous labels to weighted dataset trick
        weighted_labels, weighted_features, weights = self.from_countinuous_to_weighted_binary_labels(y, X)
        # 2. Train model
        return LogisticRegression.fit(self, weighted_features, weighted_labels, sample_weight=weights)

# + id="Rk0OY0Yp_6Ui"
def compute_display_valuations_with_logistic_regression(dataset):
    """Attach 'scenario_valuation' predicted by an unpenalized logistic regression.

    NOTE(review): `penalty='none'` was deprecated in scikit-learn 1.2 and
    removed in later releases in favor of `penalty=None` — confirm the
    pinned sklearn version before upgrading.
    """
    current_dataset = dataset.copy()
    labels = current_dataset['labels'].values
    features = np.vstack(current_dataset['features'].values)
    model = CountinuousLogisticRegression(max_iter=1000, penalty='none')
    model.fit(features, labels)
    model_predictions = model.predict_proba(features)[:, 1]
    current_dataset['scenario_valuation'] = model_predictions
    return current_dataset

# + id="2XbsNQtt_6Uk"
current_dataset = generate_dataset(seed=2390)
# Initialize with last click
current_dataset['labels'] = current_dataset['conversion'].values

# Same 30-iteration fixed point as before, but with the LR-based estimator.
scenario_valuations = []
mtc_likelihoods = []
for _ in range(30):
    current_dataset = run_one_iteration(current_dataset, compute_display_valuations_with_logistic_regression)
    scenario_valuation, current_mtc_likelihood = compute_metrics(current_dataset, compute_display_valuations_with_logistic_regression)
    scenario_valuations += [scenario_valuation]
    mtc_likelihoods += [current_mtc_likelihood]

# + id="u0Wx9zNj_6Un" outputId="c3047222-7c8a-4fd6-c62d-47880b614da7" colab={"base_uri": "https://localhost:8080/", "height": 299}
plot_metrics(scenario_valuations, mtc_likelihoods)

# + [markdown] id="CT-cjvH9IzT_"
# ## Data distribution robustness

# + id="o-vzXI-Z_6Uo"
# Sweep the scenario-1 probability over (0, 1) and record, for each value,
# the valuations after one iteration (last touch) and after 100 iterations
# (converged core attribution).
valuations_given_proba_core_attribution = {}
valuations_given_proba_last_touch = {}
scenario_1_probas = np.linspace(0, 1, 11)[1:-1]
for scenario_1_proba in scenario_1_probas:
    current_dataset = generate_dataset(seed=2390, scenario_1_proba=scenario_1_proba)
    # Initialize with last touch: last display before the conversion gets all credit
    current_dataset['labels'] = current_dataset['conversion'].values
    scenario_valuations = []
    mtc_likelihoods = []
    for i in range(100):
        current_dataset = run_one_iteration(current_dataset, compute_display_valuations_by_averaging)
        scenario_valuation, _ = compute_metrics(current_dataset, compute_display_valuations_by_averaging)
        if i == 0:
            valuations_given_proba_last_touch[scenario_1_proba] = scenario_valuation
    valuations_given_proba_core_attribution[scenario_1_proba] = scenario_valuation

# + id="j6i2dUm9JOdA" outputId="7c4e14cc-704a-4526-a27c-8d0769d29ade" colab={"base_uri": "https://localhost:8080/", "height": 245}
def plot_display_valuation_given_scenario_proba(valuations_given_proba, ax):
    """Plot valuation of displays A and B as a function of scenario-1 probability."""
    for display in ['Display A', 'Display B']:
        ax.plot(
            scenario_1_probas,
            [valuations_given_proba[p][display] for p in scenario_1_probas],
            label=f'{display}',
            marker='x'
        )
    ax.legend()
    ax.set_xlabel('Probability of scenario 1')
    ax.set_ylabel('Display valuation')

fig, axes = plt.subplots(1, 2, figsize=(8, 3), sharey=True)
plot_display_valuation_given_scenario_proba(valuations_given_proba_last_touch, axes[0])
axes[0].set_title('Last touch')
plot_display_valuation_given_scenario_proba(valuations_given_proba_core_attribution, axes[1])
axes[1].set_title('Core attribution')
None

# + id="4zR3walTJTDr"
# Saving figures for paper
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
plot_display_valuation_given_scenario_proba(valuations_given_proba_last_touch, ax)
fig.subplots_adjust(bottom=0.15, left=0.15, top=0.95, right=0.95)
plt.savefig('distributional_robustness_last_touch.pdf')
plt.close(fig)

fig, ax = plt.subplots(1, 1, figsize=(5, 3))
plot_display_valuation_given_scenario_proba(valuations_given_proba_core_attribution, ax)
fig.subplots_adjust(bottom=0.15, left=0.15, top=0.95, right=0.95)
plt.savefig('distributional_robustness_core_attribution.pdf')
plt.close(fig)

# + id="trF5sVQeKBqx"
Core_attribution_synthetic_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: dl-projects
#     language: python
#     name: dl-projects
# ---

# Job-title classification: load the scraped postings CSV, clean the
# description text, and prepare the label mapping and tokenizer.
import amlutils
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

tf.__version__

# Let TensorFlow grow GPU memory on demand instead of grabbing it all.
# BUG FIX: the original call was missing its closing parenthesis, which made
# the whole script a SyntaxError.
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)

# ## Parse Training Data

# 'Query' is the searched job title (the label); 'Description' is the
# posting body (the input text).
csv_file = "./data/Top30.csv"
csv_data = amlutils.csv_utils.parse_csv_file(csv_file, quotechar='"', encoding="latin-1")
headers = csv_data[0]  # first row is the header row
csv_data = csv_data[1:]
print(headers)
print(len(csv_data))

# +
import re


def striphtml(data):
    """Remove HTML tags (anything between '<' and '>') from *data*."""
    p = re.compile(r'<.*?>')
    return p.sub('', data)


def stripnewlinechars(data):
    """Remove literal '\\n' and '\\r' escape sequences from *data*."""
    data = data.replace(r"\n", "")
    data = data.replace(r"\r", "")
    return data
# -

job_description_index = headers.index("Description")
job_title_index = headers.index("Query")

job_titles = [row[job_title_index] for row in csv_data]
job_descriptions = [row[job_description_index] for row in csv_data]
no_html_tags_descriptions = [striphtml(desc) for desc in job_descriptions]

# ## Ensure there is no Job Title in description

# Strip the label text out of each description so the model cannot cheat by
# reading the title verbatim.
no_label_in_descriptions = [
    desc.replace(job_titles[i], "")
    for i, desc in enumerate(no_html_tags_descriptions)
]

no_html_tags_descriptions[0], no_label_in_descriptions[0]

# +
classes = set(job_titles)
num_clasess = len(classes)
vocabulary_size = 5000
num_samples = len(job_titles)

# Label string <-> integer index maps for training and for decoding
# predictions later.
# NOTE(review): iterating a set gives a hash-dependent order, so the
# label -> index assignment can differ between runs — consider sorting.
class_str_to_index = {label: index for index, label in enumerate(classes)}
index_to_class = {index: label for label, index in class_str_to_index.items()}

# convert string labels to integer indices for training
job_titles_indexed = [class_str_to_index[job_title] for job_title in job_titles]
print(f"Num Classes: {len(classes)}, Num Samples: {num_samples}")
# -

# Keep only the `vocabulary_size` most frequent tokens.
text_tokenizer = tf.keras.preprocessing.text.Tokenizer(
    num_words=vocabulary_size
)
text_tokenizer.fit_on_texts(no_label_in_descriptions)
# NOTE(review): the tokenizer is fitted on the label-stripped texts, but the
# training sequences below are built from `no_html_tags_descriptions`
# (labels still present) — confirm this asymmetry is intended.
train_sequences = text_tokenizer.texts_to_sequences(no_html_tags_descriptions)

# Small debugging subset (not used below).
index = 100
subset_train_sequences, subset_job_titles = train_sequences[:index], job_titles_indexed[:index]

# +
batch_size = 64

# Stream (sequence, label) pairs; shuffle, pad each batch to its longest
# sequence, and prefetch so input prep overlaps training.
dataset = tf.data.Dataset.from_generator(
    lambda: zip(train_sequences, job_titles_indexed),
    output_types=(tf.int32, tf.int32),
)

dataset = dataset.shuffle(100000).map(
    lambda features, label: (features, label)
).padded_batch(
    batch_size=batch_size,
    padded_shapes=((None,), ()),
).prefetch(
    buffer_size=tf.data.experimental.AUTOTUNE
)
# -

# 80/20 train/test split, measured in whole batches.
num_train_samples = int(0.8 * num_samples // batch_size)
num_test_samples = num_samples // batch_size - num_train_samples
print(num_train_samples, num_test_samples)

train_dataset = dataset.take(num_train_samples)
test_dataset = dataset.skip(num_train_samples)

len(list(test_dataset.as_numpy_iterator()))

# +
# list(train_dataset.as_numpy_iterator())
# -

# Embedding -> BiLSTM -> dense classifier emitting raw logits
# (hence from_logits=True in the loss below).
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocabulary_size, 32),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(num_clasess)
])

model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.keras.optimizers.Adam(1e-4),
    metrics=['accuracy'],
)
model.summary()

history = model.fit(
    train_dataset,
    epochs=2,
)

# ## Evaluation on Random Sample

# +
import random

# Pick one description at random and compare predicted vs. true class.
index = random.randint(0, len(job_titles_indexed)-1)
description = no_label_in_descriptions[index]
description_sequence = text_tokenizer.texts_to_sequences([description])
label = model.predict(description_sequence)
softmax_labels = tf.nn.softmax(label)
print(index, job_titles_indexed[index], index_to_class[job_titles_indexed[index]],
      tf.math.argmax(softmax_labels, axis=1))
# print(description, description_sequence, )
# -

# ## Evaluation on Test Dataset

gts = []
predictions = []
for batch in test_dataset:
    features, gt = batch
    # BUG FIX: Sequential.predict_classes() was deprecated and removed in
    # TF >= 2.6; take the argmax over the logits instead (same result,
    # and matches the tf.math.argmax idiom used above).
    prediction = tf.math.argmax(model.predict(features), axis=1)
    gts.extend(gt)
    predictions.extend(prediction)

len(gts), len(predictions)

accuracy = tf.keras.metrics.Accuracy()
accuracy(gts, predictions)

# Manual accuracy computation as a cross-check of the Keras metric above.
total_correct = 0
total_incorrect = 0
for i in range(len(gts)):
    if gts[i] == predictions[i]:
        total_correct += 1
    else:
        total_incorrect += 1
print(total_correct, total_incorrect, total_correct/(total_correct + total_incorrect))

# ## Plotting

# +
import matplotlib.pyplot as plt


def plot_graphs(history, metric):
    """Plot one Keras training-history metric against epoch number."""
    plt.plot(history.history[metric])
    plt.xlabel("Epochs")
    plt.ylabel(metric)
    plt.legend([metric])
    plt.show()
# -

history.history["accuracy"]
plot_graphs(history, "accuracy")
text/job_titles/job_title_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     argv:
#     - C:\Users\Chad\.virtualenvs\python-shit-Hatrr_zd\Scripts\python.exe
#     - -m
#     - ipykernel_launcher
#     - -f
#     - '{connection_file}'
#     display_name: hydrogen-python-shit
#     language: python
#     name: hydrogen-python-shit
# ---

# + [markdown] nteract={"transient": {"deleting": false}}
# # COVID-19 NOTEBOOK
# - - -
#

# + [markdown] nteract={"transient": {"deleting": false}}
# ### `import` libraries

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
import json
import pandas as pd
import requests
from datetime import datetime
from datetime import timedelta

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Fetch the full dataset (latest totals + per-location histories) in one call.
# send_headers = {'Accept': 'application/json'}
response = requests.get('https://coronavirus-tracker-api.herokuapp.com/all')
response.encoding = 'utf-8'  # Optional: requests infers this internally
data = response.json()

# + [markdown] nteract={"transient": {"deleting": false}}
# ### save `covid19.json`

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
def save_json():
    """Dump the fetched API payload to covid19.json in the working directory."""
    with open('covid19.json', 'w') as f:
        json.dump(data, f)

save_json()

# + [markdown] nteract={"transient": {"deleting": false}}
# #### latest variables

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Global totals from the 'latest' section of the payload.
confirmed_latest = data['latest']['confirmed']
deaths_latest = data['latest']['deaths']
recovered_latest = data['latest']['recovered']
deaths_updated_json = data['deaths']['last_updated']

def confirmed_func():
    """Print the latest global confirmed-case count."""
    print(confirmed_latest)

confirmed_func()

# + [markdown] nteract={"transient": {"deleting": false}}
# ## print death locations
#

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
def print_latest():
    """ prints latest stats (confirmed, deaths, last-updated, recovered) """
    print("Confirmed: " + str(confirmed_latest))
    print("Deaths: " + str(deaths_latest))
    print("Last Updated: " + deaths_updated_json)
    print('Recovered: ' + str(recovered_latest))

print_latest()

# + [markdown] nteract={"transient": {"deleting": false}}
# ## Confirmed
# confirmed locations DataFrame
#

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# One row per reporting location with its latest confirmed count.
pd_confirmed_locations = pd.DataFrame(data['confirmed']['locations'],
    columns=["country", "latest", "province"])

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
print(pd_confirmed_locations.head())
print("-=" * 40)
# Top locations by confirmed count.
print(pd_confirmed_locations.sort_values(ascending=False, by=['latest']).head())
print("-=" * 40)
print('Number of colums in Dataframe : ', len(pd_confirmed_locations.columns))
print(pd_confirmed_locations.columns)
print('Number of rows in Dataframe : ', len(pd_confirmed_locations.index))

# + [markdown] nteract={"transient": {"deleting": false}}
# ### deaths
#
# death locations

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Per-location death counts, highest first.
pd_death_locations = pd.DataFrame(data['deaths']['locations'],
                                  columns=["country", "latest", "province"]
                                  ).sort_values(ascending=False, by='latest')
print(pd_death_locations.head())

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
test_pd = pd.DataFrame(data['deaths']['locations'], columns=["country", "history"])

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Expand each location's per-date history dict into one column per date.
history = pd.DataFrame(data['deaths']['locations'], columns=["history"]).apply(pd.Series)

# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Today's and yesterday's dates formatted like the API's history keys.
# NOTE(review): the '%#m' flag (no-leading-zero month) is Windows-specific
# strftime syntax — this will fail or misbehave on POSIX ('%-m' there).
now = datetime.now()
d = now.strftime("%#m/%d/%y")
yesterday = (datetime.now() - timedelta(days=1)).strftime("%#m/%d/%y")
print(d)
print("yesterday = " + yesterday)
covid19/covid19notebook.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import math import numpy as np import pandas as pd import uproot import matplotlib.pyplot as plt from sificc_lib import utils, Event, Simulation, root_files pd.options.display.max_columns = 100 pd.options.display.max_rows = 100 pd.options.display.float_format = '{:,.3f}'.format # %matplotlib inline # - simulation = Simulation(root_files.HPC_LLR) print("analysis of the 'highest statistics' root file") utils.show_simulation_setup(simulation) print() utils.show_root_file_analysis(simulation, only_valid=True) utils.calculate_normalizations(simulation, only_valid=True)
root-files-analysis.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + colab={} colab_type="code" id="zX4Kg8DUTKWO"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# + colab={} colab_type="code" id="gqWabzlJ63nL"
import numpy as np
import matplotlib.pyplot as plt

# + colab={} colab_type="code" id="sJwA96JU00pW"
def plot_series(time, series):
    """Plot a time series (value vs. time) with labels and a grid."""
    plt.figure(figsize=(10, 6))
    plt.plot(time, series)
    plt.xlabel("time")
    plt.ylabel("value")
    plt.grid(True)
    plt.show()

# + colab={} colab_type="code" id="t30Ts2KjiOIY"
def trend(time, slope=0):
    """Linear trend component: slope * time."""
    return slope * time

# + colab={} colab_type="code" id="BLt-pLiZ0nfB"
# Four years of daily time steps; a pure upward trend.
time = np.arange(4 * 365 + 1)
baseline = 10
series = trend(time, 0.1)
plot_series(time, series)

# + colab={} colab_type="code" id="89gdEnPY1Niy"
def seasonal_pattern(season_time):
    """Arbitrary within-period pattern: cosine ramp then exponential decay."""
    return np.where(season_time < 0.4,
                    np.cos(season_time * 2 * np.pi),
                    1 / np.exp(3 * season_time))

def seasonality(time, period, amplitude=1, phase=0):
    """Repeat `seasonal_pattern` every `period` steps, scaled by `amplitude`."""
    season_time = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(season_time)

# + colab={} colab_type="code" id="7kaNezUk1S9l"
baseline = 10
amplitude = 40
series = seasonality(time, period=365, amplitude=amplitude)
plot_series(time, series)

# + colab={} colab_type="code" id="AyqFdaIN1oy5"
# Baseline + trend + yearly seasonality combined.
slope = 0.05
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
plot_series(time, series)

# + colab={} colab_type="code" id="kw0tvS8L1R_8"
def noise(time, noise_level=1):
    """White (Gaussian) noise of the same length as `time`."""
    return np.random.randn(len(time)) * noise_level

# + colab={} colab_type="code" id="2bRDx8K816N9"
noise_level = 15
noisy_series = series + noise(time, noise_level)
plot_series(time, noisy_series)

# + colab={} colab_type="code" id="8tUBeSvE77Sw"
noise_level = 40
noisy_series = series + noise(time, noise_level)
plot_series(time, noisy_series)

# + colab={} colab_type="code" id="GICxGswL2aqK"
def autocorrelation(time, amplitude):
    """AR series with two lag terms (lags 50 and 33); seeded with 100s.

    NOTE: intentionally redefined by the next cell — only the one-lag
    version below is in effect for the plots that follow.
    """
    rho1 = 0.5
    rho2 = -0.1
    ar = np.random.randn(len(time) + 50)
    ar[:50] = 100
    for step in range(50, len(time) + 50):
        ar[step] += rho1 * ar[step - 50]
        ar[step] += rho2 * ar[step - 33]
    return ar[50:] * amplitude

# + colab={} colab_type="code" id="mCaWIWoDGVCL"
def autocorrelation(time, amplitude):
    """AR(1) series with coefficient 0.8 (overrides the definition above)."""
    rho = 0.8
    ar = np.random.randn(len(time) + 1)
    for step in range(1, len(time) + 1):
        ar[step] += rho * ar[step - 1]
    return ar[1:] * amplitude

# + colab={} colab_type="code" id="MVM204K66bnC"
series = autocorrelation(time, 10)
plot_series(time[:200], series[:200])

# + colab={} colab_type="code" id="M8C6ZBUp6jmZ"
series = noise(time)
plot_series(time[:200], series[:200])

# + colab={} colab_type="code" id="9MZ2sCmM8XPU"
series = autocorrelation(time, 10) + trend(time, 2)
plot_series(time[:200], series[:200])

# + colab={} colab_type="code" id="hqx5et9Bzp5e"
series = autocorrelation(time, 10) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
plot_series(time[:200], series[:200])

# + colab={} colab_type="code" id="XoRqB8AK0Sfz"
# Simulate a regime change: splice a different process in after t = 200.
series = autocorrelation(time, 10) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
series2 = autocorrelation(time, 5) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550
series[200:] = series2[200:]
#series += noise(time, 30)
plot_series(time[:300], series[:300])
practice/courses/Sequences, Time Series and Predicion/week1/S+P_Week_1_Lesson_1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Scraping Arxiv Metadata using Arxiv API
#
# In this tutorial, we'll scrape metadata from Arxiv using the Arxiv API. Before starting, let's have a quick overview of Arxiv data and the ways of accessing it.
#
# ### About Arxiv
#
# Arxiv is an electronic repository for preprints. It's mostly used by quantitative fields such as computer science, math etc. Papers on Arxiv are not subject to peer-review. Arxiv, which is owned and operated by Cornell University, is currently hosting over 1.5 million papers. More information about Arxiv can be found in [here](https://arxiv.org/).
#
# ### Arxiv Data Access
#
# There are several options to scrape data from Arxiv. You need to first determine what kind of data you need. There are two types of data one can scrape from Arxiv.
#
# 1. Metadata
# 2. Full-Text
#
# Metadata includes information about papers such as title, authors, publication and last update dates, abstract, category, Arxiv ID for the paper etc. Metadata for Arxiv papers are stored and retrieved in Atom format. More information about Atom can be found in [here](https://validator.w3.org/feed/docs/atom.html).
#
# Full-Text data includes paper contents as well, mostly in Tex/Latex format. In this tutorial we'll study scraping metadata. There are three ways in which one can access metadata:
#
# 1. OAI-PMH: Good for bulk metadata access.
# 2. Arxiv API: Good for real-time programmatic use.
# 3. RSS Feeds: Best for accessing daily updates on Arxiv.
#
# ### Arxiv API
#
# Scraping metadata using the Arxiv API is pretty straightforward. One first needs to construct a query. A query to the Arxiv API is a single URL. For instance, let's say we'd like to access the first 2 papers whose titles include the word "graph" in the category of statistical theory stat.TH. For the available category types and other query-constructing tips please refer to [Arxiv API User Manual](https://arxiv.org/help/api/user-manual#query_details). Note that some categories are cross-listed.
#
# Example Query:
#
# http://export.arxiv.org/api/query?search_query=ti:graph+AND+cat:stat.TH&start=0&max_results=2
#
#
# ### Handy Libraries in Python
#
# We'll use `urllib` to query the Arxiv API and `feedparser` to parse the Atom returned. Let's retrieve the feed to the above query and print metadata for every paper.
#
#

# import the required libraries
import urllib.request
import feedparser


def query_arxiv(search_query, start, max_results = -1):
    """Query the Arxiv API and return the parsed Atom feed.

    search_query -- Arxiv search expression, already URL-encoded by the
        caller (e.g. 'ti:graph+AND+cat:stat.TH'; '+' stands for a space).
    start -- zero-based index of the first result to return.
    max_results -- number of results to request; -1 (default) omits the
        max_results parameter so the API's own default applies.
    """
    # accessing Arxiv API
    base_url = 'http://export.arxiv.org/api/query?'
    # constructing our query; max_results is only appended when requested
    query = 'search_query=%s&start=%i%s' % (search_query, start, "" if max_results == -1 else ("&max_results=%i"% max_results))
    # perform a GET request using the base_url and query
    response = urllib.request.urlopen(base_url+query).read()
    # parse the response using feedparser
    feed = feedparser.parse(response)
    return feed


def main():
    """Run a sample query and print feed info plus per-paper metadata."""
    search_query = 'ti:graph+AND+cat:stat.TH'
    start = 0
    max_results = 2

    # Querying the Arxiv API
    feed = query_arxiv(search_query, start, max_results)

    # Print the feed information
    print('Feed last updated: %s' % feed.feed.updated)
    print('Total results for this query: %s' % feed.feed.opensearch_totalresults)
    print('Max results for this query: %s\n' % len(feed.entries))

    # Per-entry metadata: title, author list, publication date, Arxiv ID.
    for entry in feed.entries:
        print("Title: ", entry.title)
        print("Authors: ")
        for name in (author.name for author in entry.authors):
            print(name)
        print("Publication Date: ", entry.published)
        print("Arxiv ID: ", entry.id, "\n")


# NOTE(review): runs a live network request at import/execution time.
main()

# ### Sample Queries
#
# * "LSTM" in title or abstract
#
# http://export.arxiv.org/api/query?search_query=all:LSTM
#
# * "graph" in title but the category shouldn't be stat.TH; we start at result number 2 and expect 4 papers in total. Note that Arxiv returns 2000 results maximum per query, so it's helpful that the order of papers is the same for the same query every time.
#
# http://export.arxiv.org/api/query?search_query=ti:graph+ANDNOT+cat:stat.TH&start=2&max_results=4
#
# * Papers of either <NAME> or <NAME>. Note that if you don't specify the number of results, it'll return 10 by default. Moreover, %22 is used for search terms with more than one word.
#
# http://export.arxiv.org/api/query?search_query=au:%22daphne+koller%22+OR+au:%22larry+wasserman%22
#
Tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import seaborn as sns import matplotlib.pyplot as plt sns.set(color_codes=True) # %matplotlib inline auto = pd.read_csv('Automobile.csv') auto.head() sns.distplot(auto['city_mpg']) sns.distplot(auto['normalized_losses'], kde=False, rug=True) sns.jointplot(auto['engine_size'], auto['horsepower']) # kind : { "scatter" | "reg" | "resid" | "kde" | "hex" }, optional sns.jointplot(auto['engine_size'], auto['horsepower'], kind="kde") sns.pairplot(auto[['normalized_losses', 'engine_size', 'horsepower']]) sns.stripplot(auto['fuel_type'], auto['horsepower'], jitter = True) # One categorical value plotted against one continous variable sns.swarmplot(auto['fuel_type'], auto['horsepower']) sns.boxplot(auto['number_of_doors'], auto['horsepower']) sns.boxplot(auto['number_of_doors'], auto['horsepower'], hue=auto['fuel_type']) sns.barplot(auto['body_style'], auto['horsepower'],hue=auto['engine_location']) sns.countplot(auto['body_style'], hue=auto['engine_location']) sns.pointplot(auto['fuel_system'], auto['horsepower'], hue=auto['number_of_doors']) # + sns.factorplot(x="fuel_type", y="horsepower", data=auto, hue="number_of_doors", col="engine_location", kind='swarm') # Various types of kind input : {point, bar, count, box, violin, strip} # - sns.lmplot(x="horsepower", y="peak_rpm", data=auto) sns.lmplot(x="horsepower", y="peak_rpm", data=auto, hue="fuel_type")
CourseContent/03-Intro.to.Python.and.Basic.Statistics/Week2/Automobile case study.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 1 Preparations # ### 1.1 Set Options # + ### Set options here. #Structural options filetype_input = 'gdx' #Choose input file type: 'gdx' or 'csv' gams_dir = 'C:/GAMS/win64/28.2' #Only required if filetype_input == 'gdx' market = 'DayAhead' #Choose from ['Balancing', 'DayAhead', 'FullYear', 'Investment'] COMMODITY = 'Electricity' #Choose from: ['Electricity', 'H2', 'Other']. Add to csv-files name (only relevant if filetype_input == 'csv'). If 'Other': go to cell 1.4.0. SCENARIO = 'hubs' #Add scenario to read file name YEAR = 'all' #Add year to read file name (e.g. '2025', '2035', 'full') SUBSET = 'all' #Add subset to read file name (e.g. 'full') year = 2035 #Year to be displayed LINES = 'CongestionFlow' #Choose from: ['Capacity', 'Flow', 'CongestionFlow']. For 'CongestionFlow', exo_end automatically switches to 'Total'. exo_end = 'Total' # Choose from ['Endogenous', 'Exogenous', 'Total']. For 'CongestionFlow', exo_end automatically switches to 'Total'. S = 'S02' #Season T = 'T073' #Hour # hubs hub_display = True hub_size = 10 hub_decimals = 0 #Number of decimals shown for hub capacities background_hubsize = True #Displaying the true size of the hub as a circle on the map. hub_area = 2.8 #MW / km^2, background hub size on map. hub_area_opacity = 0.3 #Opacity of background hub size. 
#Visual options label_min = 4 #Minimum transmission capacity (GW) shown on map in text font_line = 10 #Font size of transmission line labels font_hub = 10 #Font size of hub labels font_region = 12 #Font size of region labels line_decimals = 1 #Number of decimals shown for line capacities line_width_constant = 5 #Constant related to thickness of lines: the higher the number, the narrower the lines will be flowline_breaks = [0, 40, 94.999, 100] #Breaks for different congestion categories legend_values = ['Fully congested', '40-95% congested', '< 50% congested'] #Values displayed in legend #colors background_color = 'white' regions_ext_color = 'lightgrey' regions_model_color = 'grey' region_text = 'black' capline_color = 'orange' flowline_color = ['#3D9200', '#feb24c','#960028'] line_text = 'black' hub_color = 'lightblue' hub_background_color = 'lightblue' hub_text = 'black' # - # ### 1.2 Import Packages from pathlib import Path import sys import os import glob import gdxpds import pandas as pd import numpy as np import geopandas as gpd import folium from folium import plugins from IPython.display import HTML, display import json from folium.features import DivIcon #For text labels on hubs from IPython.display import display, HTML from csv import reader display(HTML(data=""" <style> div#notebook-container { width: 95%; } div#menubar-container { width: 65%; } div#maintoolbar-container { width: 99%; } </style> """)) # ### 1.3 Read geographic files # + project_dir = Path('.\input') #Load coordinates files df_unique = pd.read_csv(project_dir/'geo_files/coordinates_RRR.csv') df_region = df_unique.loc[df_unique['Type'] == 'region', ] df_bypass = pd.read_csv(project_dir/'geo_files/bypass_lines.csv') # coordinates of 'hooks' in indirect lines, to avoid going trespassing third regions #Define names of geojson and shapefile layers r_in = list(df_unique.loc[(df_unique['Display'] == 1) & (df_unique['Type'] == 'region'), 'RRR']) r_out = list(df_unique.loc[(df_unique['Display'] == 
0) & (df_unique['Type'] == 'region'), 'RRR']) layers_in = {region: '' for region in r_in} layers_out = {region: '' for region in r_out} #Create dictionaries with layer names for each region; if both a shapefile and geojson file are available for one region, the geojson file is used. for region in r_in: layers_in[region] = glob.glob(f'{project_dir}/geo_files/geojson_files/'+ region + '.geojson') if bool(layers_in[region]) == False: layers_in[region] = glob.glob(f'{project_dir}/geo_files/shapefiles/'+ region + '.shp') for region in r_out: layers_out[region] = glob.glob(f'{project_dir}/geo_files/geojson_files/'+ region + '.geojson') if bool(layers_out[region]) == False: layers_out[region] = glob.glob(f'{project_dir}/geo_files/shapefiles/'+ region + '.shp') for region in layers_in: layers_in[region] = str(layers_in[region])[2:-2] #Remove brackets from file names for region in layers_out: layers_out[region] = str(layers_out[region])[2:-2] #Remove brackets from file names #Convert shapefiles to geojson files for region in layers_out: if layers_out[region][-4:] == '.shp': gpd.read_file(layers_out[region]).to_file(f'{project_dir}/geo_files/geojson_files/'+ region + '.geojson', driver='GeoJSON') layers_out[region] = layers_out[region].replace('shapefiles', 'geojson_files').replace('.shp', '.geojson') # - # # 1.4 Read run-specific files # ##### 1.4.0 If COMMODITY == 'Other': define variables or file names if COMMODITY == 'Other': if filetype_input == 'gdx': var_list = ['G_CAP_YCRAF', 'XH2_CAP_YCR', 'XH2_FLOW_YCRST', 'PRO_YCRAGFST'] #Fill in variables to read, e.g. ['G_CAP_YCRAF', 'X{COMMODITY}_CAP_YCR', 'X{COMMODITY}_FLOW_YCRST', 'PRO_YCRAGST'] if filetype_input == 'csv': flow_file = 'FlowH2Hourly_'+ SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' #Fill in flow file name if applicable, e.g. 
'Flow{COMMODITY}Hourly_'+ SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' transcap_file = 'CapacityH2Transmission_' + SCENARIO + '_' + YEAR + '_'+ SUBSET + '.csv' #Fill in transmission capacity file name, e.g. 'Capacity{COMMODITY}Transmission_'+ SCENARIO + '_' + YEAR + '_'+ SUBSET + '.csv' # ### 1.4A - GDX Inputs # ##### 1.4A.1 Function: reading gdx-files if filetype_input == 'gdx': def df_creation(gdx_file, varname): df = pd.DataFrame() if '_' in gdx_file: # if yes: extract scenario name from gdx filename scenario = gdx_file.split('_', 3)[-3] year = gdx_file.split('_', 3)[-2] subset = gdx_file.split('_', 3)[-1][:-4] market = gdx_file.split('\\', 1)[0].split('/',3)[-1] else: # if no: use nan instead scenario = 'nan' # create empty temporary dataframe and load the gdx data into it temp = pd.DataFrame() temp = gdxpds.to_dataframe(gdx_file, varname, gams_dir=gams_dir, old_interface=False) # add a scenario column with the scenario name of the current iteration temp['Scenario'] = scenario temp['Market'] = market temp['run'] = scenario + '_' + year + '_' + subset # rearrange the columns' order cols = list(temp.columns) cols = [cols[-1]] + cols[:-1] temp = temp[cols] # concatenate the temporary dataframe to the preceeding data df = pd.concat([df, temp], sort=False) return df # ##### 1.4A.2 - Define var_list if filetype_input == 'gdx': if COMMODITY == 'Electricity': var_list = [] if LINES == 'Capacity' or LINES == 'CongestionFlow': var_list = var_list + ['G_CAP_YCRAF', 'X_CAP_YCR'] if LINES == 'Flow' or LINES == 'CongestionFlow': var_list = var_list + ['X_FLOW_YCRST'] if hub_display == True: var_list = var_list + ['PRO_YCRAGFST'] if COMMODITY == 'H2': var_list = [] if LINES == 'Capacity' or LINES == 'CongestionFlow': var_list = var_list + ['G_CAP_YCRAF', 'XH2_CAP_YCR'] if LINES == 'Flow' or LINES == 'CongestionFlow': var_list = var_list + ['XH2_FLOW_YCRST'] if hub_display == True: var_list = var_list + ['PRO_YCRAGFST'] # ##### 1.4A.3 - Use function to read inputs if 
filetype_input == 'gdx': runs = list() gdx_file_list = list() # directory to the input gdx file(s) #gdx_file_list = gdx_file_list + glob.glob('./input/results/'+ market + '/*.gdx') gdx_file = glob.glob('./input/results/'+ market + '\\MainResults_' + SCENARIO + '_' + YEAR + '_' + SUBSET + '.gdx') gdx_file = gdx_file[0] all_df = {varname: df for varname, df in zip(var_list,var_list)} for varname, df in zip(var_list, var_list): all_df[varname] = df_creation(gdx_file, varname) if all_df[varname]['run'][0] not in runs: runs.append(all_df[varname]['run'][0]) #run_dict = dict(zip(gdx_file_list, runs) ) #all_df = dict((run_dict[key], value) for (key, value) in all_df.items()) #Transmission capacity data if LINES == 'Capacity' or LINES == 'CongestionFlow': if COMMODITY == 'Electricity': df_capacity = all_df['X_CAP_YCR'] if COMMODITY == 'H2': df_capacity = all_df['XH2_CAP_YCR'] if COMMODITY == 'Other': df_capacity = all_df[var_list[1]] #Transmission flow data if LINES == 'Flow' or LINES == 'CongestionFlow': if COMMODITY == 'Electricity': df_flow = all_df['X_FLOW_YCRST'] if COMMODITY == 'H2': df_flow = all_df['XH2_FLOW_YCRST'] if COMMODITY == 'Other': if LINES == 'Flow': df_flow = all_df[var_list[1]] if LINES == 'CongestionFlow': df_flow = all_df[var_list[2]] # ##### 1.4A.4 - Hub data if filetype_input == 'gdx' and hub_display == True: hub_windgen = (pd.read_csv(project_dir/'geo_files/hub_technologies.csv', sep = ',', quotechar = '"').hub_name) df_capgen = all_df['G_CAP_YCRAF'] if LINES == 'Flow' or LINES == 'CongestionFlow': df_hubprod = all_df['PRO_YCRAGFST'] df_hubprod['Y'] = df_hubprod['Y'].astype(int) df_hubprod = df_hubprod.loc[(df_hubprod['G'].isin(hub_windgen)) & (df_hubprod['TECH_TYPE'] == 'WIND-OFF') & \ (df_hubprod['Y']==year) & (df_hubprod['SSS'] == S) & (df_hubprod['TTT']==T), ] # ### 1.4B1 - Read CSV files map_name = 'Transmission' + COMMODITY + '_' + LINES + '_' + str(year) + '_Map.html' if filetype_input == 'csv': generation_file = 'CapacityGeneration_'+ 
SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' if COMMODITY == 'Electricity': flow_file = 'FlowElectricityHourly_'+ SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' transcap_file = 'CapacityElectricityTransmission_'+ SCENARIO + '_' + YEAR + '_'+ SUBSET + '.csv' if COMMODITY == 'H2': flow_file = 'FlowH2Hourly_'+ SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' transcap_file = 'CapacityH2Transmission_'+ SCENARIO + '_' + YEAR + '_'+ SUBSET + '.csv' #Transmission capacity data df_capacity = pd.read_csv(str(project_dir) + '/results/' + str(market) + '/' + str(transcap_file), sep = ',', quotechar = '"') #Transmission flow data if LINES == 'Flow' or LINES == 'CongestionFlow': df_flow = pd.read_csv(str(project_dir) + '/results/' + str(market) + '/' + str(flow_file), sep = ',', quotechar = '"') if hub_display == True: prod_file = 'ProductionHourly_'+ SCENARIO + '_' + YEAR + '_' + SUBSET + '.csv' hub_windgen = (pd.read_csv(project_dir/'geo_files/hub_technologies.csv', sep = ',', quotechar = '"').hub_name) #Generation capacity data df_capgen = pd.read_csv(str(project_dir) + '/results/' + str(market) + '/' + str(generation_file), sep = ',', quotechar = '"') if LINES == 'Flow' or LINES == 'CongestionFlow': #Hub production data df_hubprod = pd.read_csv(str(project_dir) + '/results/' + str(market) + '/' + str(prod_file), sep = ',', quotechar = '"') df_hubprod = df_hubprod.loc[(df_hubprod['G'].isin(hub_windgen)) & (df_hubprod['TECH_TYPE'] == 'WIND-OFF') & \ (df_hubprod['Y']==year) & (df_hubprod['SSS'] == S) & (df_hubprod['TTT']==T), ] # ### 1.4B2 - Calibrate column names column_dict = {'Val':'Value', 'Y':'Year', 'C':'Country'} if LINES == 'Capacity' or LINES == 'CongestionFlow': df_capacity = df_capacity.rename(columns = column_dict) if LINES == 'Flow' or LINES == 'CongestionFlow': df_flow = df_flow.rename(columns = column_dict) if hub_display == True: df_capgen = df_capgen.rename(columns = column_dict) if LINES == 'Flow' or LINES == 'CongestionFlow': df_hubprod = 
df_hubprod.rename(columns = column_dict) # # 2 Processing of dataframes # ### 2.1 Replace "EPS" with 0 #Replace possible "Eps" with 0 df_capacity.Value=df_capacity.Value.replace('Eps', 0) df_capacity.Value=pd.to_numeric(df_capacity.Value) if LINES == 'Flow' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Capacity' df_flow.Value=df_flow.Value.replace('Eps', 0) df_flow.Value=pd.to_numeric(df_flow.Value) if hub_display == True: df_capgen.Value=df_capgen.Value.replace('Eps', 0) df_capgen.Value=pd.to_numeric(df_capgen.Value) if LINES == 'Flow' or LINES == 'CongestionFlow': df_hubprod.Value=df_hubprod.Value.replace('Eps', 0) df_hubprod.Value=pd.to_numeric(df_hubprod.Value) # ### 2.2 Add Coordinates + Select Time + Convert Units #Flows if LINES == 'Flow' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Capacity' df_flow['Year'] = df_flow['Year'].astype(int) #Keep only data from moment of interest df_flow = df_flow.loc[df_flow['Year'] == year] df_flow = df_flow.loc[df_flow['SSS'] == S,] df_flow = df_flow.loc[df_flow['TTT'] == T, ] for i,row in df_flow.iterrows(): for j in range(0,len(df_unique)): if df_flow.loc[i,'IRRRE'] == df_unique.loc[j, 'RRR']: df_flow.loc[i,'LatExp'] = df_unique.loc[j, 'Lat'] df_flow.loc[i,'LonExp'] = df_unique.loc[j, 'Lon'] if df_flow.loc[i,'IRRRI'] == df_unique.loc[j, 'RRR']: df_flow.loc[i,'LatImp'] = df_unique.loc[j, 'Lat'] df_flow.loc[i,'LonImp'] = df_unique.loc[j, 'Lon'] #Convert flow from MWh to GWh df_flow['Value'] = df_flow['Value'] / 1000 df_flow = df_flow.reset_index(drop = True) if len(df_flow) == 0: print("Error: Timestep not in data; check year, S and T.") sys.exit() # ### 2.3 Group hub data #Generation Capacities if hub_display == True: df_capgen['Year'] = df_capgen['Year'].astype(int) df_capgen = df_capgen.merge(df_unique, on = 'RRR', how = 'left', left_index = True).reset_index(drop = True) #Add coordinates of each region df_capgen = df_capgen.loc[df_capgen['Year'] == year] #Keep only data from year of 
interest df_hubcap = df_capgen.loc[df_capgen['G'].isin(hub_windgen),] #Keep only hub data df_hubcap_agg = pd.DataFrame(df_hubcap.groupby(['Year', 'Country', 'RRR', 'Lat', 'Lon'])['Value'].sum().reset_index()) #Sum all capacities (of different wind turbines) at each location df_hubcap_agg['Radius'] = np.sqrt(df_hubcap_agg['Value'] * 1000 / hub_area / np.pi) # Create column of hub radius (in kilometres) if LINES == 'Flow' or LINES == 'CongestionFlow': #Merge all relevant hub info into one dataframe df_hubprod = pd.DataFrame(df_hubprod.groupby(['Year', 'Country', 'RRR'])['Value'].sum().reset_index()) #Sum all production (of different wind turbines) at each location df_hubprod.Value = df_hubprod.Value/1000 df_hubprod.rename(columns = {'Value': 'prod_GWh'}, inplace = True) df_hub = pd.merge(df_hubcap_agg, df_hubprod[['RRR', 'prod_GWh']], on = 'RRR', how = 'left', left_index = True).reset_index(drop = True) else: df_hub = df_hubcap_agg.copy() #Display a zero instead of NaN values (i.e. if there is no production in that hour, so df_hubprod row does not exist) df_hub.loc[df_hub.prod_GWh.isna() == True, 'prod_GWh'] = 0 # ### 2.4 Prepare capacity dataframe #Transmission Capacities if LINES == 'Capacity' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Flow' df_capacity['Year'] = df_capacity['Year'].astype(int) df_capacity = df_capacity.loc[df_capacity['Year'] == year, ].reset_index(drop = True) #Keep only data from year of interest if exo_end == 'Total' or LINES == 'CongestionFlow': col_keep = list(np.delete(np.array(df_capacity.columns),np.where((df_capacity.columns == 'VARIABLE_CATEGORY') | \ (df_capacity.columns == 'Value')) )) #Create list with all columns except 'Variable_Category' and 'Value' df_capacity = pd.DataFrame(df_capacity.groupby(col_keep)['Value'].sum().reset_index() )#Sum exogenous and endogenous capacity for each region if exo_end == 'Endogenous' and LINES != 'CongestionFlow': df_capacity = df_capacity.loc[df_capacity['VARIABLE_CATEGORY'] == 
'ENDOGENOUS', ] if exo_end == 'Exogenous' and LINES != 'CongestionFlow': df_capacity = df_capacity.loc[df_capacity['VARIABLE_CATEGORY'] == 'EXOGENOUS', ] for i,row in df_capacity.iterrows(): for j in range(0,len(df_unique)): if df_capacity.loc[i,'IRRRE'] == df_unique.loc[j, 'RRR']: df_capacity.loc[i,'LatExp'] = df_unique.loc[j, 'Lat'] df_capacity.loc[i,'LonExp'] = df_unique.loc[j, 'Lon'] if df_capacity.loc[i,'IRRRI'] == df_unique.loc[j, 'RRR']: df_capacity.loc[i,'LatImp'] = df_unique.loc[j, 'Lat'] df_capacity.loc[i,'LonImp'] = df_unique.loc[j, 'Lon'] if len(df_capacity) == 0: print("Error: No capacity found. Check year and exo_end.") sys.exit() # ### 2.5 Add bypass coordinates for indirect lines # + if LINES == 'Capacity': df_bypass = pd.merge(df_bypass, df_capacity[['Year', 'Country', 'IRRRE', 'IRRRI', 'UNITS', 'Value']], on = ['IRRRE', 'IRRRI'], how = 'left') #Replace existing row by 2 bypass rows keys = list(df_bypass.columns.values)[0:2] i1 = df_capacity.set_index(keys).index i2 = df_bypass.set_index(keys).index df_capacity = df_capacity[~i1.isin(i2)] #Delete existing rows that need bypass df_capacity = df_capacity.append(df_bypass, ignore_index = True, sort = True) #Append bypass rows if LINES == 'Flow' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Capacity' df_bypass = pd.merge(df_bypass, df_flow[['Year', 'Country', 'IRRRE', 'IRRRI', 'SSS', 'TTT', 'UNITS', 'Value']], on = ['IRRRE', 'IRRRI'], how = 'left').dropna() #Replace existing row by 2 bypass rows keys = list(df_bypass.columns.values)[0:2] i1 = df_flow.set_index(keys).index i2 = df_bypass.set_index(keys).index df_flow = df_flow[~i1.isin(i2)]#Delete existing rows that need bypass df_flow = df_flow.append(df_bypass, ignore_index = True, sort = True)#Append bypass rows # - # ### 2.6 Calculate Congestion if LINES == 'CongestionFlow': #Skip this cell in case LINES != 'CongestionFlow' df_flow = pd.merge(df_flow, df_capacity[['Year', 'Country', 'IRRRE', 'IRRRI', 'Value']], on = ['Year', 
'Country', 'IRRRE', 'IRRRI'], how = 'left') df_flow.rename(columns={'Value_x': 'Value', 'Value_y' : 'Capacity'}, inplace = True) df_flow['Congestion'] = df_flow['Value'] / df_flow['Capacity'] * 100 #Create color codes for congestion of lines df_flow['color'] = pd.cut(df_flow['Congestion'], bins = flowline_breaks, labels = flowline_color ) # ### 2.7 One direction capacity lines #When capacity is not the same in both directions, display one: for i,row in df_capacity.iterrows(): for k,row in df_capacity.iterrows(): if (df_capacity.loc[k,'IRRRE'] == df_capacity.loc[i,'IRRRI']) & (df_capacity.loc[k,'IRRRI'] == df_capacity.loc[i,'IRRRE']) & (df_capacity.loc[k,'Value'] != df_capacity.loc[i,'Value']): df_capacity.loc[i,'Value'] = df_capacity.loc[k,'Value'] # ### 2.8 Define line centers #Define centre of each transmission line if LINES == 'Flow' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Capacity' df_flow['LatMid'] = (df_flow['LatImp'] + df_flow['LatExp']) /2 df_flow['LonMid'] = (df_flow['LonImp'] + df_flow['LonExp']) /2 if LINES == 'Capacity' or LINES == 'CongestionFlow': #Skip this cell in case LINES == 'Flow' df_capacity['LatMid'] = (df_capacity['LatImp'] + df_capacity['LatExp']) /2 df_capacity['LonMid'] = (df_capacity['LonImp'] + df_capacity['LonExp']) /2 # # 3 Create Map Features # ### 3.1 Create map # + #Create map map_center = [55.220228, 10.419778] m = folium.Map(location= map_center, zoom_start=5, tiles='') #Add background layers (sea, regions in model, countries outside of model) folium.Polygon(locations = [[-90,-180], [90,-180], [90,180], [-90,180]], color = background_color, fill_color = background_color, opacity = 1, fill_opacity = 1 ).add_to(m) #Background for region in layers_in: folium.GeoJson(data = layers_in[region], name = 'regions_in', \ style_function = lambda x:{'fillColor': regions_model_color, 'fillOpacity': 0.5, 'color': regions_model_color, 'weight':1}).add_to(m) #Regions within model for region in layers_out: 
folium.GeoJson(data = layers_out[region], name = 'regions_out', \ style_function = lambda x:{'fillColor': regions_ext_color, 'fillOpacity': 0.5, 'color': regions_ext_color, 'weight':1}).add_to(m) #Neighbouring countries # - # ### 3.2 Create background hub size if hub_display == True: if background_hubsize == True: for i,row in df_hub.iterrows(): folium.Circle( location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']], popup=df_hub.loc[i,'RRR'], radius = df_hub.loc[i,'Radius']*1000, color = hub_background_color, opacity = 0, fill=True, fill_color = hub_background_color, fill_opacity = hub_area_opacity ).add_to(m) # ### 3.3 Add lines #Add capacity lines if LINES == 'Capacity': for i,row in df_capacity.iterrows(): folium.PolyLine(([df_capacity.loc[i,'LatExp'], df_capacity.loc[i,'LonExp']], \ [df_capacity.loc[i,'LatImp'],df_capacity.loc[i,'LonImp']]), \ color=capline_color, line_cap = 'butt', weight=df_capacity.loc[i,'Value']/line_width_constant, opacity=1).add_to(m) if df_capacity.loc[i,'Value'] > label_min: if line_decimals == 0: folium.Marker(location=[df_capacity.loc[i,'LatMid'], df_capacity.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ df_capacity.loc[i,'Value'].round(line_decimals).astype(int)))).add_to(m) else: folium.Marker(location=[df_capacity.loc[i,'LatMid'], df_capacity.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ round(df_capacity.loc[i,'Value'],line_decimals)))).add_to(m) #Add flows (single color) if LINES == 'Flow': attr = {'font-weight': 'bold', 'font-size': '24'} for i,row in df_flow.iterrows(): flow = folium.PolyLine(([df_flow.loc[i,'LatExp'], df_flow.loc[i,'LonExp']], \ [df_flow.loc[i,'LatImp'],df_flow.loc[i,'LonImp']]), \ color=capline_color, line_cap = 'butt', weight=df_flow.loc[i,'Value']/line_width_constant, 
opacity=1).add_to(m) plugins.PolyLineTextPath(flow, '\u2192', repeat=False ,center = True, offset=6, orientation = -90, \ attributes=attr).add_to(m) #Arrow if df_flow.loc[i,'Value'] > label_min: if line_decimals == 0: folium.Marker(location=[df_flow.loc[i,'LatMid'], df_flow.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ df_flow.loc[i,'Value'].round(line_decimals).astype(int)))).add_to(m) else: folium.Marker(location=[df_flow.loc[i,'LatMid'], df_flow.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ round(df_flow.loc[i,'Value'],line_decimals)))).add_to(m) #Add flows (color based on congestion) if LINES == 'CongestionFlow': attr = {'font-weight': 'bold', 'font-size': '24'} for i,row in df_flow.iterrows(): flow = folium.PolyLine(([df_flow.loc[i,'LatExp'], df_flow.loc[i,'LonExp']], \ [df_flow.loc[i,'LatImp'],df_flow.loc[i,'LonImp']]), \ color=df_flow.loc[i,'color'], line_cap = 'butt', weight=df_flow.loc[i,'Value']/line_width_constant, opacity=1).add_to(m) plugins.PolyLineTextPath(flow, '\u2192', repeat=False ,center = True, offset=6, orientation = -90, \ attributes=attr).add_to(m) #Arrow if df_flow.loc[i,'Value'] > label_min: if line_decimals == 0: folium.Marker(location=[df_flow.loc[i,'LatMid'], df_flow.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ df_flow.loc[i,'Value'].round(line_decimals).astype(int)))).add_to(m) else: folium.Marker(location=[df_flow.loc[i,'LatMid'], df_flow.loc[i,'LonMid']], icon=DivIcon( icon_size=(150,36), icon_anchor=(11,7), html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_line, line_text, \ round(df_flow.loc[i,'Value'],line_decimals)))).add_to(m) # ### 3.4 Add region names #Add region names 
# Label each displayed region with its region code ('RRR'); only rows with
# Display == 1 in the region table get a text marker.
for i,row in df_region.loc[df_region['Display']==1, ].iterrows():
    folium.Marker(location=[df_region.loc[i,'Lat'], df_region.loc[i,'Lon']],
                  icon=DivIcon(
                      icon_size=(150,36),
                      icon_anchor=(7,7),
                      html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_region, region_text, df_region.loc[i,'RRR']))).add_to(m)

# ### 3.5 Add hubs

#Add hub capacities as bubbles
# NOTE(review): the 'Capacity' and 'Flow'/'CongestionFlow' branches below are
# identical except for which column is printed on the bubble ('Value' vs
# 'prod_GWh') — a candidate for a shared helper in a later refactor.
if hub_display == True:
    if LINES == 'Capacity':
        for i,row in df_hub.iterrows():
            # Fixed-size bubble at the hub location; popup shows the region code.
            folium.CircleMarker(
                location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                popup=df_hub.loc[i,'RRR'],
                radius = hub_size,
                color= hub_color,
                opacity = 0,
                fill=True,
                fill_color= hub_color,
                fill_opacity = 1
            ).add_to(m)
            # Numeric label on top of the bubble: integer formatting when
            # hub_decimals == 0, rounded float otherwise.
            if hub_decimals == 0:
                folium.Marker(location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                              icon=DivIcon(
                                  icon_size=(150,36),
                                  icon_anchor=(7,9),
                                  html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_hub, hub_text, df_hub.loc[i,'Value'].round(hub_decimals).astype(int)))).add_to(m)
            else:
                folium.Marker(location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                              icon=DivIcon(
                                  icon_size=(150,36),
                                  icon_anchor=(7,9),
                                  html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_hub, hub_text, round(df_hub.loc[i,'Value'], hub_decimals)))).add_to(m)
    if LINES == 'Flow' or LINES == 'CongestionFlow':
        for i,row in df_hub.iterrows():
            folium.CircleMarker(
                location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                popup=df_hub.loc[i,'RRR'],
                radius = hub_size,
                color= hub_color,
                opacity = 0,
                fill=True,
                fill_color= hub_color,
                fill_opacity = 1
            ).add_to(m)
            # Same label logic as above, but showing production ('prod_GWh').
            if hub_decimals == 0:
                folium.Marker(location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                              icon=DivIcon(
                                  icon_size=(150,36),
                                  icon_anchor=(7,9),
                                  html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_hub, hub_text, df_hub.loc[i,'prod_GWh'].round(hub_decimals).astype(int)))).add_to(m)
            else:
                folium.Marker(location=[df_hub.loc[i,'Lat'], df_hub.loc[i,'Lon']],
                              icon=DivIcon(
                                  icon_size=(150,36),
                                  icon_anchor=(7,9),
                                  html='<div style="font-size: {}pt; color : {}">{}</div>'.format(font_hub, hub_text, round(df_hub.loc[i,'prod_GWh'], hub_decimals)))).add_to(m)

# ### 3.6 Add Legend

# +
# Placeholder tokens used inside the HTML template below; they are replaced
# with the actual colours / legend labels configured earlier in the notebook.
color_keys = ['color1', 'color2', 'color3']
color_dict = dict(zip(color_keys, flowline_color))
legend_keys = ['item1', 'item2', 'item3']
legend_dict = dict(zip(legend_keys, legend_values))

if LINES == 'CongestionFlow':
    from branca.element import Template, MacroElement

    # Draggable HTML/CSS legend for the congestion-rate colours.  The tokens
    # color1..color3 / item1..item3 are literal placeholders substituted below.
    template = """
{% macro html(this, kwargs) %}

<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>jQuery UI Draggable - Default functionality</title>
  <link rel="stylesheet" href="//code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css">

  <script src="https://code.jquery.com/jquery-1.12.4.js"></script>
  <script src="https://code.jquery.com/ui/1.12.1/jquery-ui.js"></script>

  <script>
  $( function() {
    $( "#maplegend" ).draggable({
        start: function (event, ui) {
            $(this).css({
                right: "auto",
                top: "auto",
                bottom: "auto"
            });
        }
    });
  });
  </script>
</head>
<body>

<div id='maplegend' class='maplegend'
    style='position: absolute; z-index:9999; border:2px solid grey; background-color:rgba(255, 255, 255, 1);
    border-radius:6px; padding: 10px; font-size:14px; right: 20px; bottom: 20px;'>

<div class='legend-title'>Congestion rate</div>
<div class='legend-scale'>
  <ul class='legend-labels'>
    <li><span style='background:color3;opacity:1;'></span>item1</li>
    <li><span style='background:color2;opacity:1;'></span> item2 </li>
    <li><span style='background:color1;opacity:1;'></span> item3 </li>
  </ul>
</div>
</div>

</body>
</html>

<style type='text/css'>
  .maplegend .legend-title {
    text-align: left;
    margin-bottom: 5px;
    font-weight: bold;
    font-size: 90%;
    }
  .maplegend .legend-scale ul {
    margin: 0;
    margin-bottom: 5px;
    padding: 0;
    float: left;
    list-style: none;
    }
  .maplegend .legend-scale ul li {
    font-size: 80%;
    list-style: none;
    margin-left: 0;
    line-height: 18px;
    margin-bottom: 2px;
    }
  .maplegend ul.legend-labels li span {
    display: block;
    float: left;
    height: 16px;
    width: 30px;
    margin-right: 5px;
    margin-left: 0;
    border: 1px solid #999;
    }
  .maplegend .legend-source {
    font-size: 80%;
    color: #777;
    clear: both;
    }
  .maplegend a {
    color: #777;
    }
</style>
{% endmacro %}"""

    # Substitute the configured colours and labels into the template, then
    # attach the legend to the folium map.
    for key in color_dict.keys():
        template = template.replace(key, color_dict[key])
    for key in legend_dict.keys():
        template = template.replace(key, legend_dict[key])

    macro = MacroElement()
    macro._template = Template(template)

    m.get_root().add_child(macro)
# -

# ### 4 Save Output

# Make Transmission_Map output folder
if not os.path.isdir('output/Transmission_Map/' + LINES + '/' + SCENARIO + '/' + market):
    os.makedirs('output/Transmission_Map/' + LINES + '/' + SCENARIO + '/' + market)
output_dir = 'output/Transmission_Map/' + LINES + '/' + SCENARIO + '/' + market

m.save(output_dir + '/' + map_name)

# ### 5 Display Map

m
base/auxils/plotting_tool/Transmission_Map.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Simple Exponential Smoothing

import pandas as pd

# Define a dataframe with the forecasts that can be used to demonstrate exponential smoothing

# +
col_names = ['Day', 'Value']
data = pd.DataFrame(columns=col_names)
data['Day'] = [1, 2, 3, 4, 5]
data['Value'] = [39, 44, 40, 45, 38]
data
# -

# **Formula for exponential Smoothing**
#
# Ft+1 = α St + ( 1 - α ) Ft
#
# - Let t be the time (here Day)
# - Let Ft be the forecast at time t
# - Let St be the Sales at time t
#
# Let us take the α value to be 0.2
#
# We take the forecast for the first day sales value as NA and apply the formula from the second day


def exp_smooth(alpha, values=None):
    """Return simple-exponential-smoothing forecasts for a series.

    Implements Ft+1 = alpha * St + (1 - alpha) * Ft, seeded with F1 = S1.

    Parameters
    ----------
    alpha : float
        Smoothing constant in [0, 1]; larger values react faster.
    values : sequence of numbers, optional
        Series to smooth.  Defaults to the global ``data['Value']`` column
        so existing ``exp_smooth(alpha)`` calls keep working unchanged.

    Returns
    -------
    list
        One forecast per observation (the first entry is the seed value).
    """
    if values is None:
        values = data['Value']
    forecast = []
    ft = None
    for i, st in enumerate(values):
        if i == 0:
            # Seed the recursion with the first observation.  (The original
            # tested ``ft == 0``, which would mis-fire whenever a genuine
            # data value of 0 appeared in the series.)
            ft = st
        else:
            ft = alpha * st + (1 - alpha) * ft
        forecast.append(ft)
    return forecast


data['alpha=0.2'] = exp_smooth(0.2)
data['alpha=0.4'] = exp_smooth(0.4)
data['alpha=0.6'] = exp_smooth(0.6)
data['alpha=0.8'] = exp_smooth(0.8)
data

# • α = 1 leads to fast smoothing (nervous, volatile, naïve)
#
# • α = 0 leads to slow smoothing (calm, staid, cumulative)

# +
import seaborn as sns
import matplotlib.pyplot as plt

# Plot the raw series against each smoothed series.
# BUG FIX: the original called ``sns.plt.show()`` — the ``plt`` alias was
# removed from seaborn (0.9+), raising AttributeError; call ``plt.show()``.
for col in ('alpha=0.2', 'alpha=0.4', 'alpha=0.6'):
    fig, ax = plt.subplots()
    sns.pointplot(x='Day', y='Value', data=data, ax=ax)
    sns.pointplot(x='Day', y=col, data=data, ax=ax, color='r')
    plt.show()

# Last comparison plot; its ``show()`` call follows in the next cell.
fig, ax = plt.subplots()
sns.pointplot(x='Day', y='Value', data=data, ax=ax)
sns.pointplot(x='Day', y='alpha=0.8', data=data, ax=ax, color='r')
plt.show()  # BUG FIX: was ``sns.plt.show()``; ``sns.plt`` was removed from seaborn
# -

# But all these forecasts are not accurate, there is obviously some error
#
# ### Let us calculate that error


def error(col):
    """Attach forecast errors for column ``col`` to the global ``data`` frame.

    Writes two columns on ``data``:

    - ``Error``: actual minus forecast (``Value`` - ``col``)
    - ``Error_Square``: squared error

    Parameters
    ----------
    col : str
        Name of the forecast column to score, e.g. ``'alpha=0.2'``.
    """
    # Renamed the locals: the original used a list named ``error`` that
    # shadowed this function's own name.
    residuals = []
    squared = []
    for i in range(len(data)):
        e = data['Value'][i] - data[col][i]
        residuals.append(e)
        squared.append(e * e)
    data['Error'] = residuals
    data['Error_Square'] = squared


error('alpha=0.2')
data

# ### Mean square error

# NOTE(review): the sum skips the seed forecast (index 0) but divides by the
# full length ``len(data)``; dividing by ``len(data) - 1`` would average only
# the errors actually summed.  Kept as-is to match the accompanying text.
sum(data['Error_Square'][1:]) / len(data)

error('alpha=0.8')
data

sum(data['Error_Square'][1:]) / len(data)

# ## We choose the parameter with minimal error on unseen data
Simple Exponential Smoothing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import sys
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import astropy.io.fits as pyfits
import astropy.utils as autils
import requests
import json
import datetime
from pprint import pprint as pp
import os
import astropy.io.fits as fits

# Jielai added modules f
import subprocess
import seaborn as sns

# NOTE(review): placeholder credentials — fill in locally, never commit real ones.
pw = 'XXX'
usrname = 'XXX'

# Time Counter function
import time


def tic():
    """Start the wall-clock timer (start time is stored on the function object)."""
    tic.start = time.perf_counter()


def toc():
    """Return seconds elapsed since the last call to ``tic()``."""
    elapsed_seconds = time.perf_counter() - tic.start
    return elapsed_seconds  # fractional


# NOAO server Settings
natroot = 'https://astroarchive.noirlab.edu'
assert natroot == 'https://astroarchive.noirlab.edu', 'Notebook does NOT point to PRODUCTION'
print(f"Using server on {natroot}")
adsurl = f'{natroot}/api/adv_search'
print(f"adsurl = {adsurl}")

# Start the timer
print(f'Started on: {str(datetime.datetime.now())}')
tic()  # Start timing the run of this notebook
# -

# Advanced-search payload for the NOIRLab archive: which columns to return
# ("outfields") and which constraints to apply ("search").
jj = {
    "outfields": [
        "md5sum",
        "archive_filename",
        "original_filename",
        #"telescope",
        #"instrument",
        #"obs_type",
        "proc_type",
        "prod_type",
        #"release_date",
        "proposal",
        "ra_center",
        #"ra_min",
        "dec_center",
        #"dec_min",
        # 'The local calendar date of the telescope, at the start of PM observing.' = caldat
        "caldat",
        "url",
        "filesize",
        "ifilter",
        #"seeing",
        "exposure",
        #"depth",
        "dateobs_min",
        "dateobs_max",
        'release_date'
    ],
    "search": [
        #["release_date", "2020-01-01", "2021-01-01"],  # proprietary
        ["obs_type", 'object'],
        #["pi", 'cooke'],
        ["proposal", "2020B-0253"],
        ["proc_type", "instcal"],
        ["prod_type", "image"],
        ["caldat", "2021-06-05", "2021-06-06"]
    ]
}

apiurl = f'{adsurl}/fasearch/?limit=200000'
print(f'Using API url: {apiurl}')
# The first element of the JSON response is metadata; the data rows follow it.
ads_df = pd.DataFrame(requests.post(apiurl, json=jj).json()[1:])
print(len(ads_df))
#ads_df

sns.scatterplot(data=ads_df, x="ra_center", y="dec_center")
plt.title("all fields observed during DWF run 2021 June")

# +
# 226.54167 +9.54861
field_name = 'S82sub8'
field_RA = 226.54167   # degrees
field_DEC = 9.54861    # degrees
dec_offset_allowance = 0.01  # degrees
# BUG FIX: np.cos expects radians, but field_DEC is in degrees — convert first.
# The original passed degrees straight to np.cos, giving a wrong (and negative,
# hence the np.abs) scale factor for the RA search window.
RA_offset_allowance = np.abs(dec_offset_allowance * np.cos(np.radians(field_DEC)))
print(dec_offset_allowance, RA_offset_allowance, np.cos(np.radians(field_DEC)))
ra_min = field_RA - RA_offset_allowance
ra_max = field_RA + RA_offset_allowance
dec_min = field_DEC - dec_offset_allowance
dec_max = field_DEC + dec_offset_allowance
# Keep only the exposures whose centre falls inside the small box on the field.
field_ads_df = ads_df[(
    (ads_df['ra_center'] > ra_min) &
    (ads_df['ra_center'] < ra_max) &
    (ads_df['dec_center'] < dec_max) &
    (ads_df['dec_center'] > dec_min)
)]
print(len(field_ads_df))
sns.scatterplot(data=ads_df, x="ra_center", y="dec_center", alpha=0.5, marker='x', s=200)
plt.ylim(dec_min, dec_max)
plt.xlim(ra_min, ra_max)
plt.scatter(field_RA, field_DEC)
#field_ads_df
# -

field_ads_df.keys()

field_ads_df[['caldat', 'proc_type', 'prod_type', 'ifilter', 'exposure', 'original_filename']]

# first night, g,i
df_1 = field_ads_df[field_ads_df['caldat'] == '2021-06-05']
df_g1 = df_1[df_1['ifilter'] == 'g DECam SDSS c0001 4720.0 1520.0']
df_i1 = df_1[df_1['ifilter'] == 'i DECam SDSS c0003 7835.0 1470.0']

# Exposure numbers are the trailing '_'-separated token of the original
# filename, minus the '.fits.fz' extension.
print([x.split('_')[-1].replace('.fits.fz', '') for x in df_g1['original_filename']])
df_g1[['dateobs_min', 'caldat', 'proc_type', 'prod_type', 'ifilter', 'exposure']]

print([x.split('_')[-1].replace('.fits.fz', '') for x in df_i1['original_filename']])
df_i1[['caldat', 'proc_type', 'prod_type', 'ifilter', 'exposure']]

# second night, g, i
df_2 = field_ads_df[field_ads_df['caldat'] == '2021-06-06']
df_g2 = df_2[df_2['ifilter'] == 'g DECam SDSS c0001 4720.0 1520.0']
df_i2 = df_2[df_2['ifilter'] == 'i DECam SDSS c0003 7835.0 1470.0']

print([x.split('_')[-1].replace('.fits.fz', '') for x in df_g2['original_filename']])
df_g2[['caldat', 'proc_type', 'prod_type', 'ifilter', 'exposure']]

print([x.split('_')[-1].replace('.fits.fz', '') for x in df_i2['original_filename']])
df_i2[['caldat', 'proc_type', 'prod_type', 'ifilter', 'exposure']]
notebooks/NOAOarchive_04_JielaiZhang_S82sub8_exposurenumberslookupforFrank.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "skip"} # # Notebook contents: # # This notebook contains a lecture. The code for generating plots are found at the of the notebook. Links below. # # - [presentation](#Session-1b:) # - [code for plots](#Code-for-plots) # + [markdown] slideshow={"slide_type": "slide"} # # Session 1b: # ## An introduction to Python # # *<NAME>* # + [markdown] slideshow={"slide_type": "slide"} # ## Agenda # # 1. [On Data Science](#Why-data-science) # 2. Python an overview: # - [The what, why and how](#Introducing-Python) # - [Some advice](#Help-and-advice) # - [Scripting](#The-python-shell-and-scripts) and [Jupyter](#The-Jupyter-framework) # 3. The Python language # - [Fundamental data types](#Fundamental-data-types) and [debugging](#Debugging) # - [Operators](#Operators) and [control flow](#Control-flow) # - [Containers](#Containers) and [loops](#Loops) # - [Reusable code](#Reusable-code) # # + [markdown] slideshow={"slide_type": "slide"} # # The Academic Quarter # - 9 means 9.15, # - 13 means 13.15 (i.e. 1.15pm) # + [markdown] slideshow={"slide_type": "slide"} # # Why data science # # Some trends # # - **Data** is increasingly available # - Improved **algorithms** and methods for computation # - Faster and bigger **computers** # # Some big successes # - Image and text recognition (e.g. self-driving cars, Google Translate) # - Artificial intelligence (play computer games, poker, chess, game of go) # - Smart services from Silicon valley (virtual assistants, recommendation etc.) 
# # Consequence > data scientists have HIGHEST entry wages # + [markdown] slideshow={"slide_type": "slide"} # # Beyond data science # # The skills and ideas of data science are spreading beyond # - machine learning is now gaining traction in # - statistics # - theoretical economic modelling # # - smart, free tools for working with # - small and big data on structured (tabular) data # - unstructured data sources from image, text and social media # # Takeaway > data science is useful broadly. # + [markdown] slideshow={"slide_type": "slide"} # # Introducing Python # + [markdown] slideshow={"slide_type": "slide"} # ## Introduction # # *What is Python useful for?* # # * It can do "anything" and [used everywhere](https://www.python.org/about/success/) # * High-tech manufacturing # * Space shuttles # * Large servers # * Python has incredible resources for machine learning, big data, visualizations. # + [markdown] slideshow={"slide_type": "slide"} # ## Ιntroduction (2) # + [markdown] slideshow={"slide_type": "-"} # <center><img src='https://zgab33vy595fw5zq-zippykid.netdna-ssl.com/wp-content/uploads/2017/09/growth_major_languages-1-1024x878.png' alt="Drawing" style="width: 900px;"/></center> # # + [markdown] slideshow={"slide_type": "slide"} # ## Introduction (3) # # *What is Python?* # # A multiparadigm, general purpose programming language. # * Can do everything you can imagine a computer can do. # * E.g. manage databases, advanced computation, web etc. # # + [markdown] slideshow={"slide_type": "fragment"} # *Why Python?* # # Python's main objective is to make programming more ***effortless***. # - This is done by making syntax intuitive. # - A side effect: programming can be fun # - Downside: not the fastest (solved with packages) # + [markdown] slideshow={"slide_type": "slide"} # ## Introduction (4) # # *Is Python the most popular for statistics and data science?* # # There are other good languages, e.g. R, Stata or SAS, why not use them? 
# # - Python has the best data science packages. # - And it is also being used increasingly in statistics. # + slideshow={"slide_type": "-"} f_py_trend # + [markdown] slideshow={"slide_type": "slide"} # ## Introduction (5) # *How does data science work?* # + [markdown] slideshow={"slide_type": "fragment"} # <center><img src='https://raw.githubusercontent.com/hadley/r4ds/master/diagrams/data-science.png' alt="Drawing" style="width: 1000px;"/></center> # # + [markdown] slideshow={"slide_type": "slide"} # ## Introduction (6) # *What are we going to learn?* # + [markdown] slideshow={"slide_type": "fragment"} # Course competencies: # - Import: scraping and data IO # - Tidy / transform: data structuring and text # - Visualize: plotting # - Model: machine learning # - Dissemination: markdown / Git # + [markdown] slideshow={"slide_type": "slide"} # # Help and advice # + [markdown] slideshow={"slide_type": "slide"} # ## Learning how to code (1) # # This course.. ain't easy.. # + [markdown] slideshow={"slide_type": "fragment"} # Why would you go through this pain? You choose one of two paths. # + [markdown] slideshow={"slide_type": "fragment"} # i. You move on, you forget some or most of the material. # + [markdown] slideshow={"slide_type": "fragment"} # ii. You are lit and your life has changed. # - You may return to become a better sociologist, anthropologit, economist etc. # - Or, you may continue along the new track of data science. # - In any case, you keep learning and expanding your programming skills. # + [markdown] slideshow={"slide_type": "slide"} # ## Learning how to code (2) # # #### <NAME> # # > The bad news is that when ever you learn a new skill you’re going to suck. It’s going to be frustrating. The good news is that is typical and happens to everyone and it is only temporary. You can’t go from knowing nothing to becoming an expert without going through a period of great frustration and great suckiness. 
# # + [markdown] slideshow={"slide_type": "slide"} # ## Learning how to code (3) # # #### <NAME> # # > One can learn data analysis only by doing, not by reading. # + [markdown] slideshow={"slide_type": "slide"} # ## Learning how to code (4) # # #### Practical advice # # - Do not use the console, write scripts or preferably notebooks instead # # - Be lazy: resuse code and write reusable code (functions) # # - Think before you code # # - Code is a medium of communication # # 1. Between you and the computer # 2. Between you and other people (or future you) # + [markdown] slideshow={"slide_type": "slide"} # ## Learning how to code (5) # # How do we participate optimally? # # # - Practice, practice and more practice. # - Try everything on your own computer # - Type the code in yourself, # - Word-by-word, line-by-line. # - DO NOT copy-paste. # + [markdown] slideshow={"slide_type": "slide"} # ## Guide on getting help # # Whenever you have a question you do as follows: # + [markdown] slideshow={"slide_type": "fragment"} # 1: You ask other people in your group. # + [markdown] slideshow={"slide_type": "fragment"} # 2: You ask the neighboring groups. # + [markdown] slideshow={"slide_type": "fragment"} # 3: You search on Google (more advice will follow). # + [markdown] slideshow={"slide_type": "fragment"} # 4: You raise an [issue in our Github repo](https://github.com/abjer/sds/issues) or you ask us. # + [markdown] slideshow={"slide_type": "slide"} # # The python shell and scripts # + [markdown] slideshow={"slide_type": "slide"} # ## Python interpreter (1) # # *Shell access* # # The fundamental way of accessing Python is from your shell by typing *`python`*. # # Everyone should be able to run the following commands and reproduce the output. 
# + [markdown] slideshow={"slide_type": "fragment"} # ``` python # >>> print ('hello my friend') # hello my friend # ``` # + [markdown] slideshow={"slide_type": "fragment"} # ``` python # >>> 4*5 # 20 # ``` # + [markdown] slideshow={"slide_type": "slide"} # ## Python interpreter (2) # # *Python scripts* # # The power of the interpreter is that it can be used to execute Python scripts. # + [markdown] slideshow={"slide_type": "fragment"} # What is a script? # + [markdown] slideshow={"slide_type": "fragment"} # These are programs containing code blocks. # + [markdown] slideshow={"slide_type": "slide"} # ## Python interpreter (3) # # + [markdown] slideshow={"slide_type": "-"} # Everyone should be able to make a text file called *`test.py`* in their current folder. The file should contain the following two lines: # + [markdown] slideshow={"slide_type": "-"} # ``` python # print ('Line 1') # print ('Line 2') # ``` # + [markdown] slideshow={"slide_type": "fragment"} # Try executing the test file from the shell by typing: # # *`python test.py`* # # + [markdown] slideshow={"slide_type": "fragment"} # This should yield the following output: # # ``` # Line 1 # Line 2 # ``` # + [markdown] slideshow={"slide_type": "slide"} # # The Jupyter framework # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter (1) # # *What is Jupyter Notebook?* # # - Jupyter provides an interactive and visual platform for working with data. # - It is an abbreviation of Julia, Python, and R. # + [markdown] slideshow={"slide_type": "fragment"} # *Why Jupyter notebook?* # - great for writing. # - markdown, equations and direct visual output; # - interactive allows keeping, changing data etc. # - many tools (e.g. create this slideshow) # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter (2) # # *How do we create a Jupyter Notebook?* # # We start Jupyter Notebook by typing *`jupyter notebook`* in the shell. 
# + [markdown] slideshow={"slide_type": "fragment"} # Try making a new notebook: # - click the button *`New`* in the upper right corner # - clicking on *`Python 3`*. # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter (3) # # *How do we interact with Jupyter?* # # # Jupyter works by having cells in which you can put code. The active cell has a colored bar next to it. # + [markdown] slideshow={"slide_type": "fragment"} # A cell is *`edit mode`* when there is a <span style="color:green">*green*</span> bar to the left. To activate *`edit mode`* click on a cell. # # A cell is in *`command mode`* when the bar to the left is <span style="color:blue">*blue*</span>. # # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter (4) # # *How do we add and execute cude?* # + [markdown] slideshow={"slide_type": "fragment"} # Go into edit mode - add the following: # + slideshow={"slide_type": "-"} A = 11 B = 26 A * B # + [markdown] slideshow={"slide_type": "fragment"} # Click the &#9658; to run the code in the cell. What happens if we change A+B to A*B? # + [markdown] slideshow={"slide_type": "slide"} # ## Jupyter (5) # # *How can we add cells to our notebook?* # + [markdown] slideshow={"slide_type": "fragment"} # Try creating a new cell by clicking the **`+`** symbol. # + [markdown] slideshow={"slide_type": "slide"} # # # Jupyter (6) # *Most relevant keyboard short cuts* # # Editing and executing cells # - enter edit mode: click inside the cell or press `ENTR` # - exit edit mode: click outside cell or press `ESC`. # - executing code within a cell is `SHFT`+`ENTR` or `CTRL`+`ENTR` (not same!) # # # Adding cell (`a` above, `b` below) and removing cells (press `d` twice) # # More info: # - For tips [see blog post](https:abjer.github.io/sds2019/post/jupyter) or see list Jupyter keyboard shortcuts in menu (top): `Help > Keyboard Shortcuts`. # - General resources in documentation and tutorial available [here](http://jupyter.readthedocs.io/en/latest/). 
# # # # + [markdown] slideshow={"slide_type": "slide"} # # The Python language # + [markdown] slideshow={"slide_type": "slide"} # ## We begin with a quiz.. # + [markdown] slideshow={"slide_type": "slide"} # # Fundamental data types # + [markdown] slideshow={"slide_type": "slide"} # ## Data types (1) # # Recall the four fundamental data types: `int`, `float`, `str` and `bool`. # - Sometimes know as elementary, primitive or basic. # # Some data types we can change between, e.g. between `float` and `int`. # + slideshow={"slide_type": "-"} int(1.6) # integer conversion always rounds down, i.e. floor # - float(int(1.6)) # it does not retake its former value # + [markdown] slideshow={"slide_type": "fragment"} # We can do the same for converting to `float` and `int` to `str`. Note some conversion are not allowed. # + [markdown] slideshow={"slide_type": "slide"} # ## Data types (2) # # *What is an object in Python?* # + [markdown] slideshow={"slide_type": "fragment"} # - A thing, anything - everything is an object. # + [markdown] slideshow={"slide_type": "fragment"} # *Why use objects?* # + [markdown] slideshow={"slide_type": "fragment"} # - Easy manipulable, powerful methods and flexible attributes. # - We can make complex objects, e.g. estimation methods quite easy. # - Example of a float method: # + slideshow={"slide_type": "-"} (1.5).as_integer_ratio() # + [markdown] slideshow={"slide_type": "slide"} # # Debugging # + [markdown] slideshow={"slide_type": "slide"} # ## Debugging (1) # # *Code fails all the time!* # # + slideshow={"slide_type": "fragment"} A='I am a string' int(A) print(A) # + [markdown] slideshow={"slide_type": "slide"} # ## Debugging (2) # # *How do you fix code errors?* # # + [markdown] slideshow={"slide_type": "fragment"} # Look at the error message: # 1. **Where** is the error? I.e. what linenumber (and which function). # - Inspect the elements from the failure before the error occurs. 
# - Note: if you use a function you may want to try printing elements # - Try replacing the objects in the line. # # 2. **What** goes wrong? Examples: # - `SyntaxError`: spelling error; `ValueError`: datatype mismatch. # - Hint: reread it several times, search on Google if you do not understand the error. # # + [markdown] slideshow={"slide_type": "slide"} # ## Debugging (3) # # *Exercise: investigate the error we incurred* # # + [markdown] slideshow={"slide_type": "fragment"} # * Look at the answers in this stackoverflow post: [https://stackoverflow.com/questions/8420143](https://stackoverflow.com/questions/8420143). # * An explanation by Blender: # > Somewhere in your text file, a line has the word `id` in it, which can't really be converted to a number. # # + [markdown] slideshow={"slide_type": "slide"} # # Operators and control flow # + [markdown] slideshow={"slide_type": "slide"} # ## Operators # # *What computations can python do?* # + [markdown] slideshow={"slide_type": "fragment"} # - Numeric operators: Output a numeric value from numeric input. # - `+`; `*`; `-`; `/`. # - Comparison operators: Output a boolean value, `True` or `False` # - `==`; `!=` (equal, not equal - input from most object types) # - `>`; `<`. (greater, smaller - input from numeric) # - Logical operators: Output a boolean value from boolean input. 
# - `and` / `&`; `or` / `|`; `not` / `!` # # # + [markdown] slideshow={"slide_type": "slide"} # ## Operators (2) # # *How can we test an expression in Python?* # + [markdown] slideshow={"slide_type": "fragment"} # We can check the validity of a statement using comparison operations: # + slideshow={"slide_type": "-"} 3 == (2 + 1) # other ops: >, !=, >= # + [markdown] slideshow={"slide_type": "fragment"} # And apply logical operations: # + slideshow={"slide_type": "fragment"} True | False # + [markdown] slideshow={"slide_type": "slide"} # ## Control flow # # *How can we activate code based on data?* # # A conditional execution of code, if a condition is true then active code. # + [markdown] slideshow={"slide_type": "fragment"} # In Python the syntax is easy with the `if` syntax: # # ``` # if condition: # (CODE BLOCK LINE 1) # (CODE BLOCK LINE 2) # ... # ``` # + [markdown] slideshow={"slide_type": "-"} # - condition is either a variable or an expression # - if statement is `True` then execute a code block # + [markdown] slideshow={"slide_type": "slide"} # ## Control flow (2) # # We can use comparison and logical operators directly as they output boolean values. # + slideshow={"slide_type": "-"} if 4 == 4: print ("I'm being executed, yay!") else: print ("Oh no, I'm not being executed!") # + [markdown] slideshow={"slide_type": "slide"} # ## Control flow (3) # # We can make deep control flow structures: # + slideshow={"slide_type": "-"} A = 11 if A>=0: if A==0: print ("I'm exactly zero!") elif A<10: print ("I'm small but positive!") else: print ("I'm large and positive!") else: print ("Oh shoot, I'm negative!") # + [markdown] slideshow={"slide_type": "slide"} # # Containers and loops # + [markdown] slideshow={"slide_type": "slide"} # ## Containers # # *How do we store multiple objects?* # # - We put objects into containers. 
(Like a bag) # - An example is a `list` where we can add and remove objects # # *What are they useful for?* # - We can use them to compute statistics (max, min, mean) # + [markdown] slideshow={"slide_type": "slide"} # ## Sequential containers # # *Which data types are ordered?* # # - Sequential containers are ordered from first to last. # - They can be accessed using their element using integer position/order. # - Done with square bracket syntax `[]` # - Note **first element is 0, and last is n-1!** # - One exception are iterators (`iter`) which are incredibly fast. # + [markdown] slideshow={"slide_type": "slide"} # ## Sequential containers (2) # # *Which containers are sequential?* # # - `list` which we can modify (**mutable**). # - useful to collect data on the go # - `tuple` which is after initial assignment (**immutable**) # - tuples are faster as they can do less things # - `array` # - which is mutable in content (i.e. we can change elements # - but immutable in size # - great for data analysis # # + [markdown] slideshow={"slide_type": "slide"} # ## Lists # # A list can be modified (mutated) by methods, e.g. # - We can `append` objects to it and remove `remove` them again. # - We can use operations like `+` and `*`. # # + slideshow={"slide_type": "-"} list_1 = ['A', 'B'] list_2 = ['C', 'D'] list_1 + list_2 # + [markdown] slideshow={"slide_type": "slide"} # ## Non-sequential types # # *Are there any non-sequential containers?* # - A dictionary (`dict`) which are accessed by keys (immutable objects). # - Focus of tomorrow. # - A `set` where elements are # - unique (no duplicates) # - not ordered # - disadvantage: cannot access specific elements! # # + [markdown] slideshow={"slide_type": "slide"} # # Loops # + [markdown] slideshow={"slide_type": "slide"} # ## For loops # # *Why are containers so powerful?* # # # + [markdown] slideshow={"slide_type": "fragment"} # We can iterate over elements in a container - this creates a *finite* loop, called the `for` loop. 
# # Example - try the following code: # + slideshow={"slide_type": "-"} A = [] for i in range(4): i_squared = i**2 A.append(i_squared) print(A) # + [markdown] slideshow={"slide_type": "fragment"} # For loops are smart when: iterating over files in a directory; iterating over specific set of columns. # + [markdown] slideshow={"slide_type": "fragment"} # How does Python know where the code associated with inside of the loop begins? # + [markdown] slideshow={"slide_type": "slide"} # ## The one line loop # # *What is the fastest way to write a loop?* # + [markdown] slideshow={"slide_type": "fragment"} # Using list comprehension (also work for containers): # + slideshow={"slide_type": "-"} A = [i**2 for i in range(4)] print(A) # + [markdown] slideshow={"slide_type": "slide"} # # ## While loops # # *Can we make a loop without specifying the end?* # + [markdown] slideshow={"slide_type": "fragment"} # Yes, this is called a `while` loop. Example - try the following code: # # + slideshow={"slide_type": "-"} i = 0 L = [] while (i<3): L.append(i*2) i += 1 print(L) # + [markdown] slideshow={"slide_type": "fragment"} # Applications # - Can be applied in scraping, model which converges, etc. # - Make server process that keeps running # + [markdown] slideshow={"slide_type": "slide"} # # Reusable code # + [markdown] slideshow={"slide_type": "fragment"} # *Why do we reuse code?* # + [markdown] slideshow={"slide_type": "fragment"} # - To save time. # + [markdown] slideshow={"slide_type": "fragment"} # - To learn from other or 'borrow' their code. # + [markdown] slideshow={"slide_type": "slide"} # ## Functions # + [markdown] slideshow={"slide_type": "fragment"} # *What procedures have we seen?* # # + [markdown] slideshow={"slide_type": "fragment"} # *How can we make a reusable procedure?* # # - We make a Python function with the `def` syntax. 
# + slideshow={"slide_type": "fragment"}
def squared_plus_1(x):  # takes input x
    """Return x squared plus one (toy example of a reusable function)."""
    x_sq = x**2  # x squared
    return x_sq + 1  # plus one for output


squared_plus_1(2)

# + [markdown] slideshow={"slide_type": "slide"}
# ## Class

# + [markdown] slideshow={"slide_type": "fragment"}
# *Where do objects come from?*
#
# - They come from Python `class` which broadly define what they are.
# - E.g. a chair has certain
#     - attributes, e.g. kind legs, whether or not have back, armrests
#     - methods e.g. have a person seated
#
#

# + [markdown] slideshow={"slide_type": "slide"}
# ## Modules (1)
#
# *Do I have to program everything myself?*
#

# + [markdown] slideshow={"slide_type": "fragment"}
# - Nope, you can [load other people's stuff](https://imgur.com/gallery/ta0DGi3).
# - This is how we overcome some of Python's limitations of speed etc.

# + [markdown] slideshow={"slide_type": "slide"}
# ## Modules (2)
# *How the hell can I do that?*

# + [markdown] slideshow={"slide_type": "fragment"}
# We load a module. Try importing numpy:

# + slideshow={"slide_type": "-"}
import numpy as np

# + [markdown] slideshow={"slide_type": "fragment"}
# Let's create an `array` with numpy.

# + slideshow={"slide_type": "-"}
row1 = [1,2]
row2 = [3,4]
table = [row1, row2]
arr = np.array(table)
arr

# + [markdown] slideshow={"slide_type": "slide"}
# # Modules (3)
# *What is a numpy array?*

# + [markdown] slideshow={"slide_type": "fragment"}
# An n-dimensional array with certain available methods. In 2-d it is a matrix, in 3-d it is a tensor.

# + [markdown] slideshow={"slide_type": "fragment"}
# Objects can have useful attributes and methods, that are built-in. These are accessed using `"."`
#
# Example, an array can be transposed as follows:

# + slideshow={"slide_type": "-"}
arr.T

# + [markdown] slideshow={"slide_type": "slide"}
# # Modules (4)
# *Why are numpy arrays smart?*

# + [markdown] slideshow={"slide_type": "fragment"}
# Crazy fast and compact notation.
# - E.g. we can use the same numeric and boolean operations:

# + slideshow={"slide_type": "-"}
arr + 5

# + [markdown] slideshow={"slide_type": "slide"}
# # Final remarks

# + [markdown] slideshow={"slide_type": "slide"}
# ## Quiz time, again..

# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
# In this lecture we learned how to use:
# - Python motivation
# - Python scripts and Jupyter
# - Fundamentals of Python, including
#     - [Fundamental data types](#Fundamental-data-types) and [debugging](#Debugging)
#     - [Operators](#Operators) and [control flow](#Control-flow)
#     - [Containers](#Containers) and [loops](#Loops)
#     - [Reusable code](#Reusable-code)
#

# + [markdown] slideshow={"slide_type": "slide"}
# # The end
# [Return to agenda](#Agenda)

# + [markdown] slideshow={"slide_type": "skip"}
# # Code for plots
# ### Load software

# + slideshow={"slide_type": "skip"}
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
import seaborn as sns

plt.style.use('ggplot')
# %matplotlib inline

# Global matplotlib font sizes used by the figures in this lecture.
SMALL_SIZE = 16
MEDIUM_SIZE = 18
BIGGER_SIZE = 20

plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE)    # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title

# + [markdown] slideshow={"slide_type": "skip"}
# Import and format data

# + slideshow={"slide_type": "skip"}
# Google-Trends-style CSV exports; the first row is a header comment, the
# first column the date index.  (Files are assumed to sit next to the
# notebook — presumably exported beforehand; confirm before re-running.)
ds = pd.read_csv('data_science.csv',skiprows=1,index_col=0)
stats = pd.read_csv('statistics.csv',skiprows=1,index_col=0)

# Side-by-side monthly-mean trend plots; `f_py_trend` is displayed earlier
# in the slides.
f_py_trend,ax = plt.subplots(1,2, figsize=(14,5))
plot_jobs = ('Data science', ds), ('Statistics', stats)
for i, (title, df) in enumerate(plot_jobs):
    df.index = pd.Index(pd.to_datetime(df.index), name='')
    # Keep only the first word of each column label, capitalized.
    df.columns = [c.split(' ')[0].capitalize() for c in df.columns]
    df.resample('M').mean().plot(ax=ax[i])
    ax[i].set_title(title+' and ..', fontsize=BIGGER_SIZE)
    if i==0:
        ax[i].set_ylabel('Google Trends Index')
material/session_1/lecture_1b.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Search in Rotated Sorted Array.
#
# An ascending array was rotated at a pivot unknown beforehand
# (e.g. [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
# Return the index of the target value if present, otherwise -1.
# No duplicates exist in the array; required complexity is O(log n).
#
# Example 1: nums = [4,5,6,7,0,1,2], target = 0  ->  4
# Example 2: nums = [4,5,6,7,0,1,2], target = 3  ->  -1

# +
class Solution(object):
    def search(self, nums, target):
        """Binary search over a rotated sorted array.

        :type nums: List[int]
        :type target: int
        :rtype: int -- index of target, or -1 when absent
        """
        if not nums:
            return -1
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            # At least one of the two halves around mid is fully sorted;
            # decide which, then test whether target lies in that half.
            if nums[lo] <= nums[mid]:
                # Left half [lo, mid] is sorted.
                if nums[lo] <= target <= nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
            else:
                # Right half [mid, hi] is sorted.
                if nums[mid] <= target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
        return -1


# quick manual check
nums = [4, 5, 6, 7, 0, 1, 2]
target = 0
Solution().search(nums, target)
DSA/arrays/searchInRotatedArray.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="ssCOanHc8JH_"
# # Training Mutual Information Maximization (MI-Max) RL algorithms in Brax
#
# Runs a family of variational GCRL / MI-maximization algorithms
# (https://arxiv.org/abs/2106.01404), which include goal-conditioned RL and
# DIAYN (https://arxiv.org/abs/1802.06070) as special cases, built on minimal
# modifications to the baseline Brax PPO implementation. Best run on a Colab
# TPU runtime (Runtime > Change Runtime Type > TPU).

# + id="rlVNS8JstMRr"
# Colab setup and imports.
from datetime import datetime
import functools
import math
import os
import pprint

import jax
import jax.numpy as jnp
from IPython.display import HTML, clear_output
import matplotlib.pyplot as plt

try:
    import brax
except ImportError:
    # First run inside Colab: install brax, then retry the import.
    # !pip install git+https://github.com/google/brax.git@main
    clear_output()
    import brax
from brax.io import file as io_file
from brax.io import html
from brax.experimental.composer import composer
from brax.experimental.braxlines.common import evaluators
from brax.experimental.braxlines.common import logger_utils
from brax.experimental.braxlines.training import ppo
from brax.experimental.braxlines.vgcrl import evaluators as vgcrl_evaluators
from brax.experimental.braxlines.vgcrl import utils as vgcrl_utils
import tensorflow_probability as tfp

tfp = tfp.substrates.jax
tfd = tfp.distributions

if "COLAB_TPU_ADDR" in os.environ:
    # Running on a Colab TPU runtime: point jax at the TPU backend.
    from jax.tools import colab_tpu
    colab_tpu.setup_tpu()

# + id="gh4QsRPnX770"
# Task parameters. As in DIAYN / VGCRL, some task knowledge is assumed about
# the interesting observation dimensions (`obs_indices`) and their range
# (`obs_scale`); these also drive evaluation and visualization. Metrics are
# comparable across runs as long as the task parameters are held fixed.
env_name = 'ant'  # @param ['ant', 'halfcheetah', 'uni_ant', 'bi_ant']
obs_indices = 'vel'  # @param ['vel']
obs_scale = 5.0  # @param{'type': 'number'}
obs_indices_str = obs_indices
# Map the symbolic choice to concrete observation indices per environment.
obs_indices = dict(
    vel=dict(
        ant=(13,14),
        humanoid=(22, 23),
        halfcheetah=(11,),
        uni_ant=(('vel:torso_ant1', 0),('vel:torso_ant1', 1)),
        bi_ant=(('vel:torso_ant1', 0),('vel:torso_ant2', 0)),
    ),
)[obs_indices][env_name]

# Experiment parameters (see braxlines vgcrl/utils.py).
evaluate_mi = False  # @param{'type': 'boolean'}
evaluate_lgr = False  # @param{'type': 'boolean'}
algo_name = 'diayn'  # @param ['gcrl', 'cdiayn', 'diayn', 'diayn_full', 'fixed_gcrl']
logits_clip_range = 5.0  # @param {'type': 'number'}
env_reward_multiplier = 0  # @param{'type': 'number'}
normalize_obs_for_disc = False  # @param {'type': 'boolean'}
normalize_obs_for_rl = True  # @param {'type': 'boolean'}
seed = 0  # @param {type: 'integer'}
diayn_num_skills = 8  # @param {type: 'integer'}
spectral_norm = True  # @param {'type': 'boolean'}
output_path = ''  # @param {'type': 'string'}
task_name = ""  # @param {'type': 'string'}
exp_name = ''  # @param {'type': 'string'}
if output_path:
    output_path = output_path.format(
        date=datetime.now().strftime('%Y%m%d'))
task_name = task_name or f'{env_name}_{obs_indices_str}_{obs_scale}'
exp_name = exp_name or algo_name
output_path = f'{output_path}/{task_name}/{exp_name}'
print(f'output_path={output_path}')

# + id="NaJDZqhCLovU"
# Initialize the Brax environment and the skill discriminator.
visualize = False  # @param{'type': 'boolean'}

# Baseline environment, used to read the observation spec.
base_env_fn = composer.create_fn(env_name=env_name)
base_env = base_env_fn()

# Discriminator over the chosen observation dimensions.
disc = vgcrl_utils.create_disc_fn(
    algo_name=algo_name,
    observation_size=base_env.observation_size,
    obs_indices=obs_indices,
    scale=obs_scale,
    diayn_num_skills=diayn_num_skills,
    logits_clip_range=logits_clip_range,
    spectral_norm=spectral_norm,
    env=base_env,
    normalize_obs=normalize_obs_for_disc)()
extra_params = disc.init_model(rng=jax.random.PRNGKey(seed=seed))

# Environment wrapped so rewards are parameterized by the discriminator.
env_fn = vgcrl_utils.create_fn(
    env_name=env_name,
    wrapper_params=dict(disc=disc, env_reward_multiplier=env_reward_multiplier))

# Inference function and sampled goals for the LGR metric.
core_env = env_fn()
params, inference_fn = ppo.make_params_and_inference_fn(
    core_env.observation_size,
    core_env.action_size,
    normalize_observations=normalize_obs_for_rl,
    extra_params=extra_params)
inference_fn = jax.jit(inference_fn)
goals = tfd.Uniform(low=-disc.obs_scale, high=disc.obs_scale).sample(
    seed=jax.random.PRNGKey(0), sample_shape=(10,))

# Optionally render the initial state.
if visualize:
    env = env_fn()
    jit_env_reset = jax.jit(env.reset)
    state = jit_env_reset(rng=jax.random.PRNGKey(seed=seed))
    clear_output()  # clear out jax.lax warning before rendering
    HTML(html.render(env.sys, [state.qp]))

# + id="4vgMSWODfyMC"
# Training.
num_timesteps_multiplier = 3  # @param {type: 'number'}
ncols = 5  # @param{type: 'integer'}

tab = logger_utils.Tabulator(
    output_path=f'{output_path}/training_curves.csv', append=False)

# Reasonable PPO hyperparameters determined offline.
n = num_timesteps_multiplier
train_fn = functools.partial(
    ppo.train,
    num_timesteps=int(50_000_000 * n),
    log_frequency=20,
    reward_scaling=10,
    episode_length=1000,
    normalize_observations=normalize_obs_for_rl,
    action_repeat=1,
    unroll_length=5,
    num_minibatches=32,
    num_update_epochs=4,
    discounting=0.95,
    learning_rate=3e-4,
    entropy_cost=1e-2,
    num_envs=2048,
    batch_size=1024)

times = [datetime.now()]
plotdata = {}
# Metric keys (substring match) shown in the live training-curve grid.
plotkeys = ['eval/episode_reward', 'losses/disc_loss', 'metrics/lgr',
            'metrics/entropy_all_', 'metrics/entropy_z_', 'metrics/mi_']


def plot(output_path:str =None, output_name:str = 'training_curves'):
    # Plot every logged series whose key matches one of `plotkeys` in an
    # ncols-wide grid; optionally save the figure as a PNG under output_path.
    matched_keys = [key for key in sorted(plotdata.keys())
                    if any(plotkey in key for plotkey in plotkeys)]
    num_figs = len(matched_keys)
    nrows = int(math.ceil(num_figs / ncols))
    fig, axs = plt.subplots(ncols=ncols, nrows=nrows,
                            figsize=(3.5 * ncols, 3 * nrows))
    for i, key in enumerate(matched_keys):
        col, row = i % ncols, int(i / ncols)
        # axs may be scalar, 1-D, or 2-D depending on nrows/ncols.
        ax = axs
        if nrows > 1:
            ax = ax[row]
        if ncols > 1:
            ax = ax[col]
        ax.plot(plotdata[key]['x'], plotdata[key]['y'])
        ax.set(xlabel='# environment steps', ylabel=key)
        ax.set_xlim([0, train_fn.keywords['num_timesteps']])
    fig.tight_layout()
    if output_path:
        with io_file.File(f'{output_path}/{output_name}.png', 'wb') as f:
            plt.savefig(f)


def progress(num_steps, metrics, params):
    # Training callback: optionally compute MI / LGR metrics, accumulate all
    # metrics into plotdata, append to the CSV tabulator, and redraw curves.
    if evaluate_mi:
        mi_metrics = vgcrl_evaluators.estimate_empowerment_metric(
            env_fn=env_fn, disc=disc, inference_fn=inference_fn, params=params,
            num_z=10, num_samples_per_z=10, time_subsampling=1, time_last_n=500,
            num_1d_bins=1000, num_2d_bins=1000,
            custom_obs_indices=list(range(core_env.observation_size))[:30],
            custom_obs_scale=obs_scale,
            verbose=True, seed=0)
        metrics.update(mi_metrics)
    if evaluate_lgr:
        lgr_metrics = vgcrl_evaluators.estimate_latent_goal_reaching_metric(
            params=params, env_fn=env_fn, disc=disc, inference_fn=inference_fn,
            goals=goals, num_samples_per_z=10, time_subsampling=1,
            time_last_n=500, seed=0)
        metrics.update(lgr_metrics)
    times.append(datetime.now())
    for key, v in metrics.items():
        plotdata[key] = plotdata.get(key, dict(x=[], y=[]))
        plotdata[key]['x'] += [num_steps]
        plotdata[key]['y'] += [v]
    # The first callback (step 0) does not include losses.
    if num_steps > 0:
        tab.add(num_steps=num_steps, **metrics)
        tab.dump()
    clear_output(wait=True)
    plot()
    plt.show()


# Train; the discriminator loss is added as an extra PPO loss term.
extra_loss_fns = dict(disc_loss=disc.disc_loss_fn) if extra_params else None
_, params, _ = train_fn(
    environment_fn=env_fn,
    progress_fn=progress,
    extra_params=extra_params,
    extra_loss_fns=extra_loss_fns,
)
clear_output(wait=True)
plot(output_path=output_path)
print(f'time to jit: {times[1] - times[0]}')
print(f'time to train: {times[-1] - times[1]}')

# + id="p5eWOxg7RmQQ"
# Visualize skills of the learned inference function in a 2D plot.
num_z = 5  # @param {type: 'integer'}
num_samples_per_z = 5  # @param {type: 'integer'}
time_subsampling = 10  # @param {type: 'integer'}
time_last_n = 500  # @param {type: 'integer'}
eval_seed = 0  # @param {type: 'integer'}
vgcrl_evaluators.visualize_skills(
    env_fn=env_fn, disc=disc, inference_fn=inference_fn, params=params,
    output_path=output_path, verbose=True, num_z=num_z,
    num_samples_per_z=num_samples_per_z, time_subsampling=time_subsampling,
    time_last_n=time_last_n, seed=eval_seed)
plt.show()

# + id="VpAxzRnRu_ej"
# Estimate the Latent Goal Reaching metric (https://arxiv.org/abs/2106.01404).
num_samples_per_z = 10  # @param {type: 'integer'}
time_subsampling = 1  # @param {type: 'integer'}
time_last_n = 500  # @param {type: 'integer'}
eval_seed = 0  # @param {type: 'integer'}
metrics = vgcrl_evaluators.estimate_latent_goal_reaching_metric(
    params=params, env_fn=env_fn, disc=disc, inference_fn=inference_fn,
    goals=goals, num_samples_per_z=num_samples_per_z,
    time_subsampling=time_subsampling, time_last_n=time_last_n,
    seed=eval_seed,
)
pprint.pprint(metrics)

# + id="Uf5Jvf11NWUm"
# Estimate empowerment metrics using 1D/2D binning.
num_z = 10  # @param {type: 'integer'}
num_samples_per_z = 10  # @param {type: 'integer'}
time_subsampling = 1  # @param {type: 'integer'}
time_last_n = 500  # @param {type: 'integer'}
eval_seed = 0  # @param {type: 'integer'}
num_1d_bins = 1000  # @param {type: 'integer'}
num_2d_bins = 30  # @param {type: 'integer'}
metrics = vgcrl_evaluators.estimate_empowerment_metric(
    env_fn=env_fn, disc=disc, inference_fn=inference_fn, params=params,
    num_z=num_z, num_samples_per_z=num_samples_per_z,
    time_subsampling=time_subsampling, time_last_n=time_last_n,
    num_1d_bins=num_1d_bins, num_2d_bins=num_2d_bins,
    verbose=True, seed=eval_seed)
pprint.pprint(metrics)

# + id="RNMLEyaTspEM"
# Visualize a trajectory of the learned inference function.
# If `z_value` is None, sample z; otherwise fix z to z_value.
z_value = 1  # @param {'type': 'raw'}
eval_seed = 0  # @param {'type': 'integer'}
# Build the latent vector in the parameterization each algorithm expects
# (continuous for gcrl/cdiayn, one-hot skill index for diayn variants).
z = {
    'fixed_gcrl': jnp.ones(disc.z_size) * z_value,
    'gcrl': jnp.ones(disc.z_size) * z_value,
    'cdiayn': jnp.ones(disc.z_size) * z_value,
    'diayn': jax.nn.one_hot(jnp.array(int(z_value)), disc.z_size),
    'diayn_full': jax.nn.one_hot(jnp.array(int(z_value)), disc.z_size),
}[algo_name] if z_value is not None else None
env, states = evaluators.visualize_env(
    env_fn=env_fn, inference_fn=inference_fn, params=params, batch_size=0,
    seed=eval_seed, reset_args=(z,),
    step_args=(params['normalizer'], params['extra']),
    output_path=output_path,
    output_name=f'video_z_{z_value}',
)
HTML(html.render(env.sys, [state.qp for state in states]))
notebooks/braxlines/mimax.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/maruvadaItis/Movie-Recommender-System/blob/master/classifier_Phase.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="jK1fQ6LKJpSl"
# Multi-label movie-genre classification from plot text.
# Several imports (nltk tokenizers/stemmers, CountVectorizer, GridSearchCV, ...)
# are unused below; kept as-is from the original notebook.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import re
import pickle
#import mglearn
import time
from nltk.tokenize import TweetTokenizer  # doesn't split at apostrophes
import nltk
from nltk import Text
from nltk.tokenize import regexp_tokenize
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC

# +
# Load the raw movie table (Colab-mounted path).
movies = pd.read_csv('/content/movies.csv', delimiter=',')
movies.dataframeName = 'movies.csv'
nRow, nCol = movies.shape
print(f'There are {nRow} rows and {nCol} columns')

# +
movies.head()

# +
# Helper counter column; number of distinct raw Genre strings.
movies['Count']=1
movies[['Genre','Count']].groupby(['Genre'], as_index=False).count().shape[0]

# +
# Normalize the free-text Genre field into a single '|'-separated format.
# NOTE(review): str.replace patterns here are all plain substrings; behavior
# should match under both the old regex=True default and pandas >= 2.0 --
# confirm if upgrading pandas.
movies['GenreCorrected'] =movies['Genre']
movies['GenreCorrected']=movies['GenreCorrected'].str.strip()
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(' - ', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(' / ', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace('/', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(' & ', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(', ', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace('; ', '|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(' ','|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace(',','|')
movies['GenreCorrected']=movies['GenreCorrected'].str.replace('-','')

# +
# Distinct corrected genre strings, also dumped to CSV for inspection.
moviesGenre = movies[['GenreCorrected','Count']].groupby(['GenreCorrected']).count()
moviesGenre.to_csv('GenreCorrected.csv',sep=',')
movies[['GenreCorrected','Count']].groupby(['GenreCorrected'], as_index=False).count().shape[0]

# +
movies[['GenreCorrected','Count']].groupby(['GenreCorrected'],as_index=False).count().sort_values(['Count'], ascending=False).head(10)

# +
movies['GenreCorrected'] = movies['GenreCorrected'].apply(str)

# +
# Split into a sorted, de-duplicated genre array per movie.
movies['GenreSplit']=movies['GenreCorrected'].str.split('|')
movies['GenreSplit']= movies['GenreSplit'].apply(np.sort).apply(np.unique)
movies['GenreSplit'][11]

# +
# Flatten all per-movie genre arrays into one long array.
# NOTE(review): the range stops at shape[0]-1, so the last movie is skipped --
# looks like an off-by-one in the original notebook; confirm before relying
# on the exact counts.
genres_array = np.array([])
for i in range(0,movies.shape[0]-1):
    genres_array = np.concatenate((genres_array, movies['GenreSplit'][i] ))
genres_array

# +
genres = pd.DataFrame({'Genre':genres_array})
genres.head(10)

# +
genres['Count']=1
genres[['Genre','Count']].groupby(['Genre'], as_index=False).sum().sort_values(['Count'], ascending=False).head(10)

# +
# Aggregate to per-genre counts and drop the empty-string pseudo-genre.
genres=genres[['Genre','Count']].groupby(['Genre'], as_index=False).sum().sort_values(['Count'], ascending=False)
genres = genres[genres['Genre']!='']
genres.head(30)

# +
TotalCountGenres=sum(genres['Count'])
TotalCountGenres

# +
# Relative and cumulative frequency of each genre.
genres['Frequency'] = genres['Count']/TotalCountGenres
genres['CumulativeFrequency'] = genres['Frequency'].cumsum()
genres.head(20)

# +
np.array(genres[genres['CumulativeFrequency']<=.985]['Genre'])

# +
genres[genres['CumulativeFrequency']<=.985][['Genre','Count']].plot(x='Genre', y='Count', kind='bar', legend=False, grid=True, figsize=(8, 5))
plt.title("Number of movies per genre")
plt.ylabel('# of Occurrences', fontsize=12)
plt.xlabel('Movie genres', fontsize=12)
plt.show()

# +
# Keep the genres that cover ~95.7% of all occurrences as the label set.
mainGenres=np.array(genres[genres['CumulativeFrequency']<=.957]['Genre'])
arr1=np.array(['horror', 'romance', 'scifi','and'])
arr1[np.in1d(arr1,mainGenres)]

# +
movies['GenreSplit'][10:12].apply(lambda x: x[np.in1d(x,mainGenres)])

# +
# Per-movie genre array restricted to the main genres.
movies['GenreSplitMain'] = movies['GenreSplit'].apply(lambda x: x[np.in1d(x,mainGenres)])
movies[['GenreSplitMain','GenreSplit','Genre']][200:220]

# +
def clean_text(text):
    """Lower-case a plot string and expand common English contractions.

    Returns the cleaned string with surrounding spaces stripped. The
    non-word / whitespace collapsing steps were deliberately left disabled
    in the original notebook.
    """
    text = text.lower()
    text = re.sub(r"what's", "what is ", text)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"can't", "can not ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"i'm", "i am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r"\'scuse", " excuse ", text)
    #text = re.sub('\W', ' ', text)
    #text = re.sub('\s+', ' ', text)
    text = text.strip(' ')
    return text
# +
# Spot-check the cleaner on a couple of plots (earlier 'overview'-column
# experiments were left commented out in the original notebook).
#list(movies['overview'][10:12].apply(clean_text))
#movies['overview']=movies['overview'].apply(str)
list(movies['Plot'][10:12].apply(clean_text))

# +
list(movies['Plot'][10:12])

# +
# Clean every plot into PlotClean (apply(str) guards against non-string/NaN).
movies['Plot']=movies['Plot'].apply(str)
movies['PlotClean'] = movies['Plot'].apply(clean_text)

# +
movies[['Plot','PlotClean']][6:12]

# +
movies[['Plot','PlotClean','GenreSplitMain']][6:12]

# +
# Number of main genres attached to each movie.
len(movies['GenreSplitMain'][0])

# +
movies['GenreSplitMain'][0:5].apply(len)

# +
movies['MainGenresCount'] = movies['GenreSplitMain'].apply(len)
max(movies['MainGenresCount'] )

# +
movies[movies['MainGenresCount']==8]

# +
movies['MainGenresCount'].hist()
plt.title("Number of movies by number of genres")
plt.ylabel('# of movies', fontsize=12)
plt.xlabel('# of genres', fontsize=12)
plt.show()

# +
# One-hot encoding experiments: join each genre array with '-' and expand.
movies.GenreSplitMain[0:19].apply(lambda x: '-'.join(x)).str.split(pat='-',n=5,expand=True)

# +
movies.GenreSplitMain[6:15].apply(lambda x: '-'.join(x)).str.get_dummies(sep='-')

# +
movies.GenreSplitMain[6:15]

# +
movies.columns

# +
movies.shape

# +
len(movies.Title.unique())

# +
movies[movies.GenreCorrected==''].shape

# +
# Append the one-hot genre indicator columns to the movie table.
movies = pd.concat([movies, movies.GenreSplitMain.apply(lambda x: '-'.join(x)).str.get_dummies(sep='-')], axis=1)

# +
# 70/30 train/test split, excluding movies with no genre information.
MoviesTrain, MoviesTest = train_test_split(movies[movies.GenreCorrected!=''], random_state=42, test_size=0.30, shuffle=True)

# +
# Raw (un-normalized, un-smoothed) TF-IDF features over the cleaned plots.
tfidf = TfidfVectorizer(stop_words ='english', smooth_idf=False, sublinear_tf=False, norm=None, analyzer='word')
# +
# Vectorize: fit the vocabulary on the training plots only, then transform
# the test plots with the same vocabulary.
x_train = tfidf.fit_transform(MoviesTrain.PlotClean)
x_test = tfidf.transform(MoviesTest.PlotClean)

# +
print('nrow of the MoviesTrain ={}'. format(MoviesTrain.shape[0]))

# +
print('nrow of the MoviesTest ={}'. format(MoviesTest.shape[0]))

# +
type(x_train)

# +
x_train.toarray()

# +
tfidf.inverse_transform(x_train[0].toarray())

# +
print('The corpus is large. It contain {} words.'.format(len(x_train[0].toarray()[0])))

# +
# Targets: the one-hot genre columns appended earlier.
# NOTE(review): column index 10 is assumed to be where the indicator columns
# start; fragile if the input CSV schema changes -- verify against movies.columns.
y_train = MoviesTrain[MoviesTrain.columns[10:]]
y_test = MoviesTest[MoviesTest.columns[10:]]

# +
len(y_train.columns)

# +
len(y_test.columns)

# +
# One-vs-rest LinearSVC, fit separately per genre; record test accuracy.
linearSVC=OneVsRestClassifier(LinearSVC(), n_jobs=1)
accuracy_LinearSVC=pd.DataFrame(columns=['Genre', 'accuracy_LinearSVC'])
accuracy_LinearSVC.head()

# +
i = 0
for genre in mainGenres:
    linearSVC.fit(x_train, y_train[genre])
    prediction = linearSVC.predict(x_test)
    accuracy_LinearSVC.loc[i,'Genre'] = genre
    accuracy_LinearSVC.loc[i,'accuracy_LinearSVC'] = accuracy_score(y_test[genre], prediction)
    i=i+1

# +
accuracy_LinearSVC

# +
# Same per-genre loop with multinomial naive Bayes.
multinomialNB=OneVsRestClassifier(MultinomialNB(fit_prior=False, class_prior=None))
accuracy_multinomialNB=pd.DataFrame(columns=['Genre', 'accuracy_multinomialNB'])
accuracy_multinomialNB.head()

# +
i = 0
for genre in mainGenres:
    multinomialNB.fit(x_train, y_train[genre])
    prediction = multinomialNB.predict(x_test)
    accuracy_multinomialNB.loc[i,'Genre'] = genre
    accuracy_multinomialNB.loc[i,'accuracy_multinomialNB'] = accuracy_score(y_test[genre], prediction)
    i=i+1

# +
accuracy_multinomialNB

# +
# Side-by-side comparison table of the two classifiers.
accuracy_svc_mnb = pd.merge(accuracy_multinomialNB, accuracy_LinearSVC, on='Genre', how='inner')

# +
accuracy_svc_mnb

# +
# Reshape to long format for plotting.
# NOTE(review): these assignments alias (no .copy()), so renaming the columns
# below also renames accuracy_multinomialNB / accuracy_LinearSVC above.
accuracy_multinomialNB1 = accuracy_multinomialNB
accuracy_multinomialNB1.columns = ['Genre', 'accuracy']
accuracy_multinomialNB1['classifier'] = 'multinomialNB'

# +
accuracy_multinomialNB1

# +
accuracy_LinearSVC1 = accuracy_LinearSVC
accuracy_LinearSVC1.columns = ['Genre', 'accuracy']
accuracy_LinearSVC1['classifier'] = 'linearSVC'

# +
accuracy_LinearSVC1

# +
# NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is the
# modern equivalent if this notebook is run on a current pandas.
accu_mnb_svc = accuracy_multinomialNB1.append(accuracy_LinearSVC1)
accu_mnb_svc

# +
# Grouped bar chart of per-genre accuracy for both classifiers.
sns.set(rc={'figure.figsize':(18,10)})
sns.set(style="whitegrid")
s = sns.barplot(x="Genre", y="accuracy", hue="classifier", data=accu_mnb_svc)
s.set_title('Movies genre classification accuracy (multinomialNB VS LinearSVC)', size=16)
s.set_xticklabels(list(mainGenres) ,rotation=45, size=15)

# +
len(mainGenres)

# + (empty scratch cells from the original notebook, left intentionally blank)
classifier_Phase.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Standardization, or mean removal and variance scaling # Standardization of datasets is a common requirement for many machine learning estimators implemented in scikit-learn; they might behave badly if the individual features do not more or less look like standard normally distributed data: Gaussian with zero mean and unit variance. # # In practice we often ignore the shape of the distribution and just transform the data to center it by removing the mean value of each feature, then scale it by dividing non-constant features by their standard deviation. # # For instance, many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the l1 and l2 regularizers of linear models) assume that all features are centered around zero and have variance in the same order. If a feature has a variance that is orders of magnitude larger than others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. 
# # The preprocessing module provides the StandardScaler utility class, which is a quick and easy way to perform the following operation on an array-like dataset: import warnings warnings.filterwarnings('ignore') from sklearn import preprocessing import numpy as np x_train = np.array([[10,20,30], [45,85,76], [74,85,76]]) x_train scaler = preprocessing.StandardScaler().fit(x_train) scaler scaler.mean_ scaler.scale_ X_scaled = scaler.transform(x_train) X_scaled X_scaled.mean(axis = 0) X_scaled.std(axis = 0) # ![image.png](attachment:image.png) from sklearn.datasets import make_classification from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler x,y = make_classification(random_state = 4) x_train,x_test,y_train,y_test = train_test_split(x,y,random_state = 42) pipe = make_pipeline(StandardScaler(), LogisticRegression()) pipe.fit(x_train,y_train) pipe.score(x_test,y_test) # # Scaling features to a range # An alternative standardization is scaling features to lie between a given minimum and maximum value, often between zero and one, or so that the maximum absolute value of each feature is scaled to unit size. This can be achieved using MinMaxScaler or MaxAbsScaler, respectively. # # The motivation to use this scaling include robustness to very small standard deviations of features and preserving zero entries in sparse data. 
# # Here is an example to scale a toy data matrix to the [0, 1] range:

X_train = np.array([[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]])

# MinMaxScaler maps each column linearly onto [0, 1] using the per-column
# minimum and maximum observed during fit.
min_max_scaler = preprocessing.MinMaxScaler()
X_train_minmax = min_max_scaler.fit_transform(X_train)
X_train_minmax

X_train_minmax

# Unseen data is transformed with the ranges learned from the training data,
# so values outside those ranges land outside [0, 1] (e.g. -3 below).
X_test = np.array([[-3., -1., 4.]])
X_test_minmax = min_max_scaler.transform(X_test)
X_test_minmax

min_max_scaler.scale_  # per-feature multiplicative factor

min_max_scaler.min_    # per-feature additive offset

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html#sklearn.preprocessing.MinMaxScaler

# # MaxAbs Scaler
# MaxAbsScaler works in a very similar fashion, but scales in a way that the
# training data lies within the range [-1, 1] by dividing through the largest
# maximum value in each feature. It is meant for data that is already centered
# at zero or sparse data.
#
# Here is how to use the toy data from the previous example with this scaler:

X_train = np.array([[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]])
max_abs_scaler = preprocessing.MaxAbsScaler()
X_train_maxabs = max_abs_scaler.fit_transform(X_train)
X_train_maxabs

X_test = np.array([[ -3., -1., 4.]])
X_test_maxabs = max_abs_scaler.transform(X_test)
X_test_maxabs

max_abs_scaler.scale_  # per-feature max absolute value used as divisor

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html#sklearn.preprocessing.MaxAbsScaler

# # Scaling sparse data
# Centering sparse data would destroy the sparseness structure in the data, and
# thus rarely is a sensible thing to do. However, it can make sense to scale
# sparse inputs, especially if features are on different scales.
#
# MaxAbsScaler was specifically designed for scaling sparse data, and is the
# recommended way to go about this. However, StandardScaler can accept
# scipy.sparse matrices as input, as long as with_mean=False is explicitly
# passed to the constructor. Otherwise a ValueError will be raised, as silently
# centering would break the sparsity and would often crash the execution by
# allocating excessive amounts of memory unintentionally.
# RobustScaler cannot be fitted to sparse inputs, but you can use the transform
# method on sparse inputs.
#
# Note that the scalers accept both Compressed Sparse Rows and Compressed
# Sparse Columns format (see scipy.sparse.csr_matrix and
# scipy.sparse.csc_matrix). Any other sparse input will be converted to the
# Compressed Sparse Rows representation. To avoid unnecessary memory copies, it
# is recommended to choose the CSR or CSC representation upstream.
#
# Finally, if the centered data is expected to be small enough, explicitly
# converting the input to an array using the toarray method of sparse matrices
# is another option.

# # Scaling data with outliers
# If your data contains many outliers, scaling using the mean and variance of
# the data is likely to not work very well. In these cases, you can use
# RobustScaler as a drop-in replacement instead. It uses more robust estimates
# for the center and range of your data.

# ## RobustScaler
# Scale features using statistics that are robust to outliers.
#
# This scaler removes the median and scales the data according to the quantile
# range (defaults to IQR: Interquartile Range). The IQR is the range between
# the 1st quartile (25th quantile) and the 3rd quartile (75th quantile).
#
# Centering and scaling happen independently on each feature by computing the
# relevant statistics on the samples in the training set. The median and
# interquartile range are then stored to be used on later data using the
# transform method.
#
# Standardization of a dataset is a common requirement for many machine
# learning estimators. Typically this is done by removing the mean and scaling
# to unit variance. However, outliers can often influence the sample mean /
# variance in a negative way. In such cases, the median and the interquartile
# range often give better results.
from sklearn.preprocessing import RobustScaler

X = [[ 1., -2., 2.], [ -2., 1., 3.], [ 4., 1., -2.]]
X

# fit() stores the per-feature median and IQR; transform() centers by the
# median and divides by the IQR, so outliers have little influence.
transformer = RobustScaler().fit(X)
transformer

transformer.transform(X)

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html#sklearn.preprocessing.RobustScaler

# # Centering kernel matrices
# ## KernelCenterer
# Center a kernel matrix.
#
# Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function
# mapping x to a Hilbert space. KernelCenterer centers (i.e., normalizes to
# have zero mean) the data without explicitly computing phi(x). It is
# equivalent to centering phi(x) with
# sklearn.preprocessing.StandardScaler(with_std=False).

from sklearn.preprocessing import KernelCenterer
from sklearn.metrics.pairwise import pairwise_kernels

X = [[ 1., -2., 2.], [ -2., 1., 3.], [ 4., 1., -2.]]
# Build the linear (Gram) kernel matrix K[i, j] = <X[i], X[j]>.
K = pairwise_kernels(X, metric='linear')
K

transformer = KernelCenterer().fit(K)
transformer

transformer.transform(K)

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KernelCenterer.html#sklearn-preprocessing-kernelcenterer

# # Non-linear transformation
# ## QuantileTransformer
# class sklearn.preprocessing.QuantileTransformer(*, n_quantiles=1000,
#     output_distribution='uniform', ignore_implicit_zeros=False,
#     subsample=100000, random_state=None, copy=True)
# Transform features using quantiles information.
#
# This method transforms the features to follow a uniform or a normal
# distribution. Therefore, for a given feature, this transformation tends to
# spread out the most frequent values. It also reduces the impact of (marginal)
# outliers: this is therefore a robust preprocessing scheme.
#
# The transformation is applied on each feature independently. First an
# estimate of the cumulative distribution function of a feature is used to map
# the original values to a uniform distribution. The obtained values are then
# mapped to the desired output distribution using the associated quantile
# function.
# Features values of new/unseen data that fall below or above the fitted range
# will be mapped to the bounds of the output distribution. Note that this
# transform is non-linear. It may distort linear correlations between variables
# measured at the same scale but renders variables measured at different scales
# more directly comparable.

# QuantileTransformer provides a non-parametric transformation to map the data
# to a uniform distribution with values between 0 and 1:

import numpy as np
from sklearn.preprocessing import QuantileTransformer

# 25 sorted samples from a normal distribution (fixed seed for repeatability).
rng = np.random.RandomState(0)
X = np.sort(rng.normal(loc=0.5, scale=0.25, size=(25, 1)), axis=0)
qt = QuantileTransformer(n_quantiles=10, random_state=0)
qt

qt.fit_transform(X)

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.QuantileTransformer.html#sklearn.preprocessing.QuantileTransformer

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
quantile_transformer = preprocessing.QuantileTransformer(random_state=0)
X_train_trans = quantile_transformer.fit_transform(X_train)
X_test_trans = quantile_transformer.transform(X_test)
X_test_trans

# Landmarks of the raw first feature: min, 25%, median, 75%, max.
np.percentile(X_train[:, 0], [0, 25, 50, 75, 100])

# This feature corresponds to the sepal length in cm. Once the quantile
# transformation is applied, those landmarks closely approach the percentiles
# previously defined:
np.percentile(X_train_trans[:, 0], [0, 25, 50, 75, 100])

# This can be confirmed on an independent testing set with similar remarks:
np.percentile(X_test[:, 0], [0, 25, 50, 75, 100])

np.percentile(X_test_trans[:, 0], [0, 25, 50, 75, 100])

# # Mapping to a Gaussian distribution
# ## PowerTransformer
# Apply a power transform featurewise to make data more Gaussian-like.
#
# Power transforms are a family of parametric, monotonic transformations that
# are applied to make data more Gaussian-like.
# This is useful for modeling issues related to heteroscedasticity
# (non-constant variance), or other situations where normality is desired.
#
# Currently, PowerTransformer supports the Box-Cox transform and the
# Yeo-Johnson transform. The optimal parameter for stabilizing variance and
# minimizing skewness is estimated through maximum likelihood.
#
# Box-Cox requires input data to be strictly positive, while Yeo-Johnson
# supports both positive and negative data.
#
# By default, zero-mean, unit-variance normalization is applied to the
# transformed data.

# In many modeling scenarios, normality of the features in a dataset is
# desirable. Power transforms are a family of parametric, monotonic
# transformations that aim to map data from any distribution to as close to a
# Gaussian distribution as possible in order to stabilize variance and minimize
# skewness.
#
# PowerTransformer currently provides two such power transformations, the
# Yeo-Johnson transform and the Box-Cox transform.
#
# The Yeo-Johnson transform is given by:
# ![image.png](attachment:image.png)

# Box-Cox can only be applied to strictly positive data. In both methods, the
# transformation is parameterized by λ (lambda), which is determined through
# maximum likelihood estimation.
# Here is an example of using Box-Cox to map samples drawn from a lognormal
# distribution to a normal distribution:

import numpy as np
from sklearn.preprocessing import PowerTransformer

# Default method is 'yeo-johnson'; lambdas_ holds the fitted λ per feature.
pt = PowerTransformer()
data = [[1, 2], [3, 2], [4, 5]]
print(pt.fit(data))
print(pt.lambdas_)
print(pt.transform(data))

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html#sklearn.preprocessing.PowerTransformer

# Box-Cox on strictly positive (lognormal) samples; standardize=False keeps
# the raw transformed values instead of re-centering/re-scaling them.
pt = preprocessing.PowerTransformer(method='box-cox', standardize=False)
X_lognormal = np.random.RandomState(616).lognormal(size=(3, 3))
X_lognormal

pt.fit_transform(X_lognormal)

# While the above example sets the standardize option to False,
# PowerTransformer will apply zero-mean, unit-variance normalization to the
# transformed output by default.

# # Normalization
# Normalization is the process of scaling individual samples to have unit
# norm. This process can be useful if you plan to use a quadratic form such as
# the dot-product or any other kernel to quantify the similarity of any pair of
# samples.
#
# This assumption is the base of the Vector Space Model often used in text
# classification and clustering contexts.
#
# The function normalize provides a quick and easy way to perform this
# operation on a single array-like dataset, either using the l1, l2, or max
# norms:

X = [[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]]

# +
# Each ROW is rescaled to unit Euclidean (l2) norm.
X_normalized = preprocessing.normalize(X, norm='l2')

X_normalized
# -

# The preprocessing module further provides a utility class Normalizer that
# implements the same operation using the Transformer API (even though the fit
# method is useless in this case: the class is stateless as this operation
# treats samples independently).
# # This class is hence suitable for use in the early steps of a Pipeline: normalizer = preprocessing.Normalizer().fit(X) # fit does nothing normalizer # The normalizer instance can then be used on sample vectors as any transformer: normalizer.transform(X) normalizer.transform([[-1., 1., 0.]]) # Note: L2 normalization is also known as spatial sign preprocessing. # # Encoding categorical features # Often features are not given as continuous values but categorical. For example a person could have features ["male", "female"], ["from Europe", "from US", "from Asia"], ["uses Firefox", "uses Chrome", "uses Safari", "uses Internet Explorer"]. Such features can be efficiently coded as integers, for instance ["male", "from US", "uses Internet Explorer"] could be expressed as [0, 1, 3] while ["female", "from Asia", "uses Chrome"] would be [1, 2, 1]. # # To convert categorical features to such integer codes, we can use the OrdinalEncoder. This estimator transforms each categorical feature to one new feature of integers (0 to n_categories - 1): enc = preprocessing.OrdinalEncoder() X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from US', 'uses Safari']]) # Such integer representation can, however, not be used directly with all scikit-learn estimators, as these expect continuous input, and would interpret the categories as being ordered, which is often not desired (i.e. the set of browsers was ordered arbitrarily). # # Another possibility to convert categorical features to features that can be used with scikit-learn estimators is to use a one-of-K, also known as one-hot or dummy encoding. This type of encoding can be obtained with the OneHotEncoder, which transforms each categorical feature with n_categories possible values into n_categories binary features, with one of them 1, and all others 0. 
# ## Continuing the example above:- enc = preprocessing.OneHotEncoder() X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from US', 'uses Safari'], ['male', 'from Europe', 'uses Safari']]).toarray() # By default, the values each feature can take is inferred automatically from the dataset and can be found in the categories_ attribute: enc.categories_ # It is possible to specify this explicitly using the parameter categories. There are two genders, four possible continents and four web browsers in our dataset: genders = ['female', 'male'] locations = ['from Africa', 'from Asia', 'from Europe', 'from US'] browsers = ['uses Chrome', 'uses Firefox', 'uses IE', 'uses Safari'] enc = preprocessing.OneHotEncoder(categories=[genders, locations, browsers]) X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from Asia', 'uses Chrome']]).toarray() # If there is a possibility that the training data might have missing categorical features, it can often be better to specify handle_unknown='ignore' instead of setting the categories manually as above. When handle_unknown='ignore' is specified and unknown categories are encountered during transform, no error will be raised but the resulting one-hot encoded columns for this feature will be all zeros (handle_unknown='ignore' is only supported for one-hot encoding): enc = preprocessing.OneHotEncoder(handle_unknown='ignore') X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']] enc.fit(X) enc.transform([['female', 'from Asia', 'uses Chrome']]).toarray() # It is also possible to encode each column into n_categories - 1 columns instead of n_categories columns by using the drop parameter. This parameter allows the user to specify a category for each feature to be dropped. This is useful to avoid co-linearity in the input matrix in some classifiers. 
# Such functionality is useful, for example, when using non-regularized
# regression (LinearRegression), since co-linearity would cause the covariance
# matrix to be non-invertible. When this parameter is not None, handle_unknown
# must be set to error:

X = [['male', 'from US', 'uses Safari'], ['female', 'from Europe', 'uses Firefox']]
X

# drop='first' removes the first (alphabetically smallest) category of every
# feature, so each feature contributes n_categories - 1 columns.
drop_enc = preprocessing.OneHotEncoder(drop='first').fit(X)
drop_enc.categories_

drop_enc.transform(X).toarray()

# One might want to drop one of the two columns only for features with 2
# categories. In this case, you can set the parameter drop='if_binary'.
X = [['male', 'US', 'Safari'], ['female', 'Europe', 'Firefox'], ['female', 'Asia', 'Chrome']]
drop_enc = preprocessing.OneHotEncoder(drop='if_binary').fit(X)
drop_enc.categories_

drop_enc.transform(X).toarray()

# In the transformed X, the first column is the encoding of the feature with
# categories "male"/"female", while the remaining 6 columns are the encoding of
# the 2 features with respectively 3 categories each.

# # OneHotEncoder
# OneHotEncoder supports categorical features with missing values by
# considering the missing values as an additional category.
# Encode categorical features as a one-hot numeric array.
#
# The input to this transformer should be an array-like of integers or
# strings, denoting the values taken on by categorical (discrete) features.
# The features are encoded using a one-hot (aka 'one-of-K' or 'dummy') encoding
# scheme. This creates a binary column for each category and returns a sparse
# matrix or dense array (depending on the sparse parameter).
#
# By default, the encoder derives the categories based on the unique values in
# each feature. Alternatively, you can also specify the categories manually.
#
# This encoding is needed for feeding categorical data to many scikit-learn
# estimators, notably linear models and SVMs with the standard kernels.
#
# Note: a one-hot encoding of y labels should use a LabelBinarizer instead.

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder

# # Examples
# Given a dataset with two features, we let the encoder find the unique values
# per feature and transform the data to a binary one-hot encoding.

from sklearn.preprocessing import OneHotEncoder

enc = OneHotEncoder(handle_unknown='ignore')
X = [['Male', 1], ['Female', 3], ['Female', 2]]
enc.fit(X)

enc.categories_

# The unknown value 4 yields all-zero columns for the second feature because
# handle_unknown='ignore'.
enc.transform([['Female', 1], ['Male', 4]]).toarray()

# inverse_transform maps one-hot rows back to the original category labels.
enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])

# NOTE(review): get_feature_names was removed in scikit-learn 1.2 in favour of
# get_feature_names_out — confirm against the sklearn version in use.
enc.get_feature_names(['gender', 'group'])

# ### One can always drop the first column for each feature:
drop_enc = OneHotEncoder(drop='first').fit(X)
drop_enc.categories_

drop_enc.transform([['Female', 1], ['Male', 2]]).toarray()

# ### Or drop a column only for features having 2 categories:
drop_binary_enc = OneHotEncoder(drop='if_binary').fit(X)
drop_binary_enc.transform([['Female', 1], ['Male', 2]]).toarray()

# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html#sklearn.preprocessing.OneHotEncoder

# # Discretization
# Discretization (otherwise known as quantization or binning) provides a way to
# partition continuous features into discrete values. Certain datasets with
# continuous features may benefit from discretization, because discretization
# can transform the dataset of continuous attributes to one with only nominal
# attributes.
#
# One-hot encoded discretized features can make a model more expressive, while
# maintaining interpretability. For instance, pre-processing with a discretizer
# can introduce nonlinearity to linear models.
# ## K-bins discretization
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html#sklearn.preprocessing.KBinsDiscretizer

# KBinsDiscretizer discretizes features into k bins:

X = np.array([[ -3., 5., 15 ], [ 0., 6., 14 ], [ 6., 3., 11 ]])
# n_bins is per-feature here: 3 bins for the first column, 2 for the others.
est = preprocessing.KBinsDiscretizer(n_bins=[3, 2, 2], encode='ordinal').fit(X)

# By default the output is one-hot encoded into a sparse matrix (see Encoding
# categorical features) and this can be configured with the encode parameter.
# For each feature, the bin edges are computed during fit and, together with
# the number of bins, they define the intervals.

# Based on these bin intervals, X is transformed as follows:
est.transform(X)

# The resulting dataset contains ordinal attributes which can be further used
# in a Pipeline.
#
# Discretization is similar to constructing histograms for continuous data.
# However, histograms focus on counting features which fall into particular
# bins, whereas discretization focuses on assigning feature values to these
# bins.

# ## KBinsDiscretizer
# KBinsDiscretizer implements different binning strategies, which can be
# selected with the strategy parameter. The 'uniform' strategy uses
# constant-width bins. The 'quantile' strategy uses the quantiles values to
# have equally populated bins in each feature. The 'kmeans' strategy defines
# bins based on a k-means clustering procedure performed on each feature
# independently.
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.KBinsDiscretizer.html#sklearn.preprocessing.KBinsDiscretizer

X = [[-2, 1, -4, -1], [-1, 2, -3, -0.5], [ 0, 3, -2, 0.5], [ 1, 4, -1, 2]]
X

est = preprocessing.KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
est.fit(X)

Xt = est.transform(X)
Xt

# Sometimes it may be useful to convert the data back into the original
# feature space.
# The inverse_transform function converts the binned data into the original
# feature space. Each value will be equal to the mean of the two bin edges.

est.bin_edges_[0]  # edges learned for the first feature

est.inverse_transform(Xt)

# # Feature binarization
# Feature binarization is the process of thresholding numerical features to
# get boolean values. This can be useful for downstream probabilistic
# estimators that make the assumption that the input data is distributed
# according to a multi-variate Bernoulli distribution. For instance, this is
# the case for the BernoulliRBM.
#
# It is also common among the text processing community to use binary feature
# values (probably to simplify the probabilistic reasoning) even if normalized
# counts (a.k.a. term frequencies) or TF-IDF valued features often perform
# slightly better in practice.
#
# As for the Normalizer, the utility class Binarizer is meant to be used in the
# early stages of a Pipeline. The fit method does nothing as each sample is
# treated independently of others:

X = [[ 1., -1., 2.], [ 2., 0., 0.], [ 0., 1., -1.]]
binarizer = preprocessing.Binarizer().fit(X)  # fit does nothing
binarizer

# Default threshold is 0.0: strictly positive values map to 1, the rest to 0.
binarizer.transform(X)

# It is possible to adjust the threshold of the binarizer:
binarizer = preprocessing.Binarizer(threshold=1.1)
binarizer.transform(X)

# As for the Normalizer class, the preprocessing module provides a companion
# function binarize to be used when the transformer API is not necessary.
#
# Note that the Binarizer is similar to the KBinsDiscretizer when k = 2, and
# when the bin edge is at the value threshold.

# # Imputation of missing values
# For various reasons, many real world datasets contain missing values, often
# encoded as blanks, NaNs or other placeholders. Such datasets however are
# incompatible with scikit-learn estimators which assume that all values in an
# array are numerical, and that all have and hold meaning. A basic strategy to
# use incomplete datasets is to discard entire rows and/or columns containing
# missing values.
# However, this comes at the price of losing data which may be valuable (even
# though incomplete). A better strategy is to impute the missing values, i.e.,
# to infer them from the known part of the data. See the Glossary of Common
# Terms and API Elements entry on imputation.
#
# # Univariate vs. Multivariate Imputation
# One type of imputation algorithm is univariate, which imputes values in the
# i-th feature dimension using only non-missing values in that feature
# dimension (e.g. impute.SimpleImputer). By contrast, multivariate imputation
# algorithms use the entire set of available feature dimensions to estimate the
# missing values (e.g. impute.IterativeImputer).
#
# # Univariate feature imputation
# The SimpleImputer class provides basic strategies for imputing missing
# values. Missing values can be imputed with a provided constant value, or
# using the statistics (mean, median or most frequent) of each column in which
# the missing values are located. This class also allows for different missing
# values encodings.
#
# The following snippet demonstrates how to replace missing values, encoded as
# np.nan, using the mean value of the columns (axis 0) that contain the missing
# values:

import numpy as np
from sklearn.impute import SimpleImputer

# fit() learns each column's mean from the non-missing entries only.
imp = SimpleImputer(missing_values=np.nan, strategy='mean')
imp.fit([[1, 2], [np.nan, 3], [7, 6]])

X = [[np.nan, 2], [6, np.nan], [7, 6]]
print(imp.transform(X))

# ## The SimpleImputer class also supports sparse matrices:

import scipy.sparse as sp

# In sparse input, -1 (not the implicit zeros) marks the missing entries here.
X = sp.csc_matrix([[1, 2], [0, -1], [8, 4]])
imp = SimpleImputer(missing_values=-1, strategy='mean')
imp.fit(X)

X_test = sp.csc_matrix([[-1, 2], [6, -1], [7, 6]])
print(imp.transform(X_test).toarray())

# Note that this format is not meant to be used to implicitly store missing
# values in the matrix because it would densify it at transform time. Missing
# values encoded by 0 must be used with dense input.

# https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer

import pandas as pd

# 'most_frequent' also works for non-numeric (categorical/string) data.
df = pd.DataFrame([["a", "x"], [np.nan, "y"], ["a", np.nan], ["b", "y"]], dtype="category")
imp = SimpleImputer(strategy="most_frequent")
print(imp.fit_transform(df))

# # Multivariate feature imputation
# A more sophisticated approach is to use the IterativeImputer class, which
# models each feature with missing values as a function of other features, and
# uses that estimate for imputation. It does so in an iterated round-robin
# fashion: at each step, a feature column is designated as output y and the
# other feature columns are treated as inputs X. A regressor is fit on (X, y)
# for known y. Then, the regressor is used to predict the missing values of y.
# This is done for each feature in an iterative fashion, and then is repeated
# for max_iter imputation rounds. The results of the final imputation round are
# returned.
# https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn.impute.IterativeImputer

import numpy as np
# IterativeImputer is experimental: this import flips the feature flag and
# must come before importing IterativeImputer itself.
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer

imp = IterativeImputer(max_iter=10, random_state=0)
imp.fit([[1, 2], [3, 6], [4, 8], [np.nan, 3], [7, np.nan]])

X_test = [[np.nan, 2], [6, np.nan], [np.nan, 6]]
print(np.round(imp.transform(X_test)))

# https://scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html#sphx-glr-auto-examples-impute-plot-missing-values-py

# # Multiple vs. Single Imputation
# In the statistics community, it is common practice to perform multiple
# imputations, generating, for example, m separate imputations for a single
# feature matrix. Each of these m imputations is then put through the
# subsequent analysis pipeline (e.g. feature engineering, clustering,
# regression, classification). The m final analysis results (e.g.
# held-out validation errors) allow the data scientist to obtain understanding
# of how analytic results may differ as a consequence of the inherent
# uncertainty caused by the missing values. The above practice is called
# multiple imputation.
#
# Our implementation of IterativeImputer was inspired by the R MICE package
# (Multivariate Imputation by Chained Equations) 1, but differs from it by
# returning a single imputation instead of multiple imputations. However,
# IterativeImputer can also be used for multiple imputations by applying it
# repeatedly to the same dataset with different random seeds when
# sample_posterior=True. See 2, chapter 4 for more discussion on multiple vs.
# single imputations.
#
# It is still an open problem as to how useful single vs. multiple imputation
# is in the context of prediction and classification when the user is not
# interested in measuring uncertainty due to missing values.
#
# Note that a call to the transform method of IterativeImputer is not allowed
# to change the number of samples. Therefore multiple imputations cannot be
# achieved by a single call to transform.
#
# # Nearest neighbors imputation
# The KNNImputer class provides imputation for filling in missing values using
# the k-Nearest Neighbors approach. By default, a euclidean distance metric
# that supports missing values, nan_euclidean_distances, is used to find the
# nearest neighbors. Each missing feature is imputed using values from
# n_neighbors nearest neighbors that have a value for the feature. The features
# of the neighbors are averaged uniformly or weighted by distance to each
# neighbor. If a sample has more than one feature missing, then the neighbors
# for that sample can be different depending on the particular feature being
# imputed. When the number of available neighbors is less than n_neighbors and
# there are no defined distances to the training set, the training set average
# for that feature is used during imputation.
# If there is at least one neighbor with a defined distance, the weighted or
# unweighted average of the remaining neighbors will be used during imputation.
# If a feature is always missing in training, it is removed during transform.
#
# The following snippet demonstrates how to replace missing values, encoded as
# np.nan, using the mean feature value of the two nearest neighbors of samples
# with missing values:
# https://scikit-learn.org/stable/modules/generated/sklearn.impute.KNNImputer.html#sklearn.impute.KNNImputer

import numpy as np
from sklearn.impute import KNNImputer

nan = np.nan
X = [[1, 2, nan], [3, 4, 3], [nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2, weights="uniform")
imputer.fit_transform(X)

# # Marking imputed values
# The MissingIndicator transformer is useful to transform a dataset into a
# corresponding binary matrix indicating the presence of missing values in the
# dataset. This transformation is useful in conjunction with imputation. When
# using imputation, preserving the information about which values had been
# missing can be informative. Note that both the SimpleImputer and
# IterativeImputer have the boolean parameter add_indicator (False by default)
# which when set to True provides a convenient way of stacking the output of
# the MissingIndicator transformer with the output of the imputer.

# NaN is usually used as the placeholder for missing values. However, it
# enforces the data type to be float. The parameter missing_values allows to
# specify other placeholders such as integer.
#
# In the following example, we will use -1 as missing values:

from sklearn.impute import MissingIndicator

X = np.array([[-1, -1, 1, 3], [4, -1, 0, -1], [8, -1, 1, 0]])
indicator = MissingIndicator(missing_values=-1)
# Boolean mask: True where X holds the missing-value marker (-1).
mask_missing_values_only = indicator.fit_transform(X)
mask_missing_values_only

# The features parameter is used to choose the features for which the mask is
# constructed.
# By default, it is 'missing-only', which returns the imputer mask of the
# features containing missing values at fit time:
indicator.features_

# The features parameter can be set to 'all' to return all features whether or
# not they contain missing values:
indicator = MissingIndicator(missing_values=-1, features="all")
mask_all = indicator.fit_transform(X)
mask_all

indicator.features_

# https://scikit-learn.org/stable/modules/generated/sklearn.impute.MissingIndicator.html#sklearn.impute.MissingIndicator

# When using the MissingIndicator in a Pipeline, be sure to use the
# FeatureUnion or ColumnTransformer to add the indicator features to the
# regular features. First we obtain the iris dataset, and add some missing
# values to it.

from sklearn.datasets import load_iris
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.model_selection import train_test_split
from sklearn.pipeline import FeatureUnion, make_pipeline
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
# Randomly blank out ~half of the entries to simulate missing data.
mask = np.random.randint(0, 2, size=X.shape).astype(bool)
X[mask] = np.nan
X_train, X_test, y_train, _ = train_test_split(X, y, test_size=100,
                                               random_state=0)

# Now we create a FeatureUnion. All features will be imputed using
# SimpleImputer, in order to enable classifiers to work with this data.
# Additionally, it adds the indicator variables from MissingIndicator.

transformer = FeatureUnion(
    transformer_list=[
        ('features', SimpleImputer(strategy='mean')),
        ('indicators', MissingIndicator())])
transformer = transformer.fit(X_train, y_train)
results = transformer.transform(X_test)
# 4 imputed features + one indicator column per originally-missing feature.
results.shape

# Of course, we cannot use the transformer to make any predictions. We should
# wrap this in a Pipeline with a classifier (e.g., a DecisionTreeClassifier)
# to be able to make predictions.

clf = make_pipeline(transformer, DecisionTreeClassifier())
clf = clf.fit(X_train, y_train)
results = clf.predict(X_test)
results.shape
Preprocessing Data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import time

import numpy as np
import pandas as pd

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms

import matplotlib.pyplot as plt
from PIL import Image

import os  # NOTE(review): duplicate of the import above; kept as-is
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

if torch.cuda.is_available():
    torch.backends.cudnn.deterministic = True

# +
# Hyperparameters
RANDOM_SEED = 1
LEARNING_RATE = 0.001
BATCH_SIZE = 128
NUM_EPOCHS = 25

# Architecture
NUM_FEATURES = 28*28
NUM_CLASSES = 10

# Other
DEVICE = "cuda"
GRAYSCALE = False

# +
# Plain (uncoloured) MNIST loaders; used only for sanity checks below.
train_dataset = datasets.MNIST(root='data',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root='data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False)

# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break

# +
device = torch.device(DEVICE)
# NOTE(review): seeds with 0 here although RANDOM_SEED = 1 is defined above —
# confirm which seed is intended.
torch.manual_seed(0)

for epoch in range(2):
    for batch_idx, (x, y) in enumerate(train_loader):
        print('Epoch:', epoch+1, end='')
        print(' | Batch index:', batch_idx, end='')
        print(' | Batch size:', y.size()[0])
        x = x.to(device)
        y = y.to(device)
        break

# +
from torch.utils import data
from torchvision import transforms
from torchvision.datasets import MNIST


class BiasedMNIST(MNIST):
    """MNIST with a synthetic colour bias injected into the image background.

    Each digit class is paired with one of ten predefined colours
    (``COLOUR_MAP``). ``data_label_correlation`` (rho) is the probability that
    a sample is coloured with its class's own colour; with probability
    ``1 - rho`` it is coloured with one of ``n_confusing_labels`` other
    colours instead. rho=1.0 gives a perfectly biased dataset; rho=0.1 (1/10)
    gives the unbiased test-time setting. ``n_confusing_labels`` (default 9)
    controls how many other colours each class can receive, mimicking
    unbalanced real-world biases. Usage is otherwise the same as torchvision's
    MNIST dataset class.
    """

    # One RGB background colour per digit class 0-9.
    COLOUR_MAP = [[255, 0, 0], [0, 255, 0], [0, 0, 255],
                  [225, 225, 0], [225, 0, 225], [0, 255, 255],
                  [255, 128, 0], [255, 0, 128], [128, 0, 255],
                  [128, 128, 128]]

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, data_label_correlation=1.0,
                 n_confusing_labels=9):
        super().__init__(root, train=train, transform=transform,
                         target_transform=target_transform, download=download)
        self.random = True
        self.data_label_correlation = data_label_correlation
        self.n_confusing_labels = n_confusing_labels
        # Build the coloured dataset, then shuffle all four parallel arrays
        # with the same permutation so they stay aligned.
        self.data, self.targets, self.colored_bg, self.biased_targets = \
            self.build_biased_mnist()

        indices = np.arange(len(self.data))
        self._shuffle(indices)

        self.data = self.data[indices].numpy()
        self.colored_bg = self.colored_bg[indices].numpy()
        self.targets = self.targets[indices]
        self.biased_targets = self.biased_targets[indices]

    @property
    def raw_folder(self):
        # Override torchvision's default layout (root/<cls>/raw).
        return os.path.join(self.root, 'raw')

    @property
    def processed_folder(self):
        return os.path.join(self.root, 'processed')

    def _shuffle(self, iteratable):
        # In-place shuffle, disabled when self.random is False.
        if self.random:
            np.random.shuffle(iteratable)

    def _make_biased_mnist(self, indices, label):
        # Implemented by subclasses (see ColourBiasedMNIST).
        raise NotImplementedError

    def _update_bias_indices(self, bias_indices, label):
        """Assign samples of `label` to bias colours per the correlation rho."""
        if self.n_confusing_labels > 9 or self.n_confusing_labels < 1:
            raise ValueError(self.n_confusing_labels)

        indices = np.where((self.targets == label).numpy())[0]
        self._shuffle(indices)
        indices = torch.LongTensor(indices)

        n_samples = len(indices)
        # rho fraction of the class keeps its own colour ...
        n_correlated_samples = int(n_samples * self.data_label_correlation)
        # ... the remainder is split evenly across the confusing colours.
        n_decorrelated_per_class = int(np.ceil((n_samples - n_correlated_samples)
                                               / (self.n_confusing_labels)))

        correlated_indices = indices[:n_correlated_samples]
        bias_indices[label] = torch.cat([bias_indices[label],
                                         correlated_indices])

        decorrelated_indices = torch.split(indices[n_correlated_samples:],
                                           n_decorrelated_per_class)

        # The confusing colours are the labels following `label` (mod 10).
        other_labels = [_label % 10 for _label in
                        range(label + 1, label + 1 + self.n_confusing_labels)]
        self._shuffle(other_labels)

        for idx, _indices in enumerate(decorrelated_indices):
            _label = other_labels[idx]
            bias_indices[_label] = torch.cat([bias_indices[_label], _indices])

    def build_biased_mnist(self):
        """Build biased MNIST: returns (data, targets, colored_bg, biased_targets)."""
        n_labels = self.targets.max().item() + 1

        # bias_indices[c] collects the sample indices that get colour c.
        bias_indices = {label: torch.LongTensor() for label in range(n_labels)}
        for label in range(n_labels):
            self._update_bias_indices(bias_indices, label)

        data = torch.ByteTensor()
        targets = torch.LongTensor()
        colored_bg = torch.ByteTensor()
        biased_targets = []

        for bias_label, indices in bias_indices.items():
            (_data, _colored_bg), _targets = \
                self._make_biased_mnist(indices, bias_label)
            data = torch.cat([data, _data])
            colored_bg = torch.cat([colored_bg, _colored_bg])
            targets = torch.cat([targets, _targets])
            biased_targets.extend([bias_label] * len(indices))

        biased_targets = torch.LongTensor(biased_targets)
        return data, targets, colored_bg, biased_targets

    def __getitem__(self, index):
        """Return (img, target, bias_image, bias_label) for one sample."""
        img, target = self.data[index], self.targets[index]
        colored_bg = self.colored_bg[index]
        img = Image.fromarray(img.astype(np.uint8), mode='RGB')
        bias = Image.fromarray(colored_bg.astype(np.uint8), mode='RGB')

        if self.transform is not None:
            img = self.transform(img)
            bias = self.transform(bias)

        if self.target_transform is not None:
            target = self.target_transform(target)

        img = np.asarray(img)
        bias = np.asarray(bias)
        # NOTE(review): `label` (a one-hot vector) is built here but never
        # returned or stored — it appears to be dead code.
        label = torch.zeros(10)
        label.scatter_(0, target, 1)
        return img, target, bias, int(self.biased_targets[index])


class ColourBiasedMNIST(BiasedMNIST):
    """Concrete BiasedMNIST that paints the digit background with the bias colour."""

    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False, data_label_correlation=1.0,
                 n_confusing_labels=9):
        super(ColourBiasedMNIST, self).__init__(
            root, train=train, transform=transform,
            target_transform=target_transform, download=download,
            data_label_correlation=data_label_correlation,
            n_confusing_labels=n_confusing_labels)

    def _binary_to_colour(self, data, colour):
        """Colour the background of the binary digit images with `colour`.

        Returns the coloured images plus a solid-colour image of the bias
        colour for each sample.
        """
        # Foreground: white digit pixels.
        fg_data = torch.zeros_like(data)
        fg_data[data != 0] = 255
        fg_data[data == 0] = 0
        fg_data = torch.stack([fg_data, fg_data, fg_data], dim=1)

        # Background: mask of non-digit pixels, multiplied by the colour.
        bg_data = torch.zeros_like(data)
        bg_data[data == 0] = 1
        bg_data[data != 0] = 0
        bg_data = torch.stack([bg_data, bg_data, bg_data], dim=3)
        bg_data = bg_data * torch.ByteTensor(colour)
        # (bg_data + 1 - bg_data) is all ones, so colored_bg is a solid image
        # of the bias colour (same shape as bg_data).
        colored_bg = (bg_data + 1 - bg_data) * torch.ByteTensor(colour)
        bg_data = bg_data.permute(0, 3, 1, 2)

        data = fg_data + bg_data
        return data.permute(0, 2, 3, 1), colored_bg

    def _make_biased_mnist(self, indices, label):
        return (self._binary_to_colour(self.data[indices],
                                       self.COLOUR_MAP[label]),
                self.targets[indices])


def get_biased_mnist_dataloader(root, batch_size, data_label_correlation,
                                n_confusing_labels=9, train=True,
                                num_workers=4):
    """Return a DataLoader over ColourBiasedMNIST with the given bias level."""
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5),
                             std=(0.5, 0.5, 0.5))])
    dataset = ColourBiasedMNIST(root, train=train, transform=transform,
                                download=True,
                                data_label_correlation=data_label_correlation,
                                n_confusing_labels=n_confusing_labels)
    dataloader = data.DataLoader(dataset=dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=num_workers,
                                 pin_memory=True)
    return dataloader
# -

# Biased train set, biased/unbiased/original validation sets.
cmnist_train = get_biased_mnist_dataloader(root='data', batch_size=128,
                                           data_label_correlation=0.990,
                                           n_confusing_labels=9, train=True)

cmnist_val_bias = get_biased_mnist_dataloader(root='data', batch_size=128,
                                              data_label_correlation=0.990,
                                              n_confusing_labels=9, train=False)

cmnist_val_unbias = get_biased_mnist_dataloader(root='data', batch_size=128,
                                                data_label_correlation=0.1,
                                                n_confusing_labels=9, train=False)

cmnist_val_origin = get_biased_mnist_dataloader(root='data', batch_size=128,
                                                data_label_correlation=0,
                                                n_confusing_labels=9, train=False)

# Grab one batch for inspection (the two assignments below are no-ops).
for batch_idx, (features, targets, bias, _) in enumerate(cmnist_train):
    features = features
    targets = targets
    break

targets[3]

print(features.shape)
print(targets[3])
print(bias.shape)

# CHW -> HWC for matplotlib.
nhw_img = np.transpose(features[3], axes=(1, 2, 0))
plt.imshow(nhw_img);

bg = np.transpose(bias[3], axes=(1, 2, 0))
plt.imshow(bg);

# +
##########################
### MODEL
##########################


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs plus identity/downsample skip."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Project the skip connection when the shape changes.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """ResNet backbone returning (logits, softmax probabilities)."""

    def __init__(self, block, layers, num_classes, grayscale):
        self.inplanes = 64
        if grayscale:
            in_dim = 1
        else:
            in_dim = 3
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He-style initialisation for convs; BN scales to 1, shifts to 0.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, (2. / n)**.5)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` instances of `block`, downsampling on the first."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # because MNIST is already 1x1 here:
        # disable avg pooling
        #x = self.avgpool(x)

        x = x.view(x.size(0), -1)
        logits = self.fc(x)
        probas = F.softmax(logits, dim=1)
        return logits, probas


def resnet18(num_classes):
    """Constructs a ResNet-18 model.

    NOTE(review): the `num_classes` argument is ignored — the module-level
    NUM_CLASSES and GRAYSCALE constants are used instead. Confirm intended.
    """
    model = ResNet(block=BasicBlock,
                   layers=[2, 2, 2, 2],
                   num_classes=NUM_CLASSES,
                   grayscale=GRAYSCALE)
    return model
# -


class SimpleConvNet(nn.Module):
    """Small 4-conv CNN returning (logits, features); feature tap selected by
    `feature_pos` ('pre' = before GAP, 'post' = after GAP, 'logits')."""

    def __init__(self, num_classes=10, kernel_size=7, feature_pos='post'):
        super(SimpleConvNet, self).__init__()
        padding = kernel_size // 2
        layers = [
            nn.Conv2d(3, 16, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.Conv2d(16, 32, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 64, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 128, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        ]
        self.extracter = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): output width is hard-coded to 10; num_classes unused here.
        self.fc = nn.Linear(128, 10)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        if feature_pos not in ['pre', 'post', 'logits']:
            raise ValueError(feature_pos)

        self.feature_pos = feature_pos

    def forward(self, x, logits_only=False):
        pre_gap_feats = self.extracter(x)
        post_gap_feats = self.avgpool(pre_gap_feats)
        post_gap_feats = torch.flatten(post_gap_feats, 1)
        logits = self.fc(post_gap_feats)

        if logits_only:
            return logits
        elif self.feature_pos == 'pre':
            feats = pre_gap_feats
        elif self.feature_pos == 'post':
            feats = post_gap_feats
        else:
            feats = logits

        return logits, feats


# +
torch.manual_seed(RANDOM_SEED)

# Main model and bias-capturing model, trained jointly by one optimizer.
model = resnet18(NUM_CLASSES)
model.to(DEVICE)
# model = SimpleConvNet(kernel_size=7, feature_pos='post').to(DEVICE)
# model_c = SimpleConvNet(kernel_size=1, feature_pos='post').to(DEVICE)
model_c = resnet18(NUM_CLASSES)
model_c.to(DEVICE)

optimizer = torch.optim.Adam([{'params': model.parameters()},
                              {'params': model_c.parameters()}],
                             lr=LEARNING_RATE)
# optimizer_c = torch.optim.Adam(model_c.parameters(), lr=LEARNING_RATE)
# -


def compute_accuracy(model, data_loader, device):
    """Top-1 accuracy (in percent) of `model` over `data_loader`."""
    correct_pred, num_examples = 0, 0
    for i, (features, targets, bias, _) in enumerate(data_loader):
        features = features.to(device)
        targets = targets.to(device)
        # targets = torch.argmax(targets, 1).to(device)
        bias = bias.to(device)

        logits, probas = model(features)
        # logits, probas = model(bias)
        _, predicted_labels = torch.max(logits, 1)
        num_examples += targets.size(0)
        correct_pred += (predicted_labels == targets).sum()
    return correct_pred.float()/num_examples * 100


# +
start_time = time.time()
for epoch in range(NUM_EPOCHS):
    model.train()
    for batch_idx, (features, targets, biases, _) in enumerate(cmnist_train):

        features = features.to(DEVICE).requires_grad_()
        targets = targets.to(DEVICE)
        biases = biases.to(DEVICE).requires_grad_()

        ## FORWARD AND BACK PROP
        # model_c sees only the (solid colour) bias image; its prediction
        # gates the main model's logits (RUBi-style sigmoid masking).
        bias_pred, feat_b = model_c(biases)
        logits, probas = model(features)
        # cost_bias = F.binary_cross_entropy_with_logits(bias_pred, targets)
        logits_rubi = logits * torch.sigmoid(bias_pred)
        cost = nn.CrossEntropyLoss()(logits_rubi, targets) \
            + nn.CrossEntropyLoss()(bias_pred, targets)
        # cost = F.binary_cross_entropy_with_logits(logits, targets) + cost_bias
        # cost = F.binary_cross_entropy_with_logits(logits, targets)
        optimizer.zero_grad()

        cost.backward()
        optimizer.step()

        ### LOGGING
        if not batch_idx % 50:
            print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                   %(epoch+1, NUM_EPOCHS, batch_idx,
                     len(cmnist_train), cost))

    model.eval()
    with torch.set_grad_enabled(False): # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%%' % (
              epoch+1, NUM_EPOCHS,
              compute_accuracy(model, cmnist_val_unbias, device=DEVICE)))

    print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))

print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
# -

####Test####
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, cmnist_val_unbias,
                                                      device=DEVICE)))

####Test####
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, cmnist_val_origin,
                                                      device=DEVICE)))

####Test####
with torch.set_grad_enabled(False): # save memory during inference
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, cmnist_val_bias,
                                                      device=DEVICE)))

# +
# Inspect one plain-MNIST test batch (loader yields (features, targets) only).
for batch_idx, (features, targets) in enumerate(test_loader):
    features = features
    targets = targets
    break

nhw_img = np.transpose(features[4].expand(3,28,28), axes=(1, 2, 0))
plt.imshow(nhw_img);
# -

#
MNIST/color_mnist_rubi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Set up the components needed by this program.

# +
import numpy as np
import sep
import matplotlib.pyplot as plt
from astropy.io import fits
# %matplotlib inline
from matplotlib import rcParams

rcParams['figure.figsize'] = [10., 8.]
# -

# Locate the bundled FITS test file and open it.
fname = fits.util.get_testdata_filepath('image.fits')
hdul = fits.open(fname)
hdul.info()

# Read the pixel data into "data".
data = fits.getdata(fname)
print(type(data))
print(data.shape)

# Show the raw FITS image.
plt.imshow(data, cmap='gray')
plt.colorbar()

# Estimate the spatially varying sky background.
bkg = sep.Background(data)

# Print the global background level and its RMS.
print(bkg.globalback)
print(bkg.globalrms)

# Evaluate the background model as a full-size image ...
bkg_image = bkg.back()

# ... and display it.
plt.imshow(bkg_image, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar();

# Same for the background noise (RMS) image.
bkg_rms = bkg.rms()
plt.imshow(bkg_rms, interpolation='nearest', cmap='gray', origin='lower')
plt.colorbar();

# Subtract the background from the original image.
data_sub = data - bkg

# Detect the objects present in the background-subtracted image.
objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)

# Number of objects found.
len(objects)

# Locate the centre of each detected object and outline it in red with an
# Ellipse patch.

# +
from matplotlib.patches import Ellipse

# plot background-subtracted image
fig, ax = plt.subplots()
mu, sigma = np.mean(data_sub), np.std(data_sub)
im = ax.imshow(data_sub, interpolation='nearest', cmap='gray',
               vmin=mu - sigma, vmax=mu + sigma, origin='lower')

# plot an ellipse for each object
for i in range(len(objects)):
    ellipse = Ellipse(xy=(objects['x'][i], objects['y'][i]),
                      width=6*objects['a'][i],
                      height=6*objects['b'][i],
                      angle=objects['theta'][i] * 180. / np.pi)
    ellipse.set_facecolor('none')
    ellipse.set_edgecolor('red')
    ax.add_artist(ellipse)
# -

# Names of the available catalogue fields.
objects.dtype.names

# Circular aperture photometry with a 3-pixel radius at each object position.
flux, fluxerr, flag = sep.sum_circle(data_sub, objects['x'], objects['y'],
                                     3.0, err=bkg.globalrms, gain=1.0)

# Report the result for the first 10 objects.
for i in range(10):
    print("object {:d}: flux = {:f} +/- {:f}".format(i, flux[i], fluxerr[i]))

# Save the figures as PNG files.
plt.imsave("figure1.png", data)
plt.imsave("figure2.png", bkg_image)
plt.imsave("figure3.png", bkg_rms)
plt.imsave("figure4.png", data_sub)
astropy_fits_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # What is probability? A simulated introduction

# Import packages
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set()

# ## Learning Objectives of Part 1

# - To have an understanding of what "probability" means, in both Bayesian and
#   Frequentist terms;
# - To be able to simulate probability distributions that model real-world
#   phenomena;
# - To understand how probability distributions relate to data-generating
#   **stories**;
# - To understand and be able to simulate joint probabilities and conditional
#   probabilities;
# - To understand Bayes' Theorem and its utility.

# ## 1. Probability

# > To the pioneers such as Bernoulli, Bayes and Laplace, a probability
# represented a _degree-of-belief_ or plausibility; how much they thought that
# something was true, based on the evidence at hand. To the 19th century
# scholars, however, this seemed too vague and subjective an idea to be the
# basis of a rigorous mathematical theory. So they redefined probability as
# the _long-run relative frequency_ with which an event occurred, given
# (infinitely) many repeated (experimental) trials. Since frequencies can be
# measured, probability was now seen as an objective tool for dealing with
# _random_ phenomena.
#
# -- _Data Analysis, A Bayesian Tutorial_, Sivia & Skilling (p. 9)

# What type of random phenomena are we talking about here? One example is:
#
# - Knowing that a website has a click-through rate (CTR) of 10%, we can
#   calculate the probability of having 10 people, 9 people, 8 people ... and
#   so on click through, upon drawing 10 people randomly from the population;
# - But given the data of how many people click through, how can we calculate
#   the CTR? And how certain can we be of this CTR? Or how likely is a
#   particular CTR?
#
# Science mostly asks questions of the second form above & Bayesian thinking
# provides a wonderful framework for answering such questions. Essentially
# Bayes' Theorem gives us a way of moving from the probability of the data
# given the model (written as $P(data|model)$) to the probability of the model
# given the data ($P(model|data)$).
#
# We'll first explore questions of the 1st type using simulation: knowing the
# model, what is the probability of seeing certain data?

# ## 2. Simulating probabilities

# * Let's say that a website has a CTR of 50%, i.e. that 50% of people click
#   through. If we picked 1000 people at random from the population, how
#   likely would it be to find that a certain number of people click?
#
# We can simulate this using `numpy`'s random number generator.
#
# To do so, first note we can use `np.random.rand()` to randomly select
# floats between 0 and 1 (known as the _uniform distribution_). Below, we do
# so and plot a histogram:

# Draw 1,000 samples from uniform & plot results
x = np.random.rand(1000)
plt.hist(x);

# To then simulate the sampling from the population, we check whether each
# float was greater or less than 0.5. If less than or equal to 0.5, we say the
# person clicked.

# Compute how many people click
clicks = x <= 0.5
n_clicks = sum(clicks)
f"Number of clicks = {n_clicks}"

# The proportion of people who clicked can be calculated as the total number
# of clicks over the number of people:

# Compute proportion of people who clicked
f"Proportion who clicked = {n_clicks/len(clicks)}"

# **Discussion**: Did you get the same answer as your neighbour? If you did,
# why? If not, why not?

# **Up for discussion:** Let's say that all you had was this data and you
# wanted to figure out the CTR (probability of clicking).
#
# * What would your estimate be?
# * Bonus points: how confident would you be of your estimate?
# **Note:** Although, in the above, we have described _probability_ in two
# ways, we have not described it mathematically. We're not going to do so
# rigorously here, but we will say that _probability_ defines a function from
# the space of possibilities (in the above, the interval $[0,1]$) that
# describes how likely it is to get a particular point or region in that
# space. Mike Betancourt has an elegant
# [Introduction to Probability Theory (For Scientists and Engineers)](https://betanalpha.github.io/assets/case_studies/probability_theory.html)
# that I can recommend.

# ### Hands-on: more clicking

# Use random sampling to simulate how many people click when the CTR is 0.7.
# How many click? What proportion?

# Solution
clicks = x <= 0.7
n_clicks = sum(clicks)
print(f"Number of clicks = {n_clicks}")
print(f"Proportion who clicked = {n_clicks/len(clicks)}")

# _Discussion point_: This model is known as the bias coin flip.
# - Can you see why?
# - Can it be used to model other phenomena?

# ### Galapagos finch beaks

# You can also calculate such proportions with real-world data. Here we
# import a dataset of finch beak measurements from the Galápagos islands. You
# can find the data [here](https://datadryad.org/resource/doi:10.5061/dryad.9gh90).

# Import and view head of data
df_12 = pd.read_csv('../../data/finch_beaks_2012.csv')
df_12.head()

# Store lengths in a pandas series
lengths = df_12['blength']

# * What proportion of birds have a beak length > 10 ?

p = (sum(lengths > 10))/len(lengths)
p

# **Note:** This is the proportion of birds that have beak length $>10$ in
# your empirical data, not the probability that any bird drawn from the
# population will have beak length $>10$.

# ### A proxy for probability
#
# As stated above, we have calculated a proportion, not a probability. As a
# proxy for the probability, we can simulate drawing random samples (with
# replacement) from the data, see how many lengths are > 10, and calculate
# the proportion (commonly referred to as
# [hacker statistics](https://speakerdeck.com/jakevdp/statistics-for-hackers)):

n_samples = 10000
sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples

# ### Another way to simulate coin-flips

# In the above, you have used the uniform distribution to sample from a
# series of biased coin flips. I want to introduce you to another
# distribution that you can also use to do so: the **binomial distribution**.
#
# The **binomial distribution** with parameters $n$ and $p$ is defined as the
# probability distribution of
#
# > the number of heads seen when flipping a coin $n$ times when with
# $p(heads)=p$.

# **Note** that this distribution essentially tells the **story** of a
# general model in the following sense: if we believe that the underlying
# process generating the observed data has a binary outcome (affected by
# disease or not, head or not, 0 or 1, clicked through or not), and that one
# of the two outcomes occurs with probability $p$, then the probability of
# seeing a particular outcome is given by the **binomial distribution** with
# parameters $n$ and $p$.

# Any process that matches the coin flip story is a Binomial process (note
# that you'll see such coin flips also referred to as Bernoulli trials in the
# literature). So we can also formulate the story of the Binomial
# distribution as
#
# > the number $r$ of successes in $n$ Bernoulli trials with probability $p$
# of success, is Binomially distributed.

# We'll now use the binomial distribution to answer the same question as
# above:
# * If P(heads) = 0.7 and you flip the coin ten times, how many heads will
#   come up?
#
# We'll also set the seed to ensure reproducible results.
# + # Set seed np.random.seed(seed=16071982) # Simulate one run of flipping the biased coin 10 times np.random.binomial(10, 0.7) # - # ### Simulating many times to get the distribution # # In the above, we have simulated the scenario once. But this only tells us one potential outcome. To see how likely it is to get $n$ heads, for example, we need to simulate it a lot of times and check what proportion ended up with $n$ heads. # + # Simulate 1,000 run of flipping the biased coin 10 times x = np.random.binomial(10, 0.3, 10000) # Plot normalized histogram of results plt.hist(x, density=True, bins=10); # - # * Group chat: what do you see in the above? # ## Hands-on # - If I flip a biased coin ($P(H)=0.3$) 20 times, what is the probability of 5 or more heads? # Solution sum(np.random.binomial(20, 0.3, 10000) >= 5)/10000 # - If I flip a fair coin 20 times, what is the probability of 5 or more heads? sum(np.random.binomial(20,0.5,10000) >= 5)/10000 # - Plot the normalized histogram of number of heads of the following experiment: flipping a fair coin 10 times. # Plot histogram x = np.random.binomial(10, 0.5, 10000) plt.hist(x, density=True, bins=10); # **Note:** you may have noticed that the _binomial distribution_ can take on only a finite number of values, whereas the _uniform distribution_ above can take on any number between $0$ and $1$. These are different enough cases to warrant special mention of this & two different names: the former is called a _probability mass function_ (PMF) and the latter a _probability distribution function_ (PDF). Time permitting, we may discuss some of the subtleties here. If not, all good texts will cover this. I like (Sivia & Skilling, 2006), among many others. # # **Question:** # * Looking at the histogram, can you tell me the probability of seeing 4 or more heads? # Enter the ECDF. 
# ### Empirical cumulative distribution functions (ECDFs)

# An ECDF is, as an alternative to a histogram, a way to visualize univariate
# data that is rich in information. It allows you to visualize all of your
# data and, by doing so, avoids the very real problem of binning.
# - can plot control plus experiment
# - data plus model!
# - many populations
# - can see multimodality (though less pronounced) -- a mode becomes a point
#   of inflexion!
# - can read off so much: e.g. percentiles.
#
# See <NAME>'s great post on ECDFs
# [here](https://ericmjl.github.io/blog/2018/7/14/ecdfs/) AND
# [this twitter thread](https://twitter.com/allendowney/status/1019171696572583936)
# (thanks, <NAME>!).
#
# So what is this ECDF?
#
# **Definition:** In an ECDF, the x-axis is the range of possible values for
# the data & for any given x-value, the corresponding y-value is the
# proportion of data points less than or equal to that x-value.

# Let's define a handy ECDF function that takes in data and outputs $x$ and
# $y$ data for the ECDF.


def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements.

    Returns the sorted values and, for each, the fraction of observations
    less than or equal to it.
    """
    sorted_vals = np.sort(data)
    n_obs = len(sorted_vals)
    fractions = np.arange(1, n_obs + 1) / n_obs
    return sorted_vals, fractions


# ## Hands-on

# Plot the ECDF for the previous hands-on exercise. Read the answer to the
# following question off the ECDF: the probability of seeing 4 or more heads?

# +
# Generate x- and y-data for the ECDF
x_flips, y_flips = ecdf(x)

# Plot the ECDF
plt.plot(x_flips, y_flips, marker='.', linestyle='none');
# -

# ## 3. PROBABILITY DISTRIBUTIONS AND THEIR STORIES

# **Credit:** Thank you to [<NAME>](http://bois.caltech.edu/) for countless
# hours of discussion, work and collaboration on thinking about probability
# distributions and their stories. All of the following is inspired by
# Justin & his work, if not explicitly drawn from.
# ___
#
# In the above, we saw that we could match data-generating processes with
# binary outcomes to the story of the binomial distribution.
#
# > The Binomial distribution's story is as follows: the number $r$ of
# successes in $n$ Bernoulli trials with probability $p$ of success, is
# Binomially distributed.
#
# There are many other distributions with stories also!

# ### Poisson processes and the Poisson distribution

# In the book
# [Information Theory, Inference and Learning Algorithms](https://www.amazon.com/Information-Theory-Inference-Learning-Algorithms/dp/0521642981)
# <NAME> tells the tale of a town called Poissonville, in which the buses
# have an odd schedule. Standing at a bus stop in Poissonville, the amount of
# time you have to wait for a bus is totally independent of when the previous
# bus arrived. This means you could watch a bus drive off and another arrive
# almost instantaneously, or you could be waiting for hours.
#
# Arrival of buses in Poissonville is what we call a Poisson process. The
# timing of the next event is completely independent of when the previous
# event happened. Many real-life processes behave in this way.
#
# * natural births in a given hospital (there is a well-defined average
#   number of natural births per year, and the timing of one birth is
#   independent of the timing of the previous one);
# * Landings on a website;
# * Meteor strikes;
# * Molecular collisions in a gas;
# * Aviation incidents.
#
# Any process that matches the buses in Poissonville **story** is a Poisson
# process.
#
# The number of arrivals of a Poisson process in a given amount of time is
# Poisson distributed. The Poisson distribution has one parameter, the
# average number of arrivals in a given length of time. So, to match the
# story, we could consider the number of hits on a website in an hour with an
# average of six hits per hour. This is Poisson distributed.

# +
# Generate Poisson-distributed data
samples = np.random.poisson(6, size=10**6)

# Plot histogram
plt.hist(samples, bins=21);
# -

# **Question:** Does this look like anything to you?

# In fact, the Poisson distribution is the limit of the Binomial distribution
# for low probability of success and large number of trials, that is, for
# rare events.

# To see this, think about the stories. Picture this: you're doing a
# Bernoulli trial once a minute for an hour, each with a success probability
# of 0.05. We would do 60 trials, and the number of successes is Binomially
# distributed, and we would expect to get about 3 successes. This is just
# like the Poisson story of seeing 3 buses on average arrive in a given
# interval of time. Thus the Poisson distribution with arrival rate equal to
# np approximates a Binomial distribution for n Bernoulli trials with
# probability p of success (with n large and p small). This is useful because
# the Poisson distribution can be simpler to work with as it has only one
# parameter instead of two for the Binomial distribution.

# ## Hands-on

# Plot the ECDF of the Poisson-distributed data that you generated above.

# +
# Generate x- and y-data for the ECDF
x_p, y_p = ecdf(samples)

# Plot the ECDF
plt.plot(x_p, y_p, marker='.', linestyle='none');
# -

# ## Example Poisson distribution: field goals attempted per game

# This section is explicitly taken from the great work of <NAME>. You can
# find more
# [here](https://github.com/justinbois/dataframed-plot-examples/blob/master/lebron_field_goals.ipynb).

# Let's first remind ourselves of the story behind the Poisson distribution.
# > The number of arrivals of a Poisson processes in a given set time
# interval is Poisson distributed.
#
# To quote <NAME>:
#
# > We could model field goal attempts in a basketball game using a Poisson
# distribution. When a player takes a shot is a largely stochastic process,
# being influenced by the myriad ebbs and flows of a basketball game. Some
# players shoot more than others, though, so there is a well-defined rate of
# shooting. Let's consider <NAME>'s field goal attempts for the 2017-2018 NBA
# season.

# First thing's first, the data
# ([from here](https://www.basketball-reference.com/players/j/jamesle01/gamelog/2018)):

fga = [19, 16, 15, 20, 20, 11, 15, 22, 34, 17, 20, 24, 14, 14, 24, 26, 14,
       17, 20, 23, 16, 11, 22, 15, 18, 22, 23, 13, 18, 15, 23, 22, 23, 18,
       17, 22, 17, 15, 23, 8, 16, 25, 18, 16, 17, 23, 17, 15, 20, 21, 10,
       17, 22, 20, 20, 23, 17, 18, 16, 25, 25, 24, 19, 17, 25, 20, 20, 14,
       25, 26, 29, 19, 16, 19, 18, 26, 24, 21, 14, 20, 29, 16, 9]

# To show that LeBron's attempts are ~ Poisson distributed, you're now going
# to plot the ECDF and compare it with the ECDF of the Poisson distribution
# that has the mean of the data (technically, this is the maximum likelihood
# estimate).

# ## HANDS ON

# Generate the x and y values for the ECDF of LeBron's field attempt goals.

# Generate x & y data for ECDF
x_ecdf, y_ecdf = ecdf(fga)

# Now we'll draw samples out of a Poisson distribution to get the theoretical
# ECDF (that is, simulating the model), plot it with the ECDF of the data and
# see how they look.

# +
# Number of times we simulate the model
n_reps = 1000

# Plot ECDF of data
plt.plot(x_ecdf, y_ecdf, '.', color='black');

# Plot ECDF of model
for _ in range(n_reps):
    samples = np.random.poisson(np.mean(fga), size=len(fga))
    x_theor, y_theor = ecdf(samples)
    plt.plot(x_theor, y_theor, '.', alpha=0.01, color='lightgray');

# Label your axes
plt.xlabel('field goal attempts')
plt.ylabel('ECDF')
# -

# You can see from the ECDF that LeBron's field goal attempts per game are
# ~ Poisson distributed.

# ## Exponential distribution

# We've encountered a variety of named _discrete distributions_. There are
# also named _continuous distributions_, such as the exponential distribution
# and the normal (or Gaussian) distribution.
To see what the story of the exponential distribution is, let's return to Poissonville, in which the number of buses that will arrive per hour are Poisson distributed. # However, the waiting time between arrivals of a Poisson process are exponentially distributed. # # So: the exponential distribution has the following story: the waiting time between arrivals of a Poisson process are exponentially distributed. It has a single parameter, the mean waiting time. This distribution is not peaked, as we can see from its PDF. # # For an illustrative example, lets check out the time between all incidents involving nuclear power since 1974. It's a reasonable first approximation to expect incidents to be well-modeled by a Poisson process, which means the timing of one incident is independent of all others. If this is the case, the time between incidents should be exponentially distributed. # # # To see if this story is credible, we can plot the ECDF of the data with the CDF that we'd get from an exponential distribution with the sole parameter, the mean, given by the mean inter-incident time of the data. # # Load nuclear power accidents data & create array of inter-incident times df = pd.read_csv('../../data/nuclear_power_accidents.csv') df.Date = pd.to_datetime(df.Date) df = df[df.Date >= pd.to_datetime('1974-01-01')] inter_times = np.diff(np.sort(df.Date)).astype(float) / 1e9 / 3600 / 24 # + # Compute mean and sample from exponential mean = np.mean(inter_times) samples = np.random.exponential(mean, size=10**6) # Compute ECDFs for sample & model x, y = ecdf(inter_times) x_theor, y_theor = ecdf(samples) # - # Plot sample & model ECDFs plt.plot(x_theor, y_theor); plt.plot(x, y, marker='.', linestyle='none'); # We see that the data is close to being Exponentially distributed, which means that we can model the nuclear incidents as a Poisson process. # ### Normal distribution # The normal distribution, also known as the Gaussian or Bell Curve, appears everywhere. 
There are many reasons for this. One is the following: # # > When doing repeated measurements, we expect them to be normally distributed, owing to the many subprocesses that contribute to a measurement. This is because (a formulation of the Central Limit Theorem) **any quantity that emerges as the sum of a large number of subprocesses tends to be Normally distributed** provided none of the subprocesses is very broadly distributed. # # Now it's time to see if this holds for the measurements of the speed of light in the famous Michelson–Morley experiment: # Below, I'll plot the histogram with a Gaussian curve fitted to it. Even if that looks good, though, that could be due to binning bias. SO then you'll plot the ECDF of the data and the CDF of the model! # Load data, plot histogram import scipy.stats as st df = pd.read_csv('../../data/michelson_speed_of_light.csv') df = df.rename(columns={'velocity of light in air (km/s)': 'c'}) c = df.c.values x_s = np.linspace(299.6, 300.1, 400) * 1000 plt.plot(x_s, st.norm.pdf(x_s, c.mean(), c.std(ddof=1))) plt.hist(c, bins=9, density=True) plt.xlabel('speed of light (km/s)') plt.ylabel('PDF') # ## HANDS ON # + # Get speed of light measurement + mean & standard deviation michelson_speed_of_light = df.c.values mean = np.mean(michelson_speed_of_light) std = np.std(michelson_speed_of_light, ddof=1) # Generate normal samples w/ mean, std of data samples = np.random.normal(mean, std, size=10000) # Generate data ECDF & model CDF x, y = ecdf(michelson_speed_of_light) x_theor, y_theor = ecdf(samples) # Plot data & model (E)CDFs _ = plt.plot(x_theor, y_theor) _ = plt.plot(x, y, marker='.', linestyle='none') _ = plt.xlabel('speed of light (km/s)') _ = plt.ylabel('CDF') # - # Some of you may ask but is the data really normal? I urge you to check out Allen Downey's post [_Are your data normal? Hint: no._ ](http://allendowney.blogspot.com/2013/08/are-my-data-normal.html) # ## 4. 
Joint Probability & Conditional Probability # ### Joint Probability # We have already encountered joint probabilities above, perhaps without knowing it: $P(A,B)$ is the probability two events $A$ and $B$ _both_ occurring. # * For example, getting two heads in a row. # # If $A$ and $B$ are independent, then $P(A,B)=P(A)P(B)$ but be warned: this is not always (or often) the case. # # One way to think of this is considering "AND" as multiplication: the probability of A **and** B is the probability of A **multiplied** by the probability of B. # ### HANDS-ON: JOINT PROBABILITY COIN FLIPPING # Verify that $P(A,B)=P(A)P(B)$ in the two fair coin-flip case (A=heads, B=heads) by # - first simulating two coins being flipped together and calculating the proportion of occurences with two heads; # - then simulating one coin flip and calculating the proportion of heads and then doing that again and multiplying the two proportions. # # Your two calculations should give "pretty close" results and not the same results due to the (in)accuracy of simulation. # Solution: Calculate P(A,B) x_0 = np.random.binomial(2, 0.5, 10000) p_ab = sum(x_0==2)/len(x_0) plt.hist(x_0); print(p_ab) # Solution: Calculate P(A)P(B) x_1 = np.random.binomial(1, 0.5, 10000) x_2 = np.random.binomial(1, 0.5, 10000) p_a = sum(x_1 == 1)/len(x_1) p_b = sum(x_2 == 1)/len(x_2) p_a*p_b # **Note:** In order to use such simulation and _hacker statistics_ approaches to "prove" results such as the above, we're gliding over several coupled and deep technicalities. This is in the interests of the pedagogical nature of this introduction. For the sake of completeness, we'll mention that we're essentially # - Using the proportion in our simulations as a proxy for the probability (which, although Frequentist, is useful to allow you to start getting your hands dirty with probability via simluation). 
#
# Having stated this, for ease of instruction, we'll continue to do so when thinking about joint & conditional probabilities of both simulated and real data.

# ### HANDS-ON: joint probability for birds

# What is the probability that two randomly selected birds have beak lengths over 10 ?

# Calculate P(A)P(B) of two birds having beak lengths > 10
p_a = (sum(lengths > 10))/len(lengths)
p_b = (sum(lengths > 10))/len(lengths)
p_a*p_b

# * Calculate the joint probability using the resampling method, that is, by drawing random samples (with replacement) from the data. First calculate $P(A)P(B)$:

# Calculate P(A)P(B) using resampling methods
n_samples = 100000
p_a = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples
p_b = sum(np.random.choice(lengths, n_samples, replace=True) > 10)/n_samples
p_a*p_b

# Now calculate $P(A,B)$:

# Calculate P(A,B) using resampling methods
n_samples = 100000
samples = np.random.choice(lengths, (n_samples,2), replace=True)
_ = samples > (10, 10)
p_ab = sum(np.prod(_, axis=1))/n_samples
p_ab

# **Task:** Interpret the results of your simulations.

# ### Conditional Probability

# Now that we have a grasp on joint probabilities, let's consider conditional probabilities, that is, the probability of some $A$, knowing that some other $B$ is true. We use the notation $P(A|B)$ to denote this. For example, you can ask the question "What is the probability of a finch beak having length $>10$, knowing that the finch is of species 'fortis'?"

# #### Example: conditional probability for birds

# 1. What is the probability of a finch beak having length > 10 ?
# 2. What if we know the finch is of species 'fortis'?
# 3. What if we know the finch is of species 'scandens'?
# P(beak length > 10) across all birds
sum(df_12.blength > 10)/len(df_12)

# P(beak length > 10 | species == 'fortis')
df_fortis = df_12.loc[df_12['species'] == 'fortis']
sum(df_fortis.blength > 10)/len(df_fortis)

# P(beak length > 10 | species == 'scandens')
df_scandens = df_12.loc[df_12['species'] == 'scandens']
sum(df_scandens.blength > 10)/len(df_scandens)

# **Note:** These proportions are definitely different. We can't say much more currently but we'll soon see how to use hypothesis testing to see what else we can say about the differences between the species of finches.

# ### Joint and conditional probabilities
#
# Conditional and joint probabilities are related by the following:
# $$ P(A,B) = P(A|B)P(B)$$

# **Homework exercise for the avid learner:** verify the above relationship using simulation/resampling techniques in one of the cases above.

# ### Hands on example: drug testing

# **Question:** Suppose that a test for using a particular drug has 99% sensitivity (true positive rate) and 99% specificity (true negative rate), that is, a 1% false positive rate and 1% false negative rate. Suppose that 0.5% (5 in 1,000) of people are users of the drug. What is the probability that a randomly selected individual with a positive test is a drug user?
#
# **If we can answer this, it will be really cool as it shows how we can move from knowing $P(+|user)$ to $P(user|+)$, an MVP for being able to move from $P(data|model)$ to $P(model|data)$.**

# In the spirit of this workshop, it's now time to harness your computational power and the intuition of simulation to solve this drug testing example.
#
# * Before doing so, what do you think the answer to the question _"What is the probability that a randomly selected individual with a positive test is a drug user?"_ is? Write down your guess.

# Take 100,000 subjects (comment previously said 10,000; n below is 100000)
n = 100000
# Sample for number of users, non-users
users = np.random.binomial(n, 0.005, 1)
non_users = n - users
# How many of these users tested +ve ?
u_pos = np.random.binomial(users, 0.99)
# How many of these non-users tested +ve ?
non_pos = np.random.binomial(non_users, 0.01) # how many of those +ve tests were for users? u_pos/(u_pos+non_pos) # **Discussion**: What you have been able to do here is to solve the following problem: you knew $P(+|user)=0.99$, but you were trying to figure out $P(user|+)$. Is the answer what you expected? If not, why not? # # If you were surprised at the answer, that's not too surprising: you've experienced the [base rate fallacy](https://en.wikipedia.org/wiki/Base_rate_fallacy). The base rate of 99% true positive may lead one to think that most positive tests will be of users, however the vast majority of the overall population are non-users, which means that there will be more that test positive incorrectly than one would otherwise expect. # # **Key note:** This is related to the serious scientific challenge posed at the beginning here: if you know the underlying parameters/model, you can figure out the distribution and the result, but often we have only the experimental result and we're trying to figure out the most appropriate model and parameters. # # It is Bayes' Theorem that lets us move between these. # ## 5. Bayes' Theorem # # $$P(B|A) = \frac{P(A|B)P(B)}{P(A)}$$ # As you may have guessed, it is Bayes' Theorem that will allow us to move back and forth between $P(data|model)$ and $P(model|data)$. As we have seen, $P(model|data)$ is usually what we're interested in as data scientists yet $P(data|model)$ is what we can easily compute, either by simulating our model or using analytic equations. # **One of the coolest things:** Bayes Theorem can be proved with a few lines of mathematics. Your instructor will do this on the chalk/white-board now. # ### Bayes Theorem solves the above drug testing problem # # Bayes Theorem can be used to analytically derive the solution to the 'drug testing' example above as follows. 
# From Bayes Theorem, # # $$P(user|+) = \frac{P(+|user)P(user)}{P(+)}$$ # # # We can expand the denominator here into # # $$P(+) = P(+,user) + P(+,non-user) $$ # # so that # # $$ P(+)=P(+|user)P(user) + P(+|non-user)P(non-user)$$ # # and # # $$P(user|+) = \frac{P(+|user)P(user)}{P(+|user)P(user) + P(+|non-user)P(non-user)}$$. # Calculating this explicitly yields # # $$P(user|+) = \frac{0.99\times 0.005}{0.99\times 0.005 + 0.01\times 0.995} = 0.332 $$ # This means that if an individual tests positive, there is still only a 33.2% chance that they are a user! This is because the number of non-users is so high compared to the number of users. # Coming up: from Bayes Theorem to Bayesian Inference!
notebooks/URGsADS-NYC-2020-02-19/01-Instructor-Probability_a_simulated_introduction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="FEL3NlTTDlSX" # ##### Copyright 2021 The TensorFlow Authors. # + cellView="form" id="FlUw7tSKbtg4" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] id="77z2OchJTk0l" # # Debug TF2 Migrated Training Pipeline # # <table class="tfo-notebook-buttons" align="left"> # <td> # <a target="_blank" href="https://www.tensorflow.org/guide/migrate/migration_debugging"> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png" /> # View on TensorFlow.org</a> # </td> # <td> # <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/migrate/migration_debugging.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> # Run in Google Colab</a> # </td> # <td> # <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/migration_debugging.ipynb"> # <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> # View source on GitHub</a> # </td> # <td> # <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/migrate/migration_debugging.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> # </td> # </table> # + [markdown] id="zTwPu-w6M5sz" # This notebook demonstrates how to debug 
a training pipeline when migrating to TF2. It consists of the following components:
# 1. Suggested steps and code samples for debugging the training pipeline
# 2. Tools for debugging
# 3. Other related resources
#
# One assumption is you have TF1.x code and trained models for comparison, and you want to build a TF2 model that achieves similar validation accuracy.
#
# This notebook does **NOT** cover debugging performance issues for training/inference speed or memory usage.

# + [markdown] id="fKm9R4CtOAP3"
# ## Debugging workflow
#
# Below is a general workflow for debugging your TF2 training pipelines. Note that you do not need to follow these steps in order. You can also use a binary search approach where you test the model in an intermediate step and narrow down the debugging scope.
#
# 1. Fix compile and runtime errors
#
# 2. Single forward pass validation (in a separate
# [guide](./validate_correctness.ipynb))
#
# a. On single CPU device
#
# * Verify variables are created only once
# * Check variable counts, names, and shapes match
# * Reset all variables, check numerical equivalence with all randomness
# disabled
# * Align random number generation, check numerical equivalence in inference
# * (Optional) Check checkpoints are loaded properly and TF1.x/TF2 models
# generate identical output
#
# b. On single GPU/TPU device
#
# c. With multi-device strategies
#
# 3. Model training numerical equivalence validation for a few steps (code
# samples available below)
#
# a. Single training step validation using small and fixed data on single CPU
# device. Specifically, check numerical equivalence for the following
# components
#
# * losses computation
# * metrics
# * learning rate
# * gradient computation and update
#
# b. Check statistics after training 3 or more steps to verify optimizer behaviors like the momentum, still with fixed data on single CPU device
#
# c. On single GPU/TPU device
#
# d.
With multi-device strategies (check the intro for [MultiProcessRunner](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/distribute/multi_process_runner.py#L108) at the bottom) # # 4. End-to-end covergence testing on real dataset # # a. Check training behaviors with TensorBoard # # * use simple optimizers e.g. SGD and simple distribution strategies e.g. # `tf.distribute.OneDeviceStrategy` first # * training metrics # * evaluation metrics # * figure out what the reasonable tolerance for inherent randomness is # # b. Check equivalence with advanced optimizer/learning rate # scheduler/distribution strategies # # c. Check equivalence when using mixed precision # # 5. Additional product benchmarks # + [markdown] id="XKakQBI9-FLb" # ## Setup # + id="sopP--i7-LaF" # !pip uninstall -y -q tensorflow # + id="i1ghHyXl-Oqd" # Install tf-nightly as the DeterministicRandomTestTool is only available in # Tensorflow 2.8 # !pip install -q tf-nightly # + [markdown] id="usyRSlIRl3r2" # ### Single forward pass validation # # Single forward pass validation, including checkpoint loading, is covered in a different [colab](./validate_correctness.ipynb). # + id="HVBQbsZeVL_V" import sys import unittest import numpy as np import tensorflow as tf import tensorflow.compat.v1 as v1 # + [markdown] id="4M104dt7m5cC" # ### Model training numerical equivalence validation for a few steps # + [markdown] id="v2Nz2Ni1EkMz" # Set up model configuration and prepare a fake dataset. # + id="hUxXadzKU9rT" params = { 'input_size': 3, 'num_classes': 3, 'layer_1_size': 2, 'layer_2_size': 2, 'num_train_steps': 100, 'init_lr': 1e-3, 'end_lr': 0.0, 'decay_steps': 1000, 'lr_power': 1.0, } # make a small fixed dataset fake_x = np.ones((2, params['input_size']), dtype=np.float32) fake_y = np.zeros((2, params['num_classes']), dtype=np.int32) fake_y[0][0] = 1 fake_y[1][1] = 1 step_num = 3 # + [markdown] id="lV_n3Ukmz4Un" # Define the TF1.x model. 
# + id="ATa5fzL8mAwl"
# Assume there is an existing TF1.x model using estimator API
# Wrap the model_fn to log necessary tensors for result comparison
class SimpleModelWrapper():
    """Wraps a TF1.x estimator-style `model_fn` and records per-step values
    (step, learning rate, loss, gradients, layer outputs) so they can be
    compared numerically against the migrated TF2 model.
    """

    def __init__(self):
        # Tensors to fetch each step, keyed by statistic name; populated by
        # `model_fn` when the graph is built.
        self.logged_ops = {}
        # Evaluated per-step values, appended by `update_logs`.
        self.logs = {
            'step': [],
            'lr': [],
            'loss': [],
            'grads_and_vars': [],
            'layer_out': []}

    def model_fn(self, features, labels, mode, params):
        """Build the TF1.x graph; return an `EstimatorSpec` (TRAIN mode only).

        Side effect: fills `self.logged_ops` with the tensors to fetch each
        step for numerical comparison.
        """
        out_1 = tf.compat.v1.layers.dense(features, units=params['layer_1_size'])
        out_2 = tf.compat.v1.layers.dense(out_1, units=params['layer_2_size'])
        logits = tf.compat.v1.layers.dense(out_2, units=params['num_classes'])
        loss = tf.compat.v1.losses.softmax_cross_entropy(labels, logits)

        # skip EstimatorSpec details for prediction and evaluation
        if mode == tf.estimator.ModeKeys.PREDICT:
            pass
        if mode == tf.estimator.ModeKeys.EVAL:
            pass
        assert mode == tf.estimator.ModeKeys.TRAIN

        global_step = tf.compat.v1.train.get_or_create_global_step()
        lr = tf.compat.v1.train.polynomial_decay(
            learning_rate=params['init_lr'],
            global_step=global_step,
            decay_steps=params['decay_steps'],
            end_learning_rate=params['end_lr'],
            power=params['lr_power'])
        optimizer = tf.compat.v1.train.GradientDescentOptimizer(lr)
        # Collect trainables from the graph this model_fn is built in, instead
        # of depending on a notebook-global `graph` variable. Callers invoke
        # model_fn inside `with graph.as_default()`, so this is equivalent.
        grads_and_vars = optimizer.compute_gradients(
            loss=loss,
            var_list=tf.compat.v1.get_default_graph().get_collection(
                tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES))
        train_op = optimizer.apply_gradients(
            grads_and_vars,
            global_step=global_step)

        # log tensors
        self.logged_ops['step'] = global_step
        self.logged_ops['lr'] = lr
        self.logged_ops['loss'] = loss
        self.logged_ops['grads_and_vars'] = grads_and_vars
        self.logged_ops['layer_out'] = {
            'layer_1': out_1,
            'layer_2': out_2,
            'logits': logits}

        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    def update_logs(self, logs):
        """Append one step's evaluated values to this instance's `self.logs`.

        Bug fix: the original appended to the global `model_tf1.logs`, which
        only worked when the single instance happened to be named `model_tf1`
        (and broke e.g. the `self.model_tf1` instance in the unit test below).
        """
        for key, value in logs.items():
            self.logs[key].append(value)

# + [markdown] id="kki9yILSKS7f"
# The following [`v1.keras.utils.DeterministicRandomTestTool`](https://www.tensorflow.org/api_docs/python/tf/compat/v1/keras/utils/DeterministicRandomTestTool) class provides a context manager
`scope()` that can make stateful random operations use the same seed across both TF1 graphs/sessions and eager execution, # # The tool provides two testing modes: # 1. `constant` which uses the same seed for every single operation no matter how many times it has been called and, # 2. `num_random_ops` which uses the number of previously-observed stateful random operations as the operation seed. # # This applies both to the stateful random operations used for creating and initializing variables, and to the stateful random operations used in computation (such as for dropout layers). # + id="X6Y3RWMoKOl8" random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops') # + [markdown] id="mk5-ZzxcErX5" # Run the TF1.x model in graph mode. Collect statistics for first 3 training steps for numerical equivalence comparison. # + id="r5zhJHvsWA24" with random_tool.scope(): graph = tf.Graph() with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess: model_tf1 = SimpleModelWrapper() # build the model inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size'])) labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes'])) spec = model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params) train_op = spec.train_op sess.run(tf.compat.v1.global_variables_initializer()) for step in range(step_num): # log everything and update the model for one step logs, _ = sess.run( [model_tf1.logged_ops, train_op], feed_dict={inputs: fake_x, labels: fake_y}) model_tf1.update_logs(logs) # + [markdown] id="eZxjI8Nxz9Ea" # Define the TF2 model. 
# + id="AA67rh2TkS1M" class SimpleModel(tf.keras.Model): def __init__(self, params, *args, **kwargs): super(SimpleModel, self).__init__(*args, **kwargs) # define the model self.dense_1 = tf.keras.layers.Dense(params['layer_1_size']) self.dense_2 = tf.keras.layers.Dense(params['layer_2_size']) self.out = tf.keras.layers.Dense(params['num_classes']) learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=params['init_lr'], decay_steps=params['decay_steps'], end_learning_rate=params['end_lr'], power=params['lr_power']) self.optimizer = tf.keras.optimizers.SGD(learning_rate_fn) self.compiled_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True) self.logs = { 'lr': [], 'loss': [], 'grads': [], 'weights': [], 'layer_out': []} def call(self, inputs): out_1 = self.dense_1(inputs) out_2 = self.dense_2(out_1) logits = self.out(out_2) # log output features for every layer for comparison layer_wise_out = { 'layer_1': out_1, 'layer_2': out_2, 'logits': logits} self.logs['layer_out'].append(layer_wise_out) return logits def train_step(self, data): x, y = data with tf.GradientTape() as tape: logits = self(x) loss = self.compiled_loss(y, logits) grads = tape.gradient(loss, self.trainable_weights) # log training statistics step = self.optimizer.iterations.numpy() self.logs['lr'].append(self.optimizer.learning_rate(step).numpy()) self.logs['loss'].append(loss.numpy()) self.logs['grads'].append(grads) self.logs['weights'].append(self.trainable_weights) # update model self.optimizer.apply_gradients(zip(grads, self.trainable_weights)) return # + [markdown] id="I5smAcaEE8nX" # Run the TF2 model in eager mode. Collect statistics for first 3 training steps for numerical equivalence comparison. 
# + id="Q0AbXF_eE8cS" random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops') with random_tool.scope(): model_tf2 = SimpleModel(params) for step in range(step_num): model_tf2.train_step([fake_x, fake_y]) # + [markdown] id="cjJDjLcAz_gU" # Compare numerical equivalence for first few training steps. # # You can also check the [Validating correctness & numerical equivalence notebook](./validate_correctness.ipynb) for additonal advice for numerical equivalence. # + id="6CbCUbsCiabC" np.testing.assert_allclose(model_tf1.logs['lr'], model_tf2.logs['lr']) np.testing.assert_allclose(model_tf1.logs['loss'], model_tf2.logs['loss']) for step in range(step_num): for name in model_tf1.logs['layer_out'][step]: np.testing.assert_allclose( model_tf1.logs['layer_out'][step][name], model_tf2.logs['layer_out'][step][name]) # + [markdown] id="dhVuuciimLIY" # #### Unit tests # + [markdown] id="sXZYFC6Hhqeb" # There are a few types of unit testing that can help debug your migration code. # 1. Single forward pass validation # 2. Model training numerical equivalence validation for a few steps # 3. Benchmark inference performance # 4. The trained model makes correct predictions on fixed and simple data points # # You can use `@parameterized.parameters` to test models with different configurations. [Details with code sample](https://github.com/abseil/abseil-py/blob/master/absl/testing/parameterized.py). # # Note that it's possible to run session APIs and eager execution in the same test case. The code snippets below show how. 
# + id="CdHqkgPPM2Bj"
import unittest

class TestNumericalEquivalence(unittest.TestCase):
    """Checks that the TF1.x and TF2 training runs stay numerically close."""

    # copied from code samples above
    # Bug fix: this hook must be named `setUp` (not `setup`) -- unittest only
    # invokes the camelCase hook, so with `setup` the models were never built
    # and every test failed with AttributeError.
    def setUp(self):
        # record statistics for 100 training steps
        step_num = 100

        # setup TF 1 model
        random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
        with random_tool.scope():
            # run TF1.x code in graph mode with context management
            graph = tf.Graph()
            with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
                self.model_tf1 = SimpleModelWrapper()
                # build the model
                inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, params['input_size']))
                labels = tf.compat.v1.placeholder(tf.float32, shape=(None, params['num_classes']))
                spec = self.model_tf1.model_fn(inputs, labels, tf.estimator.ModeKeys.TRAIN, params)
                train_op = spec.train_op

                sess.run(tf.compat.v1.global_variables_initializer())
                for step in range(step_num):
                    # log everything and update the model for one step
                    logs, _ = sess.run(
                        [self.model_tf1.logged_ops, train_op],
                        feed_dict={inputs: fake_x, labels: fake_y})
                    self.model_tf1.update_logs(logs)

        # setup TF2 model
        random_tool = v1.keras.utils.DeterministicRandomTestTool(mode='num_random_ops')
        with random_tool.scope():
            self.model_tf2 = SimpleModel(params)
            for step in range(step_num):
                self.model_tf2.train_step([fake_x, fake_y])

    def test_learning_rate(self):
        """Learning-rate schedules must match exactly, step for step."""
        np.testing.assert_allclose(
            self.model_tf1.logs['lr'],
            self.model_tf2.logs['lr'])

    def test_training_loss(self):
        """Losses must match tightly early on, then within 5% relative."""
        # adopt different tolerance strategies before and after 10 steps
        first_n_step = 10

        # absolute difference is limited below 1e-5
        # set `equal_nan` to be False to detect potential NaN loss issues
        absolute_tolerance = 1e-5
        np.testing.assert_allclose(
            actual=self.model_tf1.logs['loss'][:first_n_step],
            desired=self.model_tf2.logs['loss'][:first_n_step],
            atol=absolute_tolerance,
            equal_nan=False)

        # relative difference is limited below 5%
        relative_tolerance = 0.05
        np.testing.assert_allclose(self.model_tf1.logs['loss'][first_n_step:],
                                   self.model_tf2.logs['loss'][first_n_step:],
                                   rtol=relative_tolerance,
                                   equal_nan=False)

# + [markdown] id="gshSQdKIddpZ"
# ## Debugging tools

# + [markdown] id="CkMfCaJRclKv"
# ### tf.print
#
# tf.print vs print/logging.info
#
# - With configurable arguments, `tf.print` can recursively display the first and last few elements of each dimension for printed tensors. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/print) for details.
# - For eager execution, both `print` and `tf.print` print the value of the tensor. But `print` may involve device-to-host copy, which can potentially slow down your code.
# - For graph mode including usage inside `tf.function`, you need to use `tf.print` to print the actual tensor value. `tf.print` is compiled into an op in the graph, whereas `print` and `logging.info` only log at tracing time, which is often not what you want.
# - `tf.print` also supports printing composite tensors like `tf.RaggedTensor` and `tf.sparse.SparseTensor`.
# - You can also use a callback to monitor metrics and variables. Please check how to use custom callbacks with [logs dict](https://www.tensorflow.org/guide/keras/custom_callback#usage_of_logs_dict) and [self.model attribute](https://www.tensorflow.org/guide/keras/custom_callback#usage_of_selfmodel_attribute).

# + [markdown] id="S-5h3cX8Dc50"
# tf.print vs print inside tf.function

# + id="gRED9FMyDKih"
# `print` prints info of tensor object
# `tf.print` prints the tensor value
@tf.function
def dummy_func(num):
    num += 1
    print(num)
    tf.print(num)
    return num

_ = dummy_func(tf.constant([1.0]))

# Output:
# Tensor("add:0", shape=(1,), dtype=float32)
# [2]

# + [markdown] id="3QroLA_zDK2w"
# tf.distribute.Strategy
#
# - If the `tf.function` containing `tf.print` is executed on the workers, for example when using `TPUStrategy` or `ParameterServerStrategy`, you need to check worker/parameter server logs to find the printed values.
# - For `print` or `logging.info`, logs will be printed on the coordinator when using `ParameterServerStrategy`, and logs will be printed on the STDOUT on worker0 when using TPUs.
#
# tf.keras.Model
# - When using Sequential and Functional API models, if you want to print values, e.g., model inputs or intermediate features after some layers, you have following options.
# 1. [Write a custom layer](https://www.tensorflow.org/guide/keras/custom_layers_and_models) that `tf.print` the inputs.
# 2. Include the intermediate outputs you want to inspect in the model outputs.
# - `tf.keras.layers.Lambda` layers have (de)serialization limitations. To avoid checkpoint loading issues, write a custom subclassed layer instead. Check the [API docs](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda) for more details.
# - You can't `tf.print` intermediate outputs in a `tf.keras.callbacks.LambdaCallback` if you don't have access to the actual values, but instead only to the symbolic Keras tensor objects.
#

# + [markdown] id="aKazGTr1ZUMG"
# Option 1: write a custom layer

# + id="8w4aY7wO0B4W"
class PrintLayer(tf.keras.layers.Layer):
  """Identity layer that tf.print-s every tensor flowing through it."""

  def call(self, inputs):
    tf.print(inputs)
    return inputs


def get_model():
  """Build a tiny Dense(4)->Dense(1) model with a PrintLayer tapping the output."""
  inputs = tf.keras.layers.Input(shape=(1,))
  hidden = tf.keras.layers.Dense(4)(inputs)
  scalar_out = tf.keras.layers.Dense(1)(hidden)
  # use custom layer to tf.print intermediate features
  printed_out = PrintLayer()(scalar_out)
  return tf.keras.Model(inputs=inputs, outputs=printed_out)


model = get_model()
model.compile(optimizer="adam", loss="mse")
model.fit([1, 2, 3], [0.0, 0.0, 1.0])

# + [markdown] id="KNESOatq7iM9"
# Option 2: include the intermediate outputs you want to inspect in the model outputs.
#
# Note that in such case, you may need some [customizations](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit) to use `Model.fit`.
# + id="MiifvdLk7g9J"
def get_model():
  """Build the toy model, exposing the input and both Dense outputs as model outputs."""
  inputs = tf.keras.layers.Input(shape=(1,))
  dense_a = tf.keras.layers.Dense(4)(inputs)
  dense_b = tf.keras.layers.Dense(1)(dense_a)
  # include intermediate values in model outputs so they can be inspected
  return tf.keras.Model(
      inputs=inputs,
      outputs={
          'inputs': inputs,
          'out_1': dense_a,
          'out_2': dense_b})

# + [markdown] id="MvIKDZpHSLmQ"
# ### pdb
# You can use [pdb](https://docs.python.org/3/library/pdb.html) both in terminal and Colab to inspect intermediate values for debugging.
#

# + [markdown] id="Qu0n4O2umyT7"
# ### Visualize graph with TensorBoard
#
# You can [examine the TensorFlow graph with TensorBoard](https://www.tensorflow.org/tensorboard/graphs). TensorBoard is also [supported on colab](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks). TensorBoard is a great tool to visualize summaries. You can use it to compare learning rate, model weights, gradient scale, training/validation metrics, or even model intermediate outputs between TF1.x model and migrated TF2 model through the training process and seeing if the values look as expected.

# + [markdown] id="vBnxB6_xzlnT"
# ### TensorFlow Profiler
#
# [TensorFlow Profiler](https://www.tensorflow.org/guide/profiler) can help you visualize the execution timeline on GPUs/TPUs. You can check out this [Colab Demo](https://www.tensorflow.org/tensorboard/tensorboard_profiling_keras) for its basic usage.

# + [markdown] id="9wNmCSHBpiGM"
# ### MultiProcessRunner
# [MultiProcessRunner](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/distribute/multi_process_runner.py#L108) is a useful tool when debugging with MultiWorkerMirroredStrategy and ParameterServerStrategy. You can take a look at [this concrete example](https://github.com/keras-team/keras/blob/master/keras/integration_test/mwms_multi_process_runner_test.py) for its usage.
#
# Specifically for these two strategies, it is recommended that you 1) not only have unit tests covering their flow, but also 2) try to reproduce failures with it inside a unit test, to avoid launching a real distributed job every time you attempt a fix.
site/en-snapshot/guide/migrate/migration_debugging.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="9XHd5ExbUIUg"
# # PyTorch 1.2 Quickstart with Google Colab
# In this code tutorial we will learn how to quickly train a model to understand some of PyTorch's basic building blocks to train a deep learning model. This notebook is inspired by the ["Tensorflow 2.0 Quickstart for experts"](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb#scrollTo=DUNzJc4jTj6G) notebook.
#
# After completion of this tutorial, you should be able to import data, transform it, and efficiently feed the data in batches to a convolution neural network (CNN) model for image classification.
#
# **Author:** [<NAME>](https://twitter.com/omarsar0)
#
# **Complete Code Walkthrough:** [Blog post](https://medium.com/dair-ai/pytorch-1-2-quickstart-with-google-colab-6690a30c38d)

# + colab={"base_uri": "https://localhost:8080/"} id="KzsiN3l_Vy1p" outputId="391e8424-6eeb-4637-e08d-41c2e707e281"
# !pip3 install torch==1.2.0+cu92 torchvision==0.4.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html

# + [markdown] id="uF1P_cRoWpvM"
# Note: We will be using the latest stable version of PyTorch so be sure to run the command above to install the latest version of PyTorch, which at the time of this tutorial was 1.2.0. We import PyTorch below using the `torch` module.
# + id="Su0COdCqT2Wk" import torch import torch.nn as nn import torch.nn.functional as F import torchvision import torchvision.transforms as transforms # + colab={"base_uri": "https://localhost:8080/"} id="rXCYmmjyVRq5" outputId="33692cc8-a164-40c6-94d7-f55d1b170499" print(torch.__version__) # + colab={"base_uri": "https://localhost:8080/"} id="AL-PaYitlRvv" outputId="fed18ee4-a778-4cc0-8e98-ca619b1f478f" from google.colab import drive drive.mount('/content/drive') # + [markdown] id="hhuQyU7AYE6K" # ## Import The Data # The first step before training the model is to import the data. We will use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) which is like the Hello World dataset of machine learning. # # Besides importing the data, we will also do a few more things: # - We will tranform the data into tensors using the `transforms` module # - We will use `DataLoader` to build convenient data loaders or what are referred to as iterators, which makes it easy to efficiently feed data in batches to deep learning models. # - As hinted above, we will also create batches of the data by setting the `batch` parameter inside the data loader. Notice we use batches of `32` in this tutorial but you can change it to `64` if you like. I encourage you to experiment with different batches. 
# + id="BtbLz8ULnASn"
# Configure data correctly:
# make rgb images and segmentation images share a file name
# and get every image into the right format.
import os
from numpy import asarray
import numpy as np
from PIL import Image

def remove_chars(filename):
    """Strip every non-digit character from `filename` and return it as a .png name.

    e.g. "frame_00123.jpg" -> "00123.png".  Used so an RGB frame and its
    segmentation mask end up with identical file names.
    """
    digits = ""
    for char in filename:
        if char.isnumeric():
            digits += char
    return digits + ".png"

def config_data(rgb_img, seg_img):
    """Rename files in both directories to digit-only names; rescale the masks.

    Args:
        rgb_img: directory of RGB frames (renamed in place).
        seg_img: directory of segmentation masks (renamed in place, and each
            mask's 0-255 pixel values are rescaled to integer ids 0-9).
    """
    for filename in os.listdir(rgb_img):
        old_path = os.path.join(rgb_img, filename)
        if not os.path.isdir(old_path):
            new_filename = remove_chars(filename)
            os.rename(old_path, os.path.join(rgb_img, new_filename))

    for filename in os.listdir(seg_img):
        old_path = os.path.join(seg_img, filename)
        if not os.path.isdir(old_path):
            new_name = os.path.join(seg_img, remove_chars(filename))
            # FIX: the original wrapped the already-absolute `new_name` in a
            # second os.path.join(seg_img, ...); os.path.join discards the
            # first component when the second is absolute, so behavior is
            # unchanged — the double join was just misleading.
            os.rename(old_path, new_name)
            # Normalize image to fit model system:
            # https://machinelearningmastery.com/how-to-manually-scale-image-pixel-data-for-deep-learning/
            image = Image.open(new_name)
            pixels = asarray(image)
            # convert from integers to floats
            pixels = pixels.astype('float32')
            # normalize to the range 0-1 then to 9 (class ids)
            pixels /= 255.0
            pixels *= 9.0
            im = Image.fromarray(pixels.astype(np.uint8))
            im.save(new_name)

PATH = '/content/drive/myDrive/Highway_Dataset'

rgb_img = '/content/drive/MyDrive/Highway_Dataset/Train/TrainSeq04/image'
seg_img = '/content/drive/MyDrive/Highway_Dataset/Train/TrainSeq04/label'
config_data(rgb_img, seg_img)

rgb_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/image'
seg_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/label'
config_data(rgb_img_t, seg_img_t)

# + id="tSjjLXrOVWBy"
from torch.utils.data import Dataset
from natsort import natsorted

class CustomDataSet(Dataset):
    """Paired (RGB frame, segmentation mask) dataset matched by sorted file name."""

    def __init__(self, main_dir, label_dir, transform):
        self.main_dir = main_dir
        self.label_dir = label_dir
        self.transform = transform
        all_imgs = os.listdir(main_dir)
        # FIX: masks must be listed from label_dir; the original listed
        # main_dir twice and only worked because config_data() gives both
        # folders identical file names.
        all_segs = os.listdir(label_dir)
        self.total_imgs = natsorted(all_imgs)
        self.total_segs = natsorted(all_segs)

    def __len__(self):
        return len(self.total_imgs)

    def __getitem__(self, idx):
        img_loc = os.path.join(self.main_dir, self.total_imgs[idx])
        image = Image.open(img_loc).convert("RGB")
        tensor_image = self.transform(image)
        seg_loc = os.path.join(self.label_dir, self.total_segs[idx])
        labeled_image = Image.open(seg_loc).convert("RGB")
        # masks are downsampled to 12x12 to match the model's output resolution
        label_transform = transforms.Compose([transforms.Resize((12,12)),
                                              transforms.ToTensor()])
        labeled_image = label_transform(labeled_image)
        labeled_image = labeled_image.float()
        return tensor_image, labeled_image

BATCH_SIZE = 32

## transformations
transform = transforms.Compose([transforms.Resize((28,28)), transforms.ToTensor()])

rgb_img = '/content/drive/MyDrive/Highway_Dataset/Train/TrainSeq04/image'
seg_img = '/content/drive/MyDrive/Highway_Dataset/Train/TrainSeq04/label'

## build the training dataset and loader
imagenet_data = CustomDataSet(rgb_img, seg_img, transform=transform)
trainloader = torch.utils.data.DataLoader(imagenet_data, batch_size=BATCH_SIZE,
                                          shuffle=True, num_workers=2)

rgb_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/image'
seg_img_t = '/content/drive/MyDrive/Highway_Dataset/Test/TestSeq04/label'

## build the test dataset and loader
imagenet_data_test = CustomDataSet(rgb_img_t, seg_img_t, transform=transform)
testloader = torch.utils.data.DataLoader(imagenet_data_test, batch_size=BATCH_SIZE,
                                         shuffle=False, num_workers=2)

#trainset = torchvision.datasets.MNIST(root='./data', train=True,
#                                      download=True, transform=transform)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
#                                          shuffle=True, num_workers=2)

## download and load testing dataset
#testset = torchvision.datasets.MNIST(root='./data', train=False,
#                                     download=True, transform=transform)
#testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
#shuffle=False,
#num_workers=2)  # FIX: commented out — dangling continuation of the commented-out MNIST testloader above; left bare it was a syntax error.

# + [markdown] id="0nZwZukWXUDn"
# ## Exploring the Data
# As a practitioner and researcher, I am always spending a bit of time and effort exploring and understanding the dataset. It's fun and this is a good practise to ensure that everything is in order.

# + [markdown] id="NW_loWKga7CH"
# Let's check what the train and test dataset contains. I will use `matplotlib` to print out some of the images from our dataset.

# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="zWd9Pt1Ca6K9" outputId="f48e524b-2032-46d6-b1f3-02108edb188e"
import matplotlib.pyplot as plt
import numpy as np

## functions to show an image
def imshow(img):
    """Display a CHW image tensor with matplotlib (unnormalization disabled)."""
    #img = img / 2 + 0.5     # unnormalize
    npimg = img.numpy()
    # matplotlib expects HWC ordering, so move channels last
    plt.imshow(np.transpose(npimg, (1, 2, 0)))

## get some random training images
dataiter = iter(trainloader)
# FIX: use the builtin next(); `dataiter.next()` was removed from DataLoader
# iterators in newer PyTorch, while next(dataiter) works on every version.
images, labels = next(dataiter)

## show images
#imshow(torchvision.utils.make_grid(images))

# + [markdown] id="XFWll5Lseiht"
# **EXERCISE:** Try to understand what the code above is doing. This will help you to better understand your dataset before moving forward.

# + [markdown] id="d9mXAVmRvhrq"
# Let's check the dimensions of a batch.

# + colab={"base_uri": "https://localhost:8080/"} id="cNFKWz1GZ4R5" outputId="a5923a57-8f8c-462a-93e5-2d168f166902"
for images, labels in trainloader:
    print("Image batch dimensions:", images.shape)
    print("Image label dimensions:", labels.shape)
    break

# + [markdown] id="tmaCTw5tXowR"
# ## The Model
# Now using the classical deep learning framework pipeline, let's build the 1 convolutional layer model.
#
# Here are a few notes for those who are beginning with PyTorch:
# - The model below consists of an `__init__()` portion which is where you include the layers and components of the neural network. In our model, we have a convolutional layer denoted by `nn.Conv2d(...)`. We are dealing with an image dataset that is in a grayscale so we only need one channel going in, hence `in_channels=1`.
We hope to get a nice representation of this layer, so we use `out_channels=32`. Kernel size is 3, and for the rest of parameters we use the default values which you can find [here](https://pytorch.org/docs/stable/nn.html?highlight=conv2d#conv2d). # - We use 2 back to back dense layers or what we refer to as linear transformations to the incoming data. Notice for `d1` I have a dimension which looks like it came out of nowhere. 128 represents the size we want as output and the (`26*26*32`) represents the dimension of the incoming data. If you would like to find out how to calculate those numbers refer to the [PyTorch documentation](https://pytorch.org/docs/stable/nn.html?highlight=linear#conv2d). In short, the convolutional layer transforms the input data into a specific dimension that has to be considered in the linear layer. The same applies for the second linear transformation (`d2`) where the dimension of the output of the previous linear layer was added as `in_features=128`, and `10` is just the size of the output which also corresponds to the number of classes. # - After each one of those layers, we also apply an activation function such as `ReLU`. For prediction purposes, we then apply a `softmax` layer to the last transformation and return the output of that. 
# + id="_IYnV4ZBa3cJ" class MyModel(nn.Module): def __init__(self): super(MyModel, self).__init__() # 28x28x1 => 26x26x32 self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3) self.drop1 = nn.Dropout(0.2) self.pool1 = nn.MaxPool2d((2,2)) def forward(self, x): # 32x1x28x28 => 32x32x26x26 x = self.conv1(x) x = self.drop1(x) x = self.conv1(x) x = self.pool1(x) x = F.relu(x) # flatten => 32 x (32*26*26) #x = x.flatten(start_dim = 1) #32 x (32*26*26) => 32x128 #x = self.d1(x) #x = F.relu(x) #x = x.reshape([32, 3, 28, 28]) #x = self.d2(x) #x = F.relu(x) # logits => 32x10 logits = x out = F.softmax(logits, dim=1) return out # + [markdown] id="evsFbkq_X6bc" # As I have done in my previous tutorials, I always encourage to test the model with 1 batch to ensure that the output dimensions are what we expect. # + colab={"base_uri": "https://localhost:8080/"} id="1poxFYqftKov" outputId="3b507800-8f09-46ac-802b-a2ce0c2b8358" ## test the model with 1 batch model = MyModel() for images, labels in trainloader: print("batch size:", images.shape) out = model(images) print(out.shape, labels.shape) break # + [markdown] id="9h_3eZQRHV_P" # ## Training the Model # Now we are ready to train the model but before that we are going to setup a loss function, an optimizer and a function to compute accuracy of the model. # + id="3_0Vjq2RHlph" learning_rate = 0.001 num_epochs = 10 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = MyModel() model = model.to(device) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) # + id="44IdrNNeIi_I" ## compute accuracy def compute_accuracy(hypothesis, Y_target): Y_prediction = hypothesis.data.max(dim=1)[1] accuracy = torch.mean((Y_prediction.data == Y_target.data.max(dim=1)[1].data).float() ) return accuracy.item() # + [markdown] id="nK3EcuIOISSR" # Now it's time for training. 
# + colab={"base_uri": "https://localhost:8080/"} id="E59hwZlAIVcL" outputId="de27d128-783f-4b04-c64d-0e315e577d4c"
for epoch in range(num_epochs):
    train_running_loss = 0.0
    train_acc = 0.0

    model = model.train()

    ## training step
    for i, (images, labels) in enumerate(trainloader):

        images = images.to(device)
        labels = labels.to(device)

        ## forward + backprop + loss
        logits = model(images)
        loss = criterion(logits, labels)
        optimizer.zero_grad()
        loss.backward()

        ## update model params
        optimizer.step()

        train_running_loss += loss.detach().item()
        train_acc += compute_accuracy(logits, labels)

    model.eval()
    # FIX: average over the number of batches (i + 1); the original divided
    # by the last batch index i, overstating both loss and accuracy.
    print('Epoch: %d | Loss: %.4f | Train Accuracy: %.2f' \
          %(epoch, train_running_loss / (i + 1), train_acc / (i + 1)))

# + [markdown] id="QuZxfQc1UIU-"
# We can also compute accuracy on the testing dataset to see how well the model performs on the image classification task. As you can see below, our basic CNN model is performing very well on the MNIST classification task.

# + colab={"base_uri": "https://localhost:8080/"} id="YU5WR0BTUHv1" outputId="5e0cca0f-3665-4d41-f976-c098f827ff71"
test_acc = 0.0
for i, (images, labels) in enumerate(testloader, 0):
    images = images.to(device)
    labels = labels.to(device)
    outputs = model(images)
    test_acc += compute_accuracy(outputs, labels)

# FIX: same off-by-one — divide by the batch count, not the last index.
print('Test Accuracy: %.2f'%( test_acc / (i + 1)))

# + colab={"base_uri": "https://localhost:8080/"} id="nBlhMlcqShun" outputId="82e84933-8bbf-4f46-f412-bf9bfef34058"
# !pip3 install opencv-python
import cv2

# (removed a stray bare `outputs` expression here — it had no effect)
for i, (images, labels) in enumerate(testloader, 0):
    images = images.to(device)
    labels = labels.to(device)
    outputs = model(images)
    tensor = outputs.cpu().detach().numpy() # make sure tensor is on cpu
    # NOTE(review): this overwrites image.png on every batch and writes the raw
    # batched network output, not a displayable image — confirm intent.
    cv2.imwrite("image.png" ,tensor )

# + [markdown] id="BZz7LAewgGAK"
# **EXERCISE:** As a way to practise, try to include the testing part inside the code where I was outputing the training accuracy, so that you can also keep testing the model on the testing data as you proceed with the training steps.
This is useful as sometimes you don't want to wait until your model has completed training to actually test the model with the testing data. # + [markdown] id="uLQlqGPsVjOB" # ## Final Words # That's it for this tutorial! Congratulations! You are now able to implement a basic CNN model in PyTorch for image classification. If you would like, you can further extend the CNN model by adding more convolution layers and max pooling, but as you saw, you don't really need it here as results look good. If you are interested in implementing a similar image classification model using RNNs see the references below. # + [markdown] id="ztAiTq9HcS_H" # ## References # - [Building RNNs is Fun with PyTorch and Google Colab](https://colab.research.google.com/drive/1NVuWLZ0cuXPAtwV4Fs2KZ2MNla0dBUas) # - [CNN Basics with PyTorch by <NAME>](https://github.com/rasbt/deeplearning-models/blob/master/pytorch_ipynb/cnn/cnn-basic.ipynb) # - [Tensorflow 2.0 Quickstart for experts](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/advanced.ipynb#scrollTo=DUNzJc4jTj6G)
src/code_dump/diva_pytoarch_vers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # import essential packages import rasterio import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Change this path to point to the raster layer in your computer addressOrtho_b1 = r'D:\Dropbox\Dropbox\Teaching\Advanced_GIS_for_Environmental_Planning\Data\3633197_005180\NY\2014\201404_kings_county_ny_6in_sp_cnir\vol001\005180.tif' # This block reads a raster layer and coverts that to an array with rasterio.open(addressOrtho_b1,'r') as rst: # this line reads the raster into an array and converts the data type to 'float64' ortho_b1=rst.read(1).astype('float64') # this line reads the metadata of the raster kwds = rst.meta.copy() # this line gets the cell size from the metadata cellSize= kwds['transform'][0] # this line reads the value that is used for nodata orthoNodata= rst.nodata # this line converts the nodata to zero ortho_b1[ortho_b1==orthoNodata]= 0 # this line plots the raster plt.imshow(ortho_b1) # you can see what the metadata is kwds # you can see the number of rows and the bumber of columns of the raster ortho_b1.shape # you can see the cell size value cellSize
Read_a_raster.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -*- coding:utf-8 -*-
import os
import numpy as np
import math
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense  # fully-connected layer
from keras.layers import LSTM
from keras.layers import Dropout
from sklearn.preprocessing import StandardScaler,MinMaxScaler
import tushare as ts
import time
TODAY = time.strftime("%Y%m%d")
from datetime import datetime
from datetime import timedelta

matplotlib.rcParams['font.family'] = 'STSong'

HIS_RECORD_START = '20100701'  # start date of the history data
HIS_STRAT_BEFORE = '20100501'  # earlier start, used to back-fill MA/EMA etc. for the first X samples
TIMESTEPS = 30                 # number of time-series steps in the observation window
PREDICT_NUM = 5                # number of periods to predict forward
CYCLE = 6                      # number of periods the training set is pushed back (towards history)
CODE_LIST=['000001.SH','399006.SZ']
INDICATOR = ['upper_band','lower_band','macd_diff','macd_dea','kdj_k','kdj_d','rsi_6','rsi_14','ar','br']
# the MACD bar is collinear with diff & dea, so it is dropped; likewise the j of kdj
OHLC = ['open','high','low','close','vol','amount']
INPUT_LIST = OHLC + INDICATOR + ['trade_date']
_INPUT_LIST = ['T0'] + INPUT_LIST  # T0 is the closing price of the last observation day
INDEXSUMMARY = ['total_mv','float_mv','turnover_rate','turnover_rate_f','pe','pe_ttm','pb']
CORV_INPUT_LIST = INDEXSUMMARY + ['IFL_close','IFL_settle','1w','trade_date']
print(_INPUT_LIST)
print(CORV_INPUT_LIST)
# -

def generate_data(seq, start_col):
    """Slice `seq` into supervised (X, y) windows.

    Row i plus the following TIMESTEPS-1 rows form one input window; rows
    i+TIMESTEPS .. i+TIMESTEPS+PREDICT_NUM-1 supply the targets.  y is the
    next PREDICT_NUM closing prices divided by the close of the last
    observation day, minus 1 (forward returns), length PREDICT_NUM.
    """
    X = []
    y = []
    for i in range(len(seq) - TIMESTEPS - PREDICT_NUM + 1):
        # columns before start_col and the trailing "trade_date" column are not inputs
        X.append(seq[i:i + TIMESTEPS,start_col:-1])
        yj = np.array(seq[i + TIMESTEPS:i + TIMESTEPS + PREDICT_NUM,0] / seq[i + TIMESTEPS -1,0]) - 1
        y.append(yj)
    return np.array(X, dtype=np.float32), np.array(y, dtype=np.float32)

def generate_data_pred(seq, start_col):
    """Slice `seq` into prediction-only input windows (no targets)."""
    X = []
    for i in range(len(seq) - TIMESTEPS + 1):
        # columns before start_col and the trailing "trade_date" column are not inputs
        X.append(seq[i:i + TIMESTEPS,start_col:-1])
    return np.array(X, dtype=np.float32)

# +
# Generic wrapper for Tushare daily endpoints (limited to 2000 rows per call):
# history up to 2018-01-01 and the delta since then are cached as Excel files.
def my_daily(func,ts_code=None):
    """Fetch daily data via `pro.<func>`, caching history + today's delta in Excel.

    Args:
        func: name of the Tushare pro API method to call (e.g. 'shibor').
        ts_code: optional instrument code forwarded to the API.
    Returns:
        DataFrame of history (pre-2018) plus delta (2018..TODAY) rows, or the
        bare history if all three delta download attempts fail.
    """
    filename = 'cache_data\\' + func + '_hist.xls'
    print("filename: ",filename)
    sheet = pd.DataFrame()
    sheet_ = pd.DataFrame()
    # FIX: explicit existence check instead of the open()/except IOError probe
    if not os.path.exists(filename):
        for _ in range(3):  # up to 3 attempts against the rate-limited API
            try:
                print("Load history data from toshare...", ts_code)
                # NOTE(review): eval() on a constructed string — acceptable only
                # because `func`/`ts_code` are trusted constants; getattr(pro, func)
                # would be the safer form.
                if (ts_code is None):
                    cmd = "pro.{}(end_date='20180101')".format(func)  # {} substitution builds the dynamic call
                    print(cmd)
                    sheet = eval(cmd)
                else:
                    cmd = "pro.{}(ts_code='{}',end_date='20180101')".format(func,ts_code)
                    print(cmd)
                    sheet = eval(cmd)
            except Exception:  # FIX: was a bare `except:` that also swallowed KeyboardInterrupt
                time.sleep(2)
            else:
                print("Write historty data to excel...")
                sheet.to_excel(filename,index=False)
                break
    else:
        print("Read historty data from excel...")
        sheet = pd.read_excel(io=filename)

    filename = 'cache_data\\' + func + TODAY + '_delta.xls'
    print("filename: ",filename)
    if not os.path.exists(filename):
        for _ in range(3):
            try:
                print("Load delta data from toshare...", ts_code)
                if (ts_code is None):
                    cmd = "pro.{}(start_date='20180101',end_date='{}')".format(func,TODAY)
                    print(cmd)
                    sheet_ = eval(cmd)
                else:
                    cmd = "pro.{}(ts_code='{}',start_date='20180101',end_date='{}')".format(func,ts_code,TODAY)
                    print(cmd)
                    sheet_ = eval(cmd)
            except Exception:  # FIX: narrowed from a bare `except:`
                time.sleep(2)
            else:
                print("Write delta data to excel...")
                sheet_.to_excel(filename,index=False)
                sheet = pd.concat([sheet,sheet_], axis=0)
                return sheet
    else:
        print("Read delta data from excel...")
        sheet_ = pd.read_excel(io=filename)
        sheet = pd.concat([sheet,sheet_], axis=0)
        return sheet
np.float64, 'pct_chg': np.float64, 'vol': np.float64, 'amount': np.float64 } print(datatps) def my_index_daily(ts_code, start_date): filename = 'cache_data\\' + ts_code + TODAY + '.xls' print("filename: ",filename) try: f =open(filename,'r') f.close() except IOError: for _ in range(3): try: print("Load from toshare...") sheet = pro.index_daily(ts_code=ts_code, start_date=start_date) except: time.sleep(2) else: print("Write to excel...") sheet.to_excel(filename,index=False) return sheet else: print("Read from excel...") sheet = pd.read_excel(io=filename,dtype=datatps) return sheet # - time.gmtime(1551888000) str(int(time.mktime(time.strptime(TODAY,'%Y%m%d')))) # 根据注册Tushare获得的token,填入参数 pro = ts.pro_api('********************************************************') # 自行填写 df = pro.fut_basic(exchange='CFFEX', fut_type='2') df # + df_shibor = my_daily(func="shibor") df_shibor.rename(columns={'date':'trade_date'}, inplace = True) df_shibor['trade_date'] = df_shibor['trade_date'].astype(str) df_shibor.sort_values(by=['trade_date'],inplace=True) df_shibor # - df_fut = my_daily(func='fut_daily',ts_code='IF.CFX') df_fut.rename(columns={'settle':'IFL_settle'}, inplace = True) df_fut.rename(columns={'close':'IFL_close'}, inplace = True) df_fut['trade_date'] = df_fut['trade_date'].astype(str) df_fut.sort_values(by=['trade_date'],inplace=True) df_fut df_aplus = my_daily(func='index_dailybasic',ts_code='000001.SH') for idx in INDEXSUMMARY : exec("df_aplus['{}'] = df_aplus['{}'].astype(float)".format(idx,idx)) df_aplus.dtypes # + # 取指数的日特征数据 df_a_list = [] df_aa_list = [] for n in range(len(CODE_LIST)): df_a = my_index_daily(ts_code=CODE_LIST[n], start_date=HIS_STRAT_BEFORE) df_a.sort_values(by=['trade_date'],inplace=True) print("initial df_a: ") print(df_a.tail()) # Create Moving Average & Create Bollinger Bands df_a['ma21'] = df_a['close'].rolling(window=21,center=False).mean() df_a['20sd'] = df_a['close'].rolling(window=20,center=False).std() df_a['upper_band'] = df_a['ma21'] + 
(df_a['20sd']*2) df_a['lower_band'] = df_a['ma21'] - (df_a['20sd']*2) # Create MACD df_a['26ema'] = df_a['close'].ewm(span=26,min_periods=1,adjust=True,ignore_na=False).mean() df_a['12ema'] = df_a['close'].ewm(span=12,min_periods=1,adjust=True,ignore_na=False).mean() df_a['macd_diff'] = df_a['12ema']-df_a['26ema'] df_a['macd_dea'] = df_a['macd_diff'].ewm(span=9,min_periods=1,adjust=True,ignore_na=False).mean() df_a['macd_bar'] = 2*(df_a['macd_diff'] - df_a['macd_dea']) # Create KDJ indicator df_a['lowL'] = df_a['low'].rolling(window=9,center=False).min() df_a['lowL'].fillna(value=df_a['low'].expanding(min_periods=1).min(), inplace=True) df_a['highL'] = df_a['high'].rolling(window=9,center=False).max() df_a['highL'].fillna(value=df_a['high'].expanding(min_periods=1).max(), inplace=True) df_a['rsv'] = (df_a['close'] - df_a['lowL']) / (df_a['highL'] - df_a['lowL']) * 100 df_a['kdj_k'] = df_a['rsv'].ewm(com=2,min_periods=1,adjust=True,ignore_na=False).mean() df_a['kdj_d'] = df_a['kdj_k'].ewm(com=2,min_periods=1,adjust=True,ignore_na=False).mean() df_a['kdj_j'] = 3 * df_a['kdj_k'] - 2 * df_a['kdj_d'] # Create RSI indicator df_a['value']=df_a['close']-df_a['close'].shift(1) df_a['value1']=df_a['value'] df_a['value1'][df_a['value1'] < 0] = 0 df_a['value2']=df_a['value'] df_a['value2'][df_a['value2'] > 0] = 0 df_a['plus_6']=df_a['value1'].rolling(window=6,center=False).sum() df_a['minus_6']=df_a['value2'].rolling(window=6,center=False).sum() df_a['plus_14']=df_a['value1'].rolling(window=14,center=False).sum() df_a['minus_14']=df_a['value2'].rolling(window=14,center=False).sum() df_a['rsi_6']=df_a['plus_6']/(df_a['plus_6']-df_a['minus_6'])*100 df_a['rsi_14']=df_a['plus_14']/(df_a['plus_14']-df_a['minus_14'])*100 # Create ARBR indicator df_a['ho'] = df_a['high'] - df_a['open'] df_a['ol'] = df_a['open'] - df_a['low'] df_a['hcy'] = df_a['high'] - df_a['close'].shift(1) df_a['cyl'] = df_a['close'].shift(1) - df_a['low'] df_a['ar'] = df_a['ho'].rolling(window=26).sum() / 
df_a['ol'].rolling(window=26).sum() df_a['br'] = df_a['hcy'].rolling(window=26).sum() / df_a['cyl'].rolling(window=26).sum() df_a.fillna(0,inplace=True) df_a['T0'] = df_a['close'] df_a['vol'] = df_a['vol'].astype(float) df_a = df_a[df_a['trade_date'] >= HIS_RECORD_START] #只保留训练开始时点之后的记录 print("df_a: ",df_a.dtypes) print(df_a.tail()) df_a_list.append(df_a) # 以下为辅助特征df_aa df_aplus = my_daily(func='index_dailybasic',ts_code=CODE_LIST[n]) for idx in INDEXSUMMARY : exec("df_aplus['{}'] = df_aplus['{}'].astype(float)".format(idx,idx)) df_aplus['trade_date'] = df_aplus['trade_date'].astype(str) df_tmp = pd.merge(df_a[['trade_date']],df_aplus, how='left', on='trade_date') df_tmp = pd.merge(df_tmp,df_shibor[['trade_date','1w']], how='left', on='trade_date') df_tmp = pd.merge(df_tmp,df_fut[['trade_date','IFL_close','IFL_settle']], how='left', on='trade_date') df_aa = df_tmp df_aa.fillna(axis=0,method='ffill',inplace=True) print("df_aa: ",df_aa.dtypes) print(df_aa.tail()) df_aa_list.append(df_aa) del(df_a,df_aa) # - #显示所有列 pd.set_option('display.max_columns', None) df_a_list[1].tail() # + # 清理model/train/目录 import os def del_file(path_): for i in os.listdir(path_): path_file = os.path.join(path_,i) #取文件绝对路径 if os.path.isfile(path_file): os.remove(path_file) #删除文件 else: del_file(path_file) #递归删除子目录 del_file("model/train") # + import re from os import walk from os import path s = "lstm-00-02-0.001861.hdf5" p = re.compile(r'.*(?=.hdf5)') print(s.split("-")) re.findall(p,s.split("-")[-1])[0] # + # LSTM 2 import random from keras.layers import Input from keras.layers import LSTM from keras.layers import Dropout from keras.layers import concatenate from keras.models import Model,load_model from keras.callbacks import ModelCheckpoint NUM_UNIT = 100 BATCH_SIZE = 32 TRAINING_STEPS = 5 DROP_OUT = 0.2 VERBOSE_ = 2 WEIGHT = 0.2 # Weight of RNN based on y only VALIDATION_ = 0.15 LOSS = 'mae' scaler_list2 = [] model_list2 = [] tuned_model_list2 = [] tuned_model_hdf5 = [] for n in 
range(len(CODE_LIST)): df1 = df_a_list[n].sort_values(by=['trade_date'])[_INPUT_LIST][:-CYCLE*PREDICT_NUM] df2 = df_aa_list[n].sort_values(by=['trade_date'])[CORV_INPUT_LIST][:-(CYCLE+1)*PREDICT_NUM] # print(df1.tail()) # print(df2.tail()) scaler = StandardScaler() norm_data = scaler.fit_transform(df1.iloc[:,1:-1]) # T0保留原值,不做归一化处理 scaler2 = StandardScaler() norm_data2 = scaler2.fit_transform(df2.iloc[:,:-1]) scaler_list2.append((scaler,scaler2)) # print("scaler: ",scaler.scale_,scaler.mean_,scaler.var_ ,scaler.n_samples_seen_) print("df1.columns: ",df1.columns) norm_data = np.concatenate([np.asarray(df1.iloc[:,0:1]), norm_data, np.asarray(df1.iloc[:,-1:])],axis=1) norm_data2 = np.concatenate([norm_data2, np.asarray(df2.iloc[:,-1:])],axis=1) print("norm_data: ",type(norm_data)) print(norm_data.shape) # print(norm_data[-7:]) del(df1,df2,scaler,scaler2) print(norm_data.shape) x,y=generate_data(norm_data,1) x2=generate_data_pred(norm_data2,0) print("x shape: ",x.shape) # print(x[-6:]) print("x2 shape: ",x2.shape) # print(x2[-6:]) print("y shape: ",y.shape) # print(y[-6:]) main_in = Input(shape=(TIMESTEPS,len(INPUT_LIST)-1), dtype='float32', name='main_in') main_lstm = LSTM(NUM_UNIT,return_sequences=True,dropout=0,recurrent_dropout=0)(main_in) main_lstm = Dropout(DROP_OUT)(main_lstm) main_lstm = LSTM(NUM_UNIT,return_sequences=True,dropout=0,recurrent_dropout=0)(main_lstm) main_lstm = LSTM(NUM_UNIT,return_sequences=False,dropout=0,recurrent_dropout=0)(main_lstm) main_lstm = Dropout(DROP_OUT)(main_lstm) main_out = Dense(PREDICT_NUM,activation='linear', name='main_out')(main_lstm) # print(main_lstm) exo_in = Input(shape=(TIMESTEPS,len(CORV_INPUT_LIST)-1), dtype='float32', name='exo_in') exo_lstm = LSTM(NUM_UNIT,return_sequences=True,dropout=0,recurrent_dropout=0)(exo_in) exo_lstm = Dropout(DROP_OUT)(exo_lstm) exo_lstm = LSTM(NUM_UNIT,return_sequences=True,dropout=0,recurrent_dropout=0)(exo_lstm) exo_lstm = 
LSTM(NUM_UNIT,return_sequences=False,dropout=0,recurrent_dropout=0)(exo_lstm) exo_lstm = Dropout(DROP_OUT)(exo_lstm) # print(exo_lstm) z = concatenate([main_lstm, exo_lstm]) print("z: ",z) combo_out = Dense(PREDICT_NUM, activation='linear', name='combo_out')(z) print("main_out,combo_out",main_out,combo_out) r='^(lstm-' + str(n) + ')+.*hdf5' print(r) path_ = "model/tune" path_file = "" for fi in os.listdir(path_): path_file = os.path.join(path_,fi) #取文件绝对路径 # print(path_file) if os.path.isfile(path_file): res=re.search(r,fi) #查找指定正则的文件名 if res: break tuned_model_hdf5.append(path_file) print("loading tuned model: " + path_file) tuned_model = Model(inputs=[main_in, exo_in], outputs=[main_out, combo_out]) model = Model(inputs=[main_in, exo_in], outputs=[main_out, combo_out]) tuned_model = load_model(path_file) # tuned_model.compile(optimizer='adam', loss=LOSS, loss_weights=[WEIGHT, 1-WEIGHT]) # 调试代码,正式运行需注掉 model.compile(optimizer='adam', loss=LOSS, loss_weights=[WEIGHT, 1-WEIGHT]) # checkpoint filepath = "model/train/lstm-" + str(n) + "-{epoch:02d}-{val_loss:.6f}.hdf5" # 中途训练效果提升, 则将文件保存, 每提升一次, 保存一次 checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='min') callbacks_list = [checkpoint] # shuffle np.random.seed(1024) index = list(range(len(x))) random.shuffle(index) x_data = x[index] x_data2 = x2[index] y_label = y[index] print(x_data.shape,x_data2.shape,y_label.shape) # Fit fcst = model.fit(x={'main_in': x_data, 'exo_in': x_data2}, \ y={'main_out': y_label,'combo_out': y_label}, \ batch_size=BATCH_SIZE, epochs=TRAINING_STEPS,verbose=VERBOSE_,validation_split=VALIDATION_, \ callbacks=callbacks_list, shuffle=False) print(fcst) # print(dir(fcst.history)) # plot history plt.plot(fcst.history['loss'], label='train') plt.plot(fcst.history['val_loss'], label='test') plt.legend() plt.show() # tuned_model = model # 调试代码,正式运行需注掉 model_list2.append(model) tuned_model_list2.append(tuned_model) 
del(model,tuned_model,norm_data,norm_data2) # - tuned_model_list2[1].summary() # + # 装入/model/train下最佳LSTM模型 import re from os import walk from os import path from keras.models import Model,load_model del(model_list2) model_list2 = [] best_trained_hdf5 = [] for n in range(len(CODE_LIST)): p = re.compile(r'.*(?=.hdf5)') r='^(lstm-' + str(n) + ')+.*hdf5' print(r) for parent,dirNames,fileNames in walk("model/train"): #根据os.walk输出目录信息 ff_list = [] floss_list = [] for fileName in fileNames: #遍历所有文件名 res=re.search(r,fileName) #查找指定正则的文件名 if res: print(path.join(parent,fileName)) #拼接目录和文件名,输出 # print(parent,dirNames,fileName) ff_list.append(fileName) floss_list.append(float(re.findall(p,fileName.split("-")[-1])[0])) idx = floss_list.index(min(floss_list)) print(idx) fpath = "model/train/" + ff_list[idx] print(fpath) best_trained_hdf5.append(fpath) print("loading trained model: " + fpath) model = load_model(fpath) # model.compile(optimizer='adam', loss=LOSS, loss_weights=[WEIGHT, 1-WEIGHT]) model_list2.append(model) del(model) # + # 本次训练最佳LSTM模型评估(多周期) from scipy.spatial.distance import pdist d_list = [] for n in range(len(CODE_LIST)): df1 = df_a_list[n].sort_values(by=['trade_date'])[_INPUT_LIST][-(TIMESTEPS+CYCLE*PREDICT_NUM):] df2 = df_aa_list[n].sort_values(by=['trade_date'])[CORV_INPUT_LIST][-(TIMESTEPS+CYCLE*PREDICT_NUM):-PREDICT_NUM] print(df1.tail()) print(df2.tail()) scaler,scaler2 = scaler_list2[n] norm_data = scaler.transform(df1.iloc[:,1:-1]) norm_data2 = scaler2.transform(df2.iloc[:,:-1]) norm_data = np.concatenate([np.asarray(df1.iloc[:,0:1]), norm_data, np.asarray(df1.iloc[:,-1:])],axis=1) norm_data2 = np.concatenate([norm_data2, np.asarray(df2.iloc[:,-1:])],axis=1) print("norm_data: ",type(norm_data),norm_data.shape) print(norm_data[-7:]) x,y=generate_data(norm_data,1) y=np.asarray(df1.iloc[-(CYCLE*PREDICT_NUM):,0:1]).ravel() print("y: ",type(y),y.shape) print(y) x2=generate_data_pred(norm_data2,0) print("x shape: ",x.shape, "y shape: ",y.shape,y[-6:]) 
print("x2 shape: ",x2.shape) y_combo_df = pd.DataFrame() s=PREDICT_NUM i_list=[i+TIMESTEPS-1 for i,a in enumerate(x) if i % s == 0] #缝5(PREDICT_NUM)的行坐标 print("i_list: ",len(i_list)) print(i_list) x=np.array([a for i,a in enumerate(x) if i % s == 0], dtype = float) x2=np.array([a for i,a in enumerate(x2) if i % s == 0], dtype = float) #按缝5(PREDICT_NUM)x进行筛选 print("x shape: ",x.shape, "y shape: ",y.shape,y[-6:]) print("x2 shape: ",x2.shape) ema_array = norm_data[i_list,0] # 取出CYCLE个T0值 ema_array = ema_array.repeat(PREDICT_NUM).reshape(CYCLE,PREDICT_NUM) #重复PREDICT_NUM次,形成(CYCLE,PREDICT_NUM)的矩阵 print("ema_array: ",ema_array.shape) print(ema_array) ## y_,y_pred = model_list2[n].predict([x,x2]) y_combo = y_*WEIGHT + y_pred*(1-WEIGHT) y_combo = (y_combo + 1) * ema_array print("y_combo.shape: ",y_combo.shape) y_combo = y_combo[np.newaxis,:] print(CODE_LIST[n],'y_combo: ',type(y_combo),y_combo.shape) print(y_combo) print(CODE_LIST[n],'y: ',y) y_plot_temp=y y_pred_plot_temp=y_combo print("y_plot_temp.shape: ",y_plot_temp.shape) print("y_pred_plot_temp.shape: ",y_pred_plot_temp.shape) X=np.vstack([y_plot_temp.ravel(),y_pred_plot_temp.ravel()]) print('X: ',X) # d1=pdist(X,'seuclidean') d1 = pdist(X, 'cityblock') # 曼哈顿距离 # print(d1) print(CODE_LIST[n],'y_plot_temp: ',y_plot_temp) print(CODE_LIST[n],'y_pred_plot_temp: ',y_pred_plot_temp) print('曼哈顿距离: ',d1) d_list.append(d1) y_plot=list(norm_data[:-(CYCLE*PREDICT_NUM),0].ravel()) + list(y_plot_temp.ravel()) y_pred_plot=list(norm_data[:-(CYCLE*PREDICT_NUM),0].ravel()) + list(y_pred_plot_temp.ravel()) print(y_plot) print(len(y_plot),type(y_plot)) print(y_pred_plot) print(len(y_pred_plot),type(y_plot)) d__ = np.asarray(df1['trade_date'][-(TIMESTEPS+CYCLE*PREDICT_NUM):].apply(lambda x: datetime.strptime(x, '%Y%m%d').strftime('%m-%d'))).ravel() # 将日期格式由'%Y%m%d'转为'%m-%d' print('d__: ',d__.shape) print(d__) fig = plt.figure(figsize = (8,6)) ax = fig.add_subplot(1,1,1) fig.suptitle(CODE_LIST[n]+'模型评估', fontsize = 18, 
fontweight='bold') xticks = range(0,TIMESTEPS+CYCLE*PREDICT_NUM) xticklabels = [d_ if i % PREDICT_NUM ==0 else '' for i,d_ in enumerate(d__)] print("xticks: ",len(xticks)) print(xticks) print("xticklabels: ",len(xticklabels)) print(xticklabels) ax.plot(xticks, y_plot) # plt.plot(y_plot,label='实际') for j in range(CYCLE): ax.plot(xticks[TIMESTEPS+j*PREDICT_NUM:TIMESTEPS+(j+1)*PREDICT_NUM], y_pred_plot[TIMESTEPS+j*PREDICT_NUM:TIMESTEPS+(j+1)*PREDICT_NUM], color='r') tmp_list = ['{:.2f}'.format(i) for i in list(y_plot_temp.ravel())] tmp_pred_list = ['{:.2f}'.format(i) for i in list(y_pred_plot_temp.ravel())] tmp_array = norm_data[:,0].ravel() plt.text((TIMESTEPS+CYCLE*PREDICT_NUM) * 1.1,0.85 * tmp_array.min(),'预测值: \r\n\r\n' + '\r\n'.join(tmp_pred_list)) plt.text((TIMESTEPS+CYCLE*PREDICT_NUM) * 1.2,0.85 * tmp_array.min(),'实际值: \r\n\r\n' + '\r\n'.join(tmp_list)) ax.vlines(TIMESTEPS, 0, tmp_array.max() * 1.05, colors = "y", linestyles = "dashed") ax.set_xticks(xticks) ax.set_xticklabels(xticklabels,rotation=45) print("y_plot_temp.max(): ",tmp_array.max()) #yticks = range(int(0.3 * y_plot_temp.max()) ,int(y_plot_temp.max() * 1.2)) plt.ylim(int(0.85 * tmp_array.min()) ,int(tmp_array.max() * 1.05)) plt.legend(['实际','预测']) plt.grid(True) plt.show() #del(df_a,df_aa,df1,df2,scaler,scaler2) del(df1,df2,scaler,scaler2) # + # 原优化LSTM模型评估(多周期) tuned_d_list = [] for n in range(len(CODE_LIST)): df1 = df_a_list[n].sort_values(by=['trade_date'])[_INPUT_LIST][-(TIMESTEPS+CYCLE*PREDICT_NUM):] df2 = df_aa_list[n].sort_values(by=['trade_date'])[CORV_INPUT_LIST][-(TIMESTEPS+CYCLE*PREDICT_NUM):-PREDICT_NUM] print(df1.tail()) print(df2.tail()) scaler,scaler2 = scaler_list2[n] norm_data = scaler.transform(df1.iloc[:,1:-1]) norm_data2 = scaler2.transform(df2.iloc[:,:-1]) norm_data = np.concatenate([np.asarray(df1.iloc[:,0:1]), norm_data, np.asarray(df1.iloc[:,-1:])],axis=1) norm_data2 = np.concatenate([norm_data2, np.asarray(df2.iloc[:,-1:])],axis=1) print("norm_data: 
",type(norm_data),norm_data.shape) print(norm_data[-7:]) x,y=generate_data(norm_data,1) y=np.asarray(df1.iloc[-(CYCLE*PREDICT_NUM):,0:1]).ravel() print("y: ",type(y),y.shape) print(y) x2=generate_data_pred(norm_data2,0) print("x shape: ",x.shape, "y shape: ",y.shape,y[-6:]) print("x2 shape: ",x2.shape) y_combo_df = pd.DataFrame() s=PREDICT_NUM i_list=[i+TIMESTEPS-1 for i,a in enumerate(x) if i % s == 0] #缝5(PREDICT_NUM)的行坐标 print("i_list: ",len(i_list)) print(i_list) x=np.array([a for i,a in enumerate(x) if i % s == 0], dtype = float) x2=np.array([a for i,a in enumerate(x2) if i % s == 0], dtype = float) #按缝5(PREDICT_NUM)对x进行筛选 print("x shape: ",x.shape, "y shape: ",y.shape,y[-6:]) print("x2 shape: ",x2.shape) ema_array = norm_data[i_list,0] ema_array = ema_array.repeat(PREDICT_NUM).reshape(CYCLE,PREDICT_NUM) #重复PREDICT_NUM次,形成(CYCLE,PREDICT_NUM)的矩阵 print("ema_array: ",ema_array.shape) print(ema_array) ## y_,y_pred = tuned_model_list2[n].predict([x,x2]) y_combo = y_*WEIGHT + y_pred*(1-WEIGHT) y_combo = (y_combo + 1) * ema_array print("y_combo.shape: ",y_combo.shape) y_combo = y_combo[np.newaxis,:] print(CODE_LIST[n],'y_combo: ',type(y_combo),y_combo.shape) print(y_combo) print(CODE_LIST[n],'y: ',y) y_plot_temp=y y_pred_plot_temp=y_combo print("y_plot_temp.shape: ",y_plot_temp.shape) print("y_pred_plot_temp.shape: ",y_pred_plot_temp.shape) X=np.vstack([y_plot_temp.ravel(),y_pred_plot_temp.ravel()]) print('X: ',X) # d1=pdist(X,'seuclidean') d1 = pdist(X, 'cityblock') # 曼哈顿距离 # print(d1) tuned_d_list.append(d1) print(CODE_LIST[n],'y_plot_temp: ',y_plot_temp) print(CODE_LIST[n],'y_pred_plot_temp: ',y_pred_plot_temp) print('曼哈顿距离: ',d1) y_plot=list(norm_data[:-(CYCLE*PREDICT_NUM),0].ravel()) + list(y_plot_temp.ravel()) y_pred_plot=list(norm_data[:-(CYCLE*PREDICT_NUM),0].ravel()) + list(y_pred_plot_temp.ravel()) print(y_plot) print(len(y_plot),type(y_plot)) print(y_pred_plot) print(len(y_pred_plot),type(y_plot)) d__ = 
np.asarray(df1['trade_date'][-(TIMESTEPS+CYCLE*PREDICT_NUM):].apply(lambda x: datetime.strptime(x, '%Y%m%d').strftime('%m%d'))).ravel() # 将日期格式由'%Y%m%d'转为'%m-%d' print('d__: ',d__.shape) print(d__) fig = plt.figure(figsize = (9,6)) #plt.style.use('ggplot') ax = fig.add_subplot(1,1,1) fig.suptitle('模型V1.0(FreeStyle-Arm)袋外测试评估', fontsize = 18, color='k', fontweight='bold') #ax.set_facecolor('k') ax.set_title( '股指代码: ' + CODE_LIST[n] , fontsize = 20, color='k', fontweight='bold') xticks = range(0,TIMESTEPS+CYCLE*PREDICT_NUM) xticklabels = [d_ if i % PREDICT_NUM ==0 else '' for i,d_ in enumerate(d__)] print("xticks: ",len(xticks)) print(xticks) print("xticklabels: ",len(xticklabels)) print(xticklabels) ax.plot(xticks, y_plot) for j in range(CYCLE): ax.plot(xticks[TIMESTEPS+j*PREDICT_NUM:TIMESTEPS+(j+1)*PREDICT_NUM], y_pred_plot[TIMESTEPS+j*PREDICT_NUM:TIMESTEPS+(j+1)*PREDICT_NUM], color='r') tmp_list = ['{:.0f}'.format(i) for i in list(y_plot_temp.ravel())] tmp_pred_list = ['{:.0f}'.format(i) for i in list(y_pred_plot_temp.ravel())] tmp_array = norm_data[:,0].ravel() plt.text((TIMESTEPS+CYCLE*PREDICT_NUM) * 1.05,0.85 * tmp_array.min(),'预测:\r\n\r\n' + '\r\n'.join(tmp_pred_list)) plt.text((TIMESTEPS+CYCLE*PREDICT_NUM) * 1.10,0.85 * tmp_array.min(),'实际:\r\n\r\n' + '\r\n'.join(tmp_list)) ax.vlines(TIMESTEPS, 0, tmp_array.max() * 1.05, colors = "y", linestyles = "dashed") ax.set_xticks(xticks) ax.set_xticklabels(xticklabels,rotation=45) print("y_plot_temp.max(): ",tmp_array.max()) #yticks = range(int(0.3 * y_plot_temp.max()) ,int(y_plot_temp.max() * 1.2)) plt.ylim(int(0.85 * tmp_array.min()) ,int(tmp_array.max() * 1.05)) plt.legend(['实际','预测']) plt.grid(True) # plt.show() plt.savefig("pic/" + CODE_LIST[n] + "_test.png") plt.show() #del(df_a,df_aa,df1,df2,scaler,scaler2) del(df1,df2,scaler,scaler2) # + # 根据比较结果,更新优化LSTM模型 import shutil for n in range(len(CODE_LIST)): print(d_list[n]) print(tuned_d_list[n]) if (d_list[n] < tuned_d_list[n]): fpath = best_trained_hdf5[n] 
print(fpath) path_file = tuned_model_hdf5[n] print(path_file) os.remove(path_file) #删除文件 #shutil.copy(fpath,path_) path_file = fpath.replace("train","tune") print(path_file) model_list2[n].save(path_file, overwrite=True, include_optimizer=True) else: # 新模型不如原优化模型稳定,使用原模型预测 print("using the tuned model for: " + str(n)) model_list2[n] = tuned_model_list2[n] # + import urllib.request as request import matplotlib.dates as mdates ''' @query a single date: string '20170401'; @api return day_type: 0 workday 1 weekend 2 holiday -1 err @function return day_type: 1 workday 0 weekend&holiday ''' d = timedelta(days=1) # - # 判断日期假日类型 def get_day_type(query_date): url = 'http://tool.bitefu.net/jiari/?d=' + query_date resp = request.urlopen(url) content = resp.read() if content: try: day_type = int(content) except ValueError: return -1 else: return day_type else: return -1 # 判断是否交易日 def is_tradeday(query_date): weekday = datetime.strptime(query_date, '%Y%m%d').isoweekday() if weekday <= 5 and get_day_type(query_date) == 0: return 1 else: return 0 # + # LSTM2模型预测 font_ax = { \ 'fontsize':11, \ 'fontweight': 'bold', \ 'verticalalignment': 'baseline', \ 'horizontalalignment': 'center'} for n in range(len(CODE_LIST)): df1 = df_a_list[n].sort_values(by=['trade_date'])[_INPUT_LIST][-(TIMESTEPS):] df2 = df_aa_list[n].sort_values(by=['trade_date'])[CORV_INPUT_LIST][-(TIMESTEPS):] df_date = df_a_list[n].sort_values(by=['trade_date'])[["trade_date"]][-(TIMESTEPS):] l_date = df_date['trade_date'].tolist() xs = [datetime.strptime(d_, '%Y%m%d').date() for d_ in l_date] #补全一个预测周期的日期(datatime型) for i in range(PREDICT_NUM): xs_tail = xs[-1] + d while is_tradeday(xs_tail.strftime('%Y%m%d')) == 0: xs_tail = xs_tail + d xs.append(xs_tail) print(type(xs),xs) print(df1.tail()) print(df2.tail()) scaler,scaler2 = scaler_list2[n] norm_data = scaler.transform(df1.iloc[:,1:-1]) norm_data2 = scaler2.transform(df2.iloc[:,:-1]) norm_data = np.concatenate([np.asarray(df1.iloc[:,0:1]), norm_data, 
np.asarray(df1.iloc[:,-1:])],axis=1) norm_data2 = np.concatenate([norm_data2, np.asarray(df2.iloc[:,-1:])],axis=1) print("norm_data: ",type(norm_data),norm_data.shape) print(norm_data[-7:]) x=generate_data_pred(norm_data,1) x2=generate_data_pred(norm_data2,0) print("x shape: ",x.shape) print("x2 shape: ",x2.shape) y_combo_df = pd.DataFrame() y_,y_pred = model_list2[n].predict([x,x2]) y_combo = y_*WEIGHT + y_pred*(1-WEIGHT) y_combo = (y_combo + 1) * norm_data[TIMESTEPS-1,0] # print("y_combo.shape: ",y_combo.shape) y_combo = y_combo[np.newaxis,:] print(CODE_LIST[n],'y_pred: ',y_pred) print(CODE_LIST[n],'y_: ',y_) print(CODE_LIST[n],'y_combo: ',y_combo) y_pred_plot_temp=y_combo print(CODE_LIST[n],'y_pred_plot_temp: ',y_pred_plot_temp) y_pred_plot=list(norm_data[:,0].ravel()) + list(y_pred_plot_temp.ravel()) # 跟评估不一样 print("y_pred_plot: ",len(y_pred_plot),type(y_pred_plot)) print(y_pred_plot) fig = plt.figure(figsize = (10,6)) # plt.style.use('ggplot') ax = fig.add_subplot(1,1,1) # fig.subplots_adjust(wspace=0.5,hspace=0.5) fig.suptitle('模型V1.0(FreeStyle-Arm)未来5日股指预测', fontsize = 18, color='k', fontweight='bold') #ax.set_facecolor('k') ax.set_title( '股指代码: ' + CODE_LIST[n] , fontsize = 20, color='k', fontweight='bold') ax.set_xlabel("日期",fontsize=14, fontweight='bold') ax.set_ylabel("指数",fontsize=14, fontweight='bold') xticks = range(len(xs)) # xticklabels = [d_.strftime('%Y-%m-%d') for d_ in xs] xticklabels = [((d_.strftime('%m-%d') + ' ') if i % PREDICT_NUM ==0 else ' ')for i,d_ in enumerate(xs)] ax.plot(xticks[:TIMESTEPS],y_pred_plot[:-PREDICT_NUM],marker='s') ax.plot(xticks[TIMESTEPS:],y_pred_plot[-PREDICT_NUM:],marker='s') tmp_date_list = [d_.strftime('%Y-%m-%d') for i,d_ in enumerate(xs)] tmp_pred_list = ['{:.2f}'.format(i) for i in y_pred_plot] tmp_array = np.array(y_pred_plot) print(tmp_date_list) plt.text((TIMESTEPS+PREDICT_NUM) * 1.02,1.005 * tmp_array.min(), \ '交易日:\\\r\n\r\n\r\n' + '\r\n\r\n\r\n'.join(tmp_date_list[-PREDICT_NUM:]) + '\r\n\r\n\r\n\r\n', 
fontsize=12) plt.text((TIMESTEPS+PREDICT_NUM) * 1.02,1.000 * tmp_array.min(), \ ' 预测值:\r\n\r\n\r\n ' + '\r\n\r\n\r\n '.join(tmp_pred_list[-PREDICT_NUM:]) + '\r\n\r\n\r\n* 该预测结果\r\n不做投资建议', fontsize=12) # position bottom right plt.text(TIMESTEPS-1, tmp_array.min(), 'No matter you believe or not,\r\n the logic of AI is still there.',fontsize=33, color='gray',ha='right', va='bottom', alpha=0.4) plt.text(TIMESTEPS, (tmp_array.mean() + tmp_array.min()) / 2, '来自公众号:\r\n股指神通',fontsize=66, color='gray',ha='right', va='bottom', alpha=0.4) ax.axvline(xticks[TIMESTEPS] + 2.5, color="k", linewidth=plt.rcParams['lines.linewidth'] * 66, linestyle = '-', alpha=0.15) ax.set_xticks(xticks) ax.set_xticklabels(xticklabels,fontdict=font_ax, rotation=45) print("ax.get_yticks(): ",ax.get_yticks()) ysticks = ax.get_yticks() yticklabels = [(str(int(i)) + ' ') for i in ysticks] ax.set_yticklabels(yticklabels,fontdict=font_ax) plt.legend(('当前', '预测')) plt.grid(True) plt.savefig("pic/" + CODE_LIST[n] + "_prod.png") plt.show() del(df1,df2,scaler)
stock_a-os-v1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Extend labels
# Benchmark of pyclesperanto's `extend_labels_with_maximum_radius` against its
# scikit-image counterpart `expand_labels` — a label-map dilation that yields a
# simplified Voronoi diagram seeded by the input labels.
#
# **Note:** benchmarking results vary heavily depending on image size, kernel size,
# used operations, parameters and used hardware. Use this notebook to adapt it to
# your use-case scenario and benchmark on your target hardware. If you have
# different scenarios or use-cases, you are very welcome to submit your notebook
# as pull-request!

# +
import pyclesperanto_prototype as cle
from skimage import filters
import time

# Block until each GPU kernel finishes so the timings below are meaningful;
# this slows down overall workflow execution a bit.
cle.set_wait_for_kernel_finish(True)

# Prefer a GPU whose name contains the given string; any other GPU is used as
# a fallback when no match is found.
cle.select_device('RTX')
# -

# The following example is taken from
# https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_expand_labels.html

# +
import numpy as np
import matplotlib.pyplot as plt
from skimage.filters import sobel
from skimage.measure import label
from skimage.segmentation import watershed, expand_labels
from skimage.color import label2rgb
from skimage import data

coins = data.coins()

# Segment the coins using edge detection plus watershed.
edges = sobel(coins)

# Intensity-based seed pixels for the watershed: dark pixels are background,
# bright pixels are foreground.
markers = np.zeros_like(coins)
foreground, background = 1, 2
markers[coins < 30.0] = background
markers[coins > 150.0] = foreground

ws = watershed(edges, markers)
seg1 = label(ws == foreground)

expanded = expand_labels(seg1, distance=10)

# Show both segmentations side by side.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 5), sharex=True, sharey=True)

color1 = label2rgb(seg1, image=coins, bg_label=0)
axes[0].imshow(color1)
axes[0].set_title('Sobel+Watershed')

color2 = label2rgb(expanded, image=coins, bg_label=0)
axes[1].imshow(color2)
axes[1].set_title('Expanded labels')

for a in axes:
    a.axis('off')

fig.tight_layout()
plt.show()
# -

# Time 2D label expansion on the CPU with scikit-image.
for i in range(10):
    start_time = time.time()
    expanded = expand_labels(seg1, distance=10)
    elapsed = time.time() - start_time
    print(f"skimage expand_labels duration: {elapsed}")

# +
# Time 2D label expansion on the GPU with pyclesperanto; the previous result
# buffer is reused as the output image on subsequent iterations.
labels_gpu = cle.push_zyx(seg1)
expanded = None
for i in range(10):
    start_time = time.time()
    expanded = cle.extend_labels_with_maximum_radius(labels_gpu, expanded, 10)
    elapsed = time.time() - start_time
    print(f"pyclesperanto extend_labels_with_maximum_radius duration: {elapsed}")
# -

# ## 3D

# Stack 50 copies of the 2D segmentation into a 3D volume.
seg_stack = np.asarray([seg1] * 50)
print(seg_stack.shape)

# Time 3D label expansion on the CPU with scikit-image.
for i in range(10):
    start_time = time.time()
    expanded = expand_labels(seg_stack, distance=10)
    elapsed = time.time() - start_time
    print(f"skimage expand_labels duration: {elapsed}")

# +
# Time 3D label expansion on the GPU with pyclesperanto.
labels_gpu = cle.push_zyx(seg_stack)
expanded = None
for i in range(10):
    start_time = time.time()
    expanded = cle.extend_labels_with_maximum_radius(labels_gpu, expanded, 10)
    elapsed = time.time() - start_time
    print(f"pyclesperanto extend_labels_with_maximum_radius duration: {elapsed}")
# -
benchmarks/extend_labels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Interactive DeepGrow 3D segmentation inference with MONAI: download a sample
# NIfTI volume and a TorchScript model, run the DeepGrow pre-transforms with
# two foreground click points, infer on the GPU, and restore the prediction
# into the original image space for visualization.

# !python -c "import monai" || pip install -q "monai-weekly[gdown, nibabel, tqdm]"

# +
import gdown
import os

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import jit

from monai.apps.deepgrow.transforms import (
    AddGuidanceFromPointsd,
    AddGuidanceSignald,
    ResizeGuidanced,
    RestoreLabeld,
    SpatialCropGuidanced,
)
from monai.transforms import (
    AsChannelFirstd,
    Spacingd,
    LoadImaged,
    AddChanneld,
    NormalizeIntensityd,
    EnsureTyped,
    ToNumpyd,
    Activationsd,
    AsDiscreted,
    Resized
)

max_epochs = 1


def draw_points(guidance, slice_idx):
    """Overlay guidance click points that lie on slice `slice_idx`.

    `guidance` is a pair [foreground_points, background_points]; each point
    leads with its slice index and ends with its two in-plane coordinates.
    Foreground points are drawn as red '+', background points as blue '+'.
    """
    if guidance is None:
        return
    colors = ['r+', 'b+']
    for color, points in zip(colors, guidance):
        for p in points:
            # skip points belonging to other slices
            if p[0] != slice_idx:
                continue
            p1 = p[-1]
            p2 = p[-2]
            plt.plot(p1, p2, color, 'MarkerSize', 30)


def show_image(image, label, guidance=None, slice_idx=None):
    """Plot a 2D image, optionally with a label overlay and guidance points.

    Left panel: the image with a semi-transparent label mask and click points;
    right panel (only when a label is given): the label alone.
    """
    plt.figure("check", (12, 6))
    plt.subplot(1, 2, 1)
    plt.title("image")
    plt.imshow(image, cmap="gray")

    if label is not None:
        # hide zero-valued (background) pixels so the image shows through
        masked = np.ma.masked_where(label == 0, label)
        plt.imshow(masked, 'jet', interpolation='none', alpha=0.7)

    draw_points(guidance, slice_idx)
    plt.colorbar()

    if label is not None:
        plt.subplot(1, 2, 2)
        plt.title("label")
        plt.imshow(label)
        plt.colorbar()
        # draw_points(guidance, slice_idx)
    plt.show()


def print_data(data):
    """Debug helper: print every key of a transform dict with a short summary.

    Plain scalars/containers print their value, array-likes print their
    shape, and the *_meta_dict entries are expanded key by key.
    """
    for k in data:
        v = data[k]

        d = type(v)
        if type(v) in (int, float, bool, str, dict, tuple):
            d = v
        elif hasattr(v, 'shape'):
            d = v.shape

        if k in ('image_meta_dict', 'label_meta_dict'):
            for m in data[k]:
                print('{} Meta:: {} => {}'.format(k, m, data[k][m]))
        else:
            print('Data key: {} = {}'.format(k, d))


# +
# Download data and model
resource = "https://drive.google.com/uc?id=1cIlDXWx4pEFpldoIXMEe-5JeaOxzB05Z"
dst = "_image.nii.gz"
if not os.path.exists(dst):
    gdown.download(resource, dst, quiet=False)

resource = "https://drive.google.com/uc?id=1BcU4Z-wdkw7xjydDNd28iVBUVDJYKqCO"
dst = "deepgrow_3d.ts"
if not os.path.exists(dst):
    gdown.download(resource, dst, quiet=False)

# +
# Pre Processing

# crop / model-input sizes and voxel spacing used by the DeepGrow transforms
roi_size = [256, 256]
model_size = [128, 192, 192]
pixdim = (1.0, 1.0, 1.0)
dimensions = 3

# input dict: image path plus two foreground click points and no background
# clicks
data = {
    'image': '_image.nii.gz',
    'foreground': [[66, 180, 105], [66, 180, 145]],
    'background': [],
}
# NOTE(review): the third coordinate of the first click is taken as the slice
# index for visualization — assumes clicks are given as [x, y, slice]; confirm
# against AddGuidanceFromPointsd's expected point ordering.
slice_idx = original_slice_idx = data['foreground'][0][2]

pre_transforms = [
    LoadImaged(keys='image'),
    AsChannelFirstd(keys='image'),
    Spacingd(keys='image', pixdim=pixdim, mode='bilinear'),
    AddGuidanceFromPointsd(ref_image='image', guidance='guidance', foreground='foreground', background='background', dimensions=dimensions),
    AddChanneld(keys='image'),
    SpatialCropGuidanced(keys='image', guidance='guidance', spatial_size=roi_size),
    Resized(keys='image', spatial_size=model_size, mode='area'),
    ResizeGuidanced(guidance='guidance', ref_image='image'),
    NormalizeIntensityd(keys='image', subtrahend=208.0, divisor=388.0),
    AddGuidanceSignald(image='image', guidance='guidance'),
    EnsureTyped(keys='image')
]

# Apply the transforms one by one, visualizing the guidance slice after each.
original_image = None
for t in pre_transforms:
    tname = type(t).__name__
    data = t(data)
    image = data['image']
    label = data.get('label')
    guidance = data.get('guidance')

    print("{} => image shape: {}".format(tname, image.shape))

    # Before AddGuidanceFromPointsd has run there is no 'guidance' entry yet;
    # fall back to the raw foreground clicks rolled into slice-first order.
    guidance = guidance if guidance else [np.roll(data['foreground'], 1).tolist(), []]
    slice_idx = guidance[0][0][0] if guidance else slice_idx
    print('Guidance: {}; Slice Idx: {}'.format(guidance, slice_idx))
    if tname == 'Resized':
        continue

    # Select the 2D slice to display; the axis holding slices changes as the
    # transforms reorder axes and add channels.
    # NOTE(review): `tname in ('LoadImaged')` is a substring test on a plain
    # string (the parentheses do not make a tuple) — it behaves like equality
    # here only because no other transform name is a substring of 'LoadImaged'.
    image = image[:, :, slice_idx] if tname in ('LoadImaged') else image[slice_idx] if tname in (
        'AsChannelFirstd', 'Spacingd', 'AddGuidanceFromPointsd') else image[0][slice_idx]
    label = None
    show_image(image, label, guidance, slice_idx)

    # Keep intermediate images for later restoration/visualization.
    if tname == 'LoadImaged':
        original_image = data['image']
    if tname == 'AddChanneld':
        original_image_slice = data['image']
    if tname == 'SpatialCropGuidanced':
        spatial_image = data['image']

image = data['image']
label = data.get('label')
guidance = data.get('guidance')
for i in range(image.shape[1]):
    print('Slice Idx: {}'.format(i))
    # show_image(image[0][i], None, guidance, i)

# +
# Evaluation

# Load the TorchScript DeepGrow model and run a forward pass on the GPU.
model_path = 'deepgrow_3d.ts'
model = jit.load(model_path)
model.cuda()
model.eval()

inputs = data['image'][None].cuda()

with torch.no_grad():
    outputs = model(inputs)
outputs = outputs[0]
data['pred'] = outputs

# Post-transforms: sigmoid + threshold, convert to numpy, then undo the
# crop/resize so the prediction lines up with the original volume.
post_transforms = [
    Activationsd(keys='pred', sigmoid=True),
    AsDiscreted(keys='pred', threshold_values=True, logit_thresh=0.5),
    ToNumpyd(keys='pred'),
    RestoreLabeld(keys='pred', ref_image='image', mode='nearest'),
]

pred = None
for t in post_transforms:
    tname = type(t).__name__
    data = t(data)
    image = data['image']
    label = data['pred']
    print("{} => image shape: {}, pred shape: {}; slice_idx: {}".format(tname, image.shape, label.shape, slice_idx))

    # NOTE(review): `tname in 'RestoreLabeld'` is a substring check rather
    # than equality; it works because no other transform name used here is a
    # substring of 'RestoreLabeld'.
    if tname in 'RestoreLabeld':
        pred = label
        image = original_image[:, :, original_slice_idx]
        label = label[original_slice_idx]
        print("PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}".format(
            tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))
        show_image(image, label)
    # branch deliberately disabled via the 'x' prefix — kept as per-slice
    # debugging code
    elif tname == 'xToNumpyd':
        for i in range(label.shape[1]):
            img = image[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][i]
            lab = label[0, i, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][i]
            if np.sum(lab) > 0:
                print("PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}".format(
                    i, img.shape, lab.shape, np.min(lab), np.max(lab), np.sum(lab)))
                show_image(img, lab)
    else:
        image = image[0, slice_idx, :, :].detach().cpu().numpy() if torch.is_tensor(image) else image[0][slice_idx]
        label = label[0, slice_idx, :, :].detach().cpu().numpy() if torch.is_tensor(label) else label[0][slice_idx]
        print("PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}".format(
            tname, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))
        show_image(image, label)

# Show every restored slice that contains any predicted foreground.
for i in range(pred.shape[0]):
    image = original_image[:, :, i]
    label = pred[i, :, :]
    if np.sum(label) == 0:
        continue
    print("Final PLOT:: {} => image shape: {}, pred shape: {}; min: {}, max: {}, sum: {}".format(
        i, image.shape, label.shape, np.min(label), np.max(label), np.sum(label)))
    show_image(image, label)

# + pycharm={"name": "#%%\n"}
# Move the leading axis of the prediction to the end before (optionally)
# writing it out as NIfTI.
pred = data['pred']
meta_data = data['pred_meta_dict']
affine = meta_data.get("affine", None)
pred = np.moveaxis(pred, 0, -1)
print('Prediction NII shape: {}'.format(pred.shape))

# file_name = 'result_label.nii.gz'
# write_nifti(pred, file_name=file_name)
# print('Prediction saved at: {}'.format(file_name))
# -

# remove downloaded files
os.remove('_image.nii.gz')
os.remove('deepgrow_3d.ts')
deepgrow/ignite/inference_3d.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Adversarial autoencoder (AAE) on MNIST: an autoencoder (Q = encoder,
# P = decoder) whose latent codes are pushed towards a standard-normal prior
# by an adversarial discriminator D.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os

# NOTE(review): `tensorflow.examples.tutorials` only exists in TensorFlow 1.x;
# running this notebook requires a TF1 environment (or replacing the loader).
from tensorflow.examples.tutorials.mnist import input_data

# Downloads MNIST into ./MNIST_data on first run.
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)

mb_size = 32                           # minibatch size
z_dim = 5                              # latent-code dimensionality
X_dim = mnist.train.images.shape[1]    # flattened image size (28*28)
y_dim = mnist.train.labels.shape[1]    # one-hot label width
h_dim = 128                            # hidden-layer width of all networks
cnt = 0                                # index of the next saved sample grid
lr = 1e-3                              # learning rate shared by all optimizers

# +
# Encoder: maps a flattened image to a z_dim-dimensional latent code.
Q = nn.Sequential(
    nn.Linear(X_dim, h_dim),
    nn.ReLU(),
    nn.Linear(h_dim, z_dim)
)

# Decoder: reconstructs a flattened image (pixels in [0, 1]) from a code.
P = nn.Sequential(
    nn.Linear(z_dim, h_dim),
    nn.ReLU(),
    nn.Linear(h_dim, X_dim),
    nn.Sigmoid()
)

# Descriminator: probability that a latent code came from the N(0, I) prior.
D = nn.Sequential(
    nn.Linear(z_dim, h_dim),
    nn.ReLU(),
    nn.Linear(h_dim, 1),
    nn.Sigmoid()
)
# -


def reset_grad():
    """Zero the gradients of all three networks between update phases."""
    Q.zero_grad()
    P.zero_grad()
    D.zero_grad()


def sample_X(size, include_y=False):
    """Draw the next MNIST minibatch as a float tensor.

    When `include_y` is True, also return the integer class labels derived
    from the one-hot encoding.
    """
    X, y = mnist.train.next_batch(size)
    X = torch.from_numpy(X)

    if include_y:
        # FIX: the `np.int` alias was removed in NumPy 1.24; use the concrete
        # np.int64 dtype instead (same behavior on 64-bit platforms).
        y = np.argmax(y, axis=1).astype(np.int64)
        y = torch.from_numpy(y)
        return X, y

    return X


Q_optimizer = optim.Adam(Q.parameters(), lr=lr)
P_optimizer = optim.Adam(P.parameters(), lr=lr)
D_optimizer = optim.Adam(D.parameters(), lr=lr)

for it in range(1000000):
    X = sample_X(mb_size)

    # reconstruction phase: train encoder + decoder to reproduce the input
    z_sample = Q(X)
    X_sample = P(z_sample)
    recon_loss = F.binary_cross_entropy(X_sample, X)

    recon_loss.backward()
    P_optimizer.step()
    Q_optimizer.step()
    reset_grad()

    # regularization phase: train D to separate prior samples from encoder codes
    z_real = torch.randn(mb_size, z_dim)
    z_fake = Q(X)

    D_real = D(z_real)
    D_fake = D(z_fake)

    D_loss = - torch.mean(torch.log(D_real) + torch.log(1 - D_fake))

    D_loss.backward()
    D_optimizer.step()
    reset_grad()

    # generator phase: train the encoder to fool the discriminator
    z_fake = Q(X)
    D_fake = D(z_fake)

    G_loss = -torch.mean(torch.log(D_fake))

    G_loss.backward()
    Q_optimizer.step()
    reset_grad()

    # Periodically log losses and save a 4x4 grid of decoded prior samples.
    if it % 1000 == 0:
        print('Iter-{}; D_loss: {:.4}; G_loss: {:.4}; recon_loss: {:.4}'.format(it, D_loss.item(), G_loss.item(), recon_loss.item()))

        samples = P(z_real).detach().numpy()[:16]

        fig = plt.figure(figsize=(4, 4))
        gs = gridspec.GridSpec(4, 4)
        gs.update(wspace=0.05, hspace=0.05)

        for i, sample in enumerate(samples):
            ax = plt.subplot(gs[i])
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

        os.makedirs('aae', exist_ok=True)
        plt.savefig('aae/{}.png'.format(str(cnt).zfill(3)), bbox_inches='tight')
        cnt += 1
        plt.close(fig)
180708-aae.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Compute item frequencies and a cumulative frequency histogram of a small
# integer sample, then plot the cumulative counts as a bar chart.

import numpy as np
from scipy import stats

array = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])

# FIX: scipy.stats.itemfreq was deprecated and subsequently removed from
# SciPy (>= 1.3); np.unique(..., return_counts=True) is the documented
# replacement. Build the same two-column [item, count] array it returned.
items, counts = np.unique(array, return_counts=True)
frequency = np.column_stack((items, counts))
print(frequency)

frequency.shape

# One histogram bin per distinct item in the sample.
bins = frequency.shape[0]

# Cumulative counts over `bins` equal-width bins.
cumulative_frequency = stats.cumfreq(array, bins)
print(cumulative_frequency)

# +
# %matplotlib notebook
import matplotlib.pyplot as plt

x = np.arange(bins)

fig = plt.figure(1)
plt.bar(x, cumulative_frequency.cumcount, width=cumulative_frequency.binsize)
plt.xlim([0, bins+1])
plt.ylim([0, cumulative_frequency.cumcount.max()+1])
frequencies/cumulative_frequency.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Over-representation analysis and gene set enrichment analysis
# We first run the same steps as in the previous notebook on multiple testing.

# +
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
import sys
sys.path.append("..")

# Read local modules for TCGA access and q-value calculations
import tcga_read as tcga
import qvalue

# Fetch (or read cached) TCGA breast-cancer expression and clinical tables.
brca = tcga.get_expression_data("../../data/brca.tsv.gz", 'http://download.cbioportal.org/brca_tcga_pub2015.tar.gz',"data_RNA_Seq_v2_expression_median.txt")
brca_clin = tcga.get_clinical_data("../../data/brca_clin.tsv.gz", 'http://download.cbioportal.org/brca_tcga_pub2015.tar.gz',"data_clinical_sample.txt")

# Keep genes with complete, strictly positive expression; then log2-transform.
brca.dropna(axis=0, how='any', inplace=True)
brca = brca.loc[~(brca<=0.0).any(axis=1)]
brca = pd.DataFrame(data=np.log2(brca),index=brca.index,columns=brca.columns)

# Triple-negative samples: negative for PR, ER and HER2.
brca_clin.loc["3N"]= (brca_clin.loc["PR status by ihc"]=="Negative") & (brca_clin.loc["ER Status By IHC"]=="Negative") & (brca_clin.loc["IHC-HER2"]=="Negative")
tripple_negative_bool = (brca_clin.loc["3N"] == True)


def get_significance_two_groups(row):
    """Welch t-test of one gene (row) between triple-negative and other samples.

    Returns [p, -log10(p), log2 fold change] for use with DataFrame.apply.
    """
    log_fold_change = row[tripple_negative_bool].mean() - row[~tripple_negative_bool].mean()
    p = ttest_ind(row[tripple_negative_bool],row[~tripple_negative_bool],equal_var=False)[1]
    return [p,-np.log10(p),log_fold_change]


pvalues = brca.apply(get_significance_two_groups,axis=1,result_type="expand")
pvalues.rename(columns = {list(pvalues)[0]: 'p', list(pvalues)[1]: '-log_p', list(pvalues)[2]: 'log_FC'}, inplace = True)
qvalues = qvalue.qvalues(pvalues)
# -

# A volcano plot of the triple-negative cancers vs. the other cancers shows a
# large number of both up- and down-regulated genes. We will examine in this
# notebook whether there are common patterns in the up and down regulation.

# +
import matplotlib.pyplot as plt
import seaborn as sns

sns.relplot(data=qvalues,x="log_FC",y="-log_p")
plt.xlabel("$log_2(FC)$")
plt.ylabel("$-log_{10}(p)$")
plt.show()
# -

# ### Over-representation analysis
#
# We use the [gseapy](https://gseapy.readthedocs.io/) module to run an
# over-representation analysis as implemented by
# [Enrichr](http://amp.pharm.mssm.edu/Enrichr/). In the analysis we use the
# [KEGG](https://www.genome.jp/kegg/) database's definition of metabolic
# pathways. This choice can easily be changed to other databases such as GO.
#
# Here we select the genes with $q$ values below $10^{-15}$ as an input.

# +
import gseapy as gp

pathway_db=['KEGG_2019_Human']
background=set(qvalues.index)
gene_list = list(qvalues.loc[qvalues["q"]<1e-15,"q"].index)
output_enrichr=pd.DataFrame()
enr=gp.enrichr(
    gene_list=gene_list,
    gene_sets=pathway_db,
    background=background,
    outdir = None
)
# -

# We clean up the results a bit by keeping only some of the resulting metrics.
# We also apply multiple-hypothesis correction to our results, and list the
# terms with an FDR less than 5%.

kegg_enr = enr.results[["P-value","Overlap","Term"]].rename(columns={"P-value": "p"})
kegg_enr = qvalue.qvalues(kegg_enr)
kegg_enr.loc[kegg_enr["q"]<0.05]

# ### Debugging the Enrichr score
#
# A note, not necessarily part of the course, but for anyone interested. The
# Enrichr method claims to calculate its p values using a hypergeometric
# distribution. When trying to recreate the $p$ value of the first term,
# "Cell cycle", where our differential expression list covers 24 of the
# pathway's 124 genes, we find a different probability than anticipated. I can
# not see the reason for this discrepancy. If you have an explanation, please
# let me know.

import scipy.stats as stats

pvalue = stats.hypergeom.sf(24, len(background), 124, len(gene_list))
pvalue

# ## Gene set enrichment analysis
#
# Subsequently we use gseapy to perform a gene set enrichment analysis (GSEA).

# Phenotype label per sample column: triple negative vs. the rest.
classes = ["TrippleNeg" if tripple_negative_bool[sample_name] else "Respond" for sample_name in brca.columns]

gs_res = gp.gsea(data=brca,
                 gene_sets='KEGG_2016',
                 # gene_sets='Reactome_2013',
                 cls=classes,
                 # cls=class_vector
                 # set permutation_type to phenotype if samples >=15
                 permutation_type='phenotype',
                 permutation_num=100,  # reduce number to speed up test
                 outdir=None,  # do not write output to disk
                 no_plot=True,  # Skip plotting
                 method='signal_to_noise',
                 # method='t_test',
                 processes=4,
                 format='png',
                 ascending=True,
                 max_size=20000)

# The pygsea module's FDR calculation seems to be broken, and we hence remake
# the significance calculations ourselves.

import qvalue

gs_res.res2d.sort_values(by=["pval"],inplace=True)
qvalue.qvalues(gs_res.res2d,"pval")

# We list the 5 top-scoring pathways.

gs_res.res2d.head()

# We display detailed plots of the two best-scoring pathways using gseapy's
# plotting routines.

from gseapy.plot import gseaplot, heatmap

terms = gs_res.res2d.index
gseaplot(gs_res.ranking, term=terms[0], **gs_res.results[terms[0]])

terms = gs_res.res2d.index
gseaplot(gs_res.ranking, term=terms[1], **gs_res.results[terms[1]])
nb/enrichment/gsea.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + pycharm={"name": "#%%\n"} import json def parse_run_results(run_dict: dict): runs_to_parsed_results = {} for name, json_path in run_dict.items(): runs_to_parsed_results[name] = {} timesteps = [] episodes = [] exploitability = [] print(f"parsing {json_path}") with open(json_path, "r") as json_file: for line in json_file: try: json_result = json.loads(s=line) except json.JSONDecodeError: break timesteps_entry = json_result["timesteps_total"] episodes_entry = json_result["episodes_total"] try: exploitability_entry = (json_result.get("avg_policy_exploitability") or json_result.get("z_avg_policy_exploitability") or json_result.get("exploitability") or json_result.get("approx_exploitability")) if exploitability_entry is None: raise KeyError if not any(tag in json_path for tag in ["openspiel", "sparse", "xfdo", "nxdo", "no_limit"]): for i in range(99): try: next(json_file) except StopIteration: break except UnicodeDecodeError: continue except KeyError: continue timesteps.append(timesteps_entry) episodes.append(episodes_entry) exploitability.append(exploitability_entry) runs_to_parsed_results[name]["timesteps"] = timesteps runs_to_parsed_results[name]["episodes"] = episodes runs_to_parsed_results[name]["exploitability"] = exploitability return runs_to_parsed_results # + pycharm={"name": "#%%\n"} import os import tarfile from grl.utils.common import data_dir # extract m_clone_poker_data.tar.gz, containing data to graph, if we haven't already done so if not os.path.exists(f"{data_dir()}/20x_dummy_leduc_nfsp_dqn_gpu_sparse_08.29.31PM_Jan-27-2021zmalq5uj/result.json"): poker_data_path = f"{data_dir()}/m_clone_poker_data.tar.gz" print(f"extracting {poker_data_path}") tar = tarfile.open(poker_data_path, "r:gz") tar.extractall(path=data_dir()) tar.close() # + 
pycharm={"name": "#%%\n"} # Add your own data here. Note that PSRO runs are parsed separately. nxdo_and_nfsp_runs = { "20-Clone Leduc NFSP seed 1": f"{data_dir()}/20x_dummy_leduc_nfsp_dqn_gpu_sparse_08.29.31PM_Jan-27-2021zmalq5uj/result.json", "20-Clone Leduc NFSP seed 2": f"{data_dir()}/20x_dummy_leduc_nfsp_dqn_gpu_sparse_08.29.32PM_Jan-27-2021448z517h/result.json", "20-Clone Leduc NFSP seed 3": f"{data_dir()}/20x_dummy_leduc_nfsp_dqn_gpu_sparse_08.29.34PM_Jan-27-20219a6jc3bj/result.json", "20-Clone Leduc NXDO (Ours) seed 1": f"{data_dir()}/20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.20.33PM_Jan-27-2021/manager_results.json", "20-Clone Leduc NXDO (Ours) seed 2": f"{data_dir()}/20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.29.02PM_Jan-27-2021/manager_results.json", "20-Clone Leduc NXDO (Ours) seed 3": f"{data_dir()}/20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.29.06PM_Jan-27-2021/manager_results.json", "20-Clone Leduc NXDO-VA (Ours) seed 1": f"{data_dir()}/va_20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.29.10PM_Jan-27-2021/manager_results.json", "20-Clone Leduc NXDO-VA (Ours) seed 2": f"{data_dir()}/va_20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.29.12PM_Jan-27-2021/manager_results.json", "20-Clone Leduc NXDO-VA (Ours) seed 3": f"{data_dir()}/va_20x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_08.29.14PM_Jan-27-2021/manager_results.json", "40-Clone Leduc NFSP seed 1": f"{data_dir()}/40x_dummy_leduc_nfsp_dqn_gpu_sparse_10.17.16PM_Jan-29-2021p9347we9/result.json", "40-Clone Leduc NFSP seed 2": f"{data_dir()}/40x_dummy_leduc_nfsp_dqn_gpu_sparse_03.22.12AM_Feb-01-2021x3doc48r/result.json", "40-Clone Leduc NFSP seed 3": f"{data_dir()}/40x_dummy_leduc_nfsp_dqn_gpu_sparse_03.22.21AM_Feb-01-2021a2pbfxcy/result.json", "40-Clone Leduc NXDO-VA (Ours) seed 1": 
f"{data_dir()}/40x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_10.32.40PM_Jan-29-2021/manager_results.json", "40-Clone Leduc NXDO-VA (Ours) seed 2": f"{data_dir()}/va_40x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_10.30.57PM_Feb-02-2021/manager_results.json", "40-Clone Leduc NXDO-VA (Ours) seed 3": f"{data_dir()}/va_40x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_10.31.48PM_Feb-02-2021/manager_results.json", "80-Clone Leduc NFSP seed 1": f"{data_dir()}/80x_dummy_leduc_nfsp_dqn_gpu_sparse_10.16.53PM_Jan-29-2021wa_a_w0w/result.json", "80-Clone Leduc NFSP seed 2": f"{data_dir()}/80x_dummy_leduc_nfsp_dqn_gpu_sparse_03.10.09AM_Feb-01-2021k8g2kjub/result.json", "80-Clone Leduc NFSP seed 3": f"{data_dir()}/80x_dummy_leduc_nfsp_dqn_gpu_sparse_03.10.17AM_Feb-01-2021nm0cc7zc/result.json", "80-Clone Leduc NXDO-VA (Ours) seed 1": f"{data_dir()}/va_80x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_09.23.05PM_Jan-31-2021/manager_results.json", "80-Clone Leduc NXDO-VA (Ours) seed 2": f"{data_dir()}/va_80x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_03.15.40AM_Feb-01-2021/manager_results.json", "80-Clone Leduc NXDO-VA (Ours) seed 3": f"{data_dir()}/va_80x_dummy_leduc_xfdo_dqn_nfsp_gpu_dynamic_threshold_1_aggressive/manager_03.16.57AM_Feb-01-2021/manager_results.json", } psro_runs = { "20-Clone Leduc DQN-PSRO seed 1": f"{data_dir()}/seed_0_20x_dummy_leduc_psro_dqn_gpu_08.29.17PM_Jan-27-2021.json", "20-Clone Leduc DQN-PSRO seed 2": f"{data_dir()}/seed_1_20x_dummy_leduc_psro_dqn_gpu_08.29.19PM_Jan-27-2021.json", "20-Clone Leduc DQN-PSRO seed 3": f"{data_dir()}/seed_2_20x_dummy_leduc_psro_dqn_gpu_08.29.25PM_Jan-27-2021.json", "40-Clone Leduc DQN-PSRO seed 1": f"{data_dir()}/40x_dummy_leduc_psro_dqn_gpu_03.33.30AM_Feb-01-2021.json", "40-Clone Leduc DQN-PSRO seed 2": f"{data_dir()}/40x_dummy_leduc_psro_dqn_gpu_04.59.02AM_Feb-03-2021.json", "40-Clone Leduc DQN-PSRO 
seed 3": f"{data_dir()}/40x_dummy_leduc_psro_dqn_gpu_02.41.27AM_Feb-04-2021.json", "80-Clone Leduc DQN-PSRO seed 1": f"{data_dir()}/80x_dummy_leduc_psro_dqn_gpu_03.13.11AM_Feb-01-2021.json", "80-Clone Leduc DQN-PSRO seed 2": f"{data_dir()}/80x_dummy_leduc_psro_dqn_gpu_03.11.23AM_Feb-01-2021.json", "80-Clone Leduc DQN-PSRO seed 3": f"{data_dir()}/80x_dummy_leduc_psro_dqn_gpu_09.24.25PM_Jan-31-2021.json", } # + pycharm={"name": "#%%\n"} import itertools import pandas as pd import plotly.express as px import plotly.io as pio from grl.utils.common import datetime_str # pio.renderers.default = 'png' pio.renderers.default = "browser" runs_to_parsed_results = parse_run_results(run_dict=nxdo_and_nfsp_runs) timesteps = list(itertools.chain(*[v["timesteps"] for k, v in runs_to_parsed_results.items()])) episodes = list(itertools.chain(*[v["episodes"] for k, v in runs_to_parsed_results.items()])) exploitability = list(itertools.chain(*[v["exploitability"] for k, v in runs_to_parsed_results.items()])) run = list(itertools.chain(*[[k] * len(v["exploitability"]) for k, v in runs_to_parsed_results.items()])) in_dict = { "timesteps": timesteps, "episodes": episodes, "exploitability": exploitability, "run": run, } for run_name, data_path in psro_runs.items(): with open(data_path, "r") as json_file: data = json.load(json_file) filtered_data = {} for key, column in data.items(): filtered_data[key] = [] for i, item in enumerate(column): if data["exploitability"][i] is not None: filtered_data[key].append(data[key][i]) data = filtered_data timesteps = data.get("timesteps") or data.get("timesteps_total") or data["total_steps"] episodes = data.get("episodes") or data.get("episodes_total") or data["total_episodes"] assert len(timesteps) == len(episodes) assert len(data["exploitability"]) == len(timesteps) in_dict["timesteps"].extend(timesteps) in_dict["episodes"].extend(episodes) in_dict["exploitability"].extend(data["exploitability"]) in_dict["run"].extend(run_name for _ in 
data["exploitability"]) df = pd.DataFrame.from_dict(data=in_dict) fig = px.line(df, x="episodes", y="exploitability", color="run", title='Poker Exploitability') # toggles runs to be invisible by default fig.for_each_trace(lambda trace: trace.update(visible="legendonly")) fig.add_annotation( text="Double-click the legend to toggle visibility of all runs.", align='left', showarrow=False, xref='paper', yref='paper', xanchor="left", x=1, y=0.0, bordercolor=None, borderwidth=1 ) fig.show() fig.write_html(f"/tmp/exploitability_{datetime_str()}.html") # + pycharm={"name": "#%%\n"} # + pycharm={"name": "#%%\n"}
examples/graph_poker_results.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# +
# Load the heart-disease dataset; the last column (index 13) is the label.
df = pd.read_csv('HealthData.csv')
df.head()

# +
# !pip install sklearn

# +
df.tail()

# +
# Clean the data BEFORE extracting features: replace +/-inf and NaN with 0.
# (The original notebook extracted X/Y once from the uncleaned frame and then
# again after cleaning; the first, pre-cleaning extraction was dead code and
# has been removed.)
df.replace([np.inf, -np.inf], 0, inplace=True)
df.replace(np.nan, 0, inplace=True)

# +
# Features are all columns but the last; the target is column 13.
X = df.iloc[:, :-1].values
Y = df.iloc[:, 13].values
X, Y

# +
from sklearn.model_selection import train_test_split

# Hold out 25% of the rows for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.25)
Y_test

# +
import matplotlib.pyplot as plt


def draw_histograms(dataframe, features, rows, cols):
    """Draw a rows x cols grid of histograms, one subplot per feature."""
    fig = plt.figure(figsize=(20, 20))
    for i, feature in enumerate(features):
        ax = fig.add_subplot(rows, cols, i + 1)
        dataframe[feature].hist(bins=20, ax=ax, facecolor='green')
        ax.set_title(feature + " Distribution", color='DarkRed')
    fig.tight_layout()
    plt.show()


draw_histograms(df, df.columns, 7, 2)

# +
from sklearn.linear_model import LogisticRegression

classifier = LogisticRegression()
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)

from sklearn.metrics import accuracy_score
accuracy_score(Y_test, y_pred)

# +
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(Y_test, y_pred)
cm

# +
from sklearn.metrics import classification_report
print(classification_report(Y_test, y_pred))

# +
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve

# AUC must be computed from the positive-class probabilities, matching the
# plotted ROC curve below. (The original passed hard 0/1 predictions to
# roc_auc_score, so the legend's "area" disagreed with the drawn curve.)
logit_roc_auc = roc_auc_score(Y_test, classifier.predict_proba(X_test)[:, 1])
fpr, tpr, thresholds = roc_curve(Y_test, classifier.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC1')
plt.show()
Heart/LR.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="991dd280e1aa895006782f2420299f43c365bf3b" import numpy as np import pandas as pd from gensim.models import KeyedVectors from collections import Counter, defaultdict from functools import reduce import random import re import gc from tqdm.auto import tqdm import time import random import string import torch import torch.utils.data import torch.nn as nn import torch.tensor as tensor import torch.autograd as autograd from torch.autograd import Variable from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from sklearn.model_selection import GridSearchCV, StratifiedKFold import scipy.stats import matplotlib.pyplot as plt import matplotlib.patches as mpatches from torchsummary import summary tqdm.pandas() # - # ## Load dataset # + _uuid="243278ccda7ad7c9d6d4352f7fca1d44549d1084" DATA_DIR = "../input" TRAIN_CSV = f"{DATA_DIR}/train.csv" TEST_CSV = f"{DATA_DIR}/test.csv" train_df = pd.read_csv(TRAIN_CSV) test_df = pd.read_csv(TEST_CSV) print(f"Train shape: {train_df.shape}; cols: {list(train_df.columns)}") print(f"Test shape: {test_df.shape}; cols: {list(test_df.columns)}") # + _uuid="b6390d234bfae617a0628e8d72329f5a212d1edb" sincere = train_df.loc[train_df['target'] == 0] insincere = train_df.loc[train_df['target'] == 1] print( f"sincere: {len(sincere)} ({round(100.0 * len(sincere)/len(train_df), 3)}%); " f"insincere: {len(insincere)} ({round(100.0 * len(insincere)/len(train_df), 3)}%); " f"ratio (-/+): {round(len(sincere)/len(insincere), 3)}; " f"ratio (+/-): {round(len(insincere)/len(sincere), 3)}\n" ) print( f"sincere: {sincere.iloc[random.randint(0, len(sincere))]['question_text']}\n\n" f"insincere: {insincere.iloc[random.randint(0, len(insincere))]['question_text']}" ) print() # - # ## Load 
embeddings EMB_GLOVE_FILE = f"{DATA_DIR}/embeddings/glove.840B.300d/glove.840B.300d.txt" EMB_WORD2VEC_FILE = f"{DATA_DIR}/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin" EMB_PARAGRAM_FILE = f"{DATA_DIR}/embeddings/paragram_300_sl999/paragram_300_sl999.txt" EMB_WIKI_FILE = f"{DATA_DIR}/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec" # ### word2vec emb_word2vec = KeyedVectors.load_word2vec_format(EMB_WORD2VEC_FILE, binary=True) print(f"{len(emb_word2vec.vocab)} x {emb_word2vec['the'].size}") print("xiaomi" in emb_word2vec) emb_word2vec.similar_by_vector(emb_word2vec['parameter'], topn=20, restrict_vocab=None) # ### Wiki # + def load_wiki(): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMB_WIKI_FILE) if len(o)>100) return embeddings_index emb_wiki = load_wiki() # - print(f"{len(emb_wiki)} x {emb_wiki['the'].size}") print("xiaomi" in emb_wiki) # ### GloVe # + def load_glove(): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMB_GLOVE_FILE, encoding='latin')) return embeddings_index emb_glove = load_glove() # - print(f"{len(emb_glove)} x {emb_glove['a'].size}") print("parameter" in emb_glove) # + def closest_to(w, n=1): xs = [] for w_ in tqdm(emb_glove): if w == w_: continue xs += [(w_, np.dot(emb_glove[w], emb_glove[w_])/(np.linalg.norm(emb_glove[w]) * np.linalg.norm(emb_glove[w_])))] return [x for x, _ in sorted(xs, key=lambda x:-x[1])[:n]] closest_to("function", n=10) # - # ### ParaGram # + def load_paragram(): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMB_PARAGRAM_FILE, encoding="utf8", errors='ignore') if len(o)>100) return embeddings_index emb_paragram = load_paragram() # - print(f"{len(emb_paragram)} x {emb_paragram['the'].size}") print("paytm" in emb_paragram) # 
## Combine embeddings # + def combine_emb_dicts(*embs): out_emb = defaultdict(lambda : np.zeros(300, dtype=np.float32)) n = len(embs) for emb in tqdm(embs, total=n): for w, e in tqdm(emb.items()): out_emb[w] += (1.0/n) * e return out_emb emb_glove_paragram = combine_emb_dicts(emb_glove, emb_paragram) # - # ## Embeddings testing # + _glove_not_w2v = Counter() _w2v_not_glove = Counter() for w in tqdm(emb_word2vec.vocab): if w not in emb_glove: _w2v_not_glove[w] += 1 for w in tqdm(emb_glove): if w not in emb_word2vec: _glove_not_w2v[w] += 1 # - print(f"glove not w2v: {len(_glove_not_w2v)}; w2v not glove: {len(_w2v_not_glove)}") print("-" * 64) print(random.sample(set(_w2v_not_glove), 10)) print("-" * 64) print(random.sample(set(_glove_not_w2v), 10)) # ## Clean dataset # + _uuid="0e6ac0681544ffa4ddf6af342222d80f9407fda3" PUNCTUATION = { 'sep' : u'\u200b' + "/-'´′‘…—−–", 'keep' : "&", 'remove': '?!.,,"#$%\'()*+-/:;<=>@[\\]^_`{|}~“”’™•°' } SYN_DICT = { 'cryptocurrencies': 'crypto currencies', 'ethereum' : 'crypto currency', 'coinbase' : 'crypto platform', 'altcoin' : 'crypto currency', 'altcoins' : 'crypto currency', 'litecoin' : 'crypto currency', 'fortnite' : 'video game', 'quorans' : 'quora members', 'quoras' : 'quora members', 'qoura' : 'quora', 'brexit' : 'britain exit', 'redmi' : 'phone', 'oneplus' : 'phone', 'hackerrank' : 'programming challenges', 'bhakts' : 'gullible', '√' : 'square root', '÷' : 'division', '∞' : 'infinity', '€' : 'euro', '£' : 'pound sterling', '$' : 'dollar', '₹' : 'rupee', '×' : 'product', 'ã' : 'a', 'è' : 'e', 'é' : 'e', 'ö' : 'o', '²' : 'squared', '∈' : 'in', '∩' : 'intersection', u'\u0398' : 'Theta', u'\u03A0' : 'Pi', u'\u03A9' : 'Omega', u'\u0392' : 'Beta', u'\u03B8' : 'theta', u'\u03C0' : 'pi', u'\u03C9' : 'omega', u'\u03B2' : 'beta', } def tokenize(s: str): return list(map(lambda w: w.strip(), s.split())) def clean_text(x): x = x.lower() for p in PUNCTUATION['sep']: x = x.replace(p, " ") for p in PUNCTUATION['keep']: x = x.replace(p, f" 
{p} ") for p in PUNCTUATION['remove']: x = x.replace(p, "") return x def clean_numbers(x): x = re.sub('[0-9]{5,}', '#####', x) x = re.sub('[0-9]{4}', '####', x) x = re.sub('[0-9]{3}', '###', x) x = re.sub('[0-9]{2}', '##', x) return x def clean_site(x): regex = re.compile('(www)([a-z0-9]+)(com|org)') return regex.sub(lambda m: m.group(2), x) def clean_syn(x): regex = re.compile('(%s)' % '|'.join(SYN_DICT.keys())) return regex.sub(lambda m: SYN_DICT.get(m.group(0), ''), x) def clean_all(x): x = clean_text(x) x = clean_syn(x) x = clean_site(x) return x def build_vocabulary(df: pd.DataFrame) -> Counter: sentences = df.progress_apply(tokenize).values vocab = Counter() s_len = [] for sentence in tqdm(sentences): s_len.append(len(sentence)) for word in sentence: vocab[word] += 1 return vocab, np.array(s_len) # + # clean train_df["clean_question_text"] = train_df["question_text"].progress_apply(clean_all) test_df["clean_question_text"] = test_df["question_text"].progress_apply(clean_all) # vocab train_vocab, train_s_len = build_vocabulary(train_df["clean_question_text"]) test_vocab, test_s_len = build_vocabulary(test_df["clean_question_text"]) # + d_train = scipy.stats.describe(train_s_len) d_test = scipy.stats.describe(test_s_len) print(f"train: {d_train}, median: {np.median(train_s_len)}") print(f"test: {d_test}, median: {np.median(test_s_len)}") nb = 60 plt.figure(figsize=(10, 6)) plt.hist(train_s_len, bins=nb, range=[0, 60], facecolor='red', label='train') plt.hist(test_s_len, bins=nb, range=[0, 60], facecolor='blue', label='test') plt.axvline(x=d_test.mean, color='cyan') plt.title("Sentence length", size=24) plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., prop={'size': 16}) plt.xticks([5*i for i in range(14)]) plt.tight_layout() plt.show() # - # ## Most common words # + _n = 20 exclude = [ "the", "of", "and", "to", "a", "in", "is", "i", "that", "it", "for", "you", "was", "with", "on", "as", "have", "but", "be", "they" ] for w in exclude: del 
train_vocab[w] del test_vocab[w] Tmc = train_vocab.most_common() tmc = test_vocab.most_common() for i in range(_n): print(f"{Tmc[i]} -- {tmc[i]}") # - # ## Less common words # + n = 20 Tmc = train_vocab.most_common()[:-n-1:-1] tmc = test_vocab.most_common()[:-n-1:-1] u = 0 t = 10 for w in train_vocab: u += (train_vocab[w] <= t) print(f"[train] {round(100.0 * u/len(train_vocab), 3)}% words have <= {t} occurences") u = 0 t = 10 for w in test_vocab: u += (test_vocab[w] <= t) print(f"[test] {round(100.0 * u/len(train_vocab), 3)}% words have <= {t} occurences") print() for i in range(n): print(f"{Tmc[i]} -- {tmc[i]}") # + test_not_in_train = Counter() train_not_in_test = Counter() for w in test_vocab: if w not in train_vocab: test_not_in_train[w] += 1 for w in train_vocab: if w not in test_vocab: train_not_in_test[w] += 1 train_uniq_words = set(train_vocab.keys()) test_uniq_words = set(test_vocab.keys()) uniq_words = set(train_uniq_words.union(test_uniq_words)) all_oov = Counter() for w in uniq_words: if w not in emb_glove: all_oov[w] += 1 print(f"train not in test: {len(train_not_in_test)}") print(f"test not in train: {len(test_not_in_train)}") print(f"train uniq: {len(train_uniq_words)}") print(f"test uniq: {len(test_uniq_words)}") print(f"total uniq words: {len(uniq_words)}") # all_oov.most_common(10) ",".join([x for (x, _) in test_not_in_train.most_common(50)]) # - # ## Handle misspellings # + class HandleMisspellings: def __init__(self, all_words_set, words2idx): self.all_words_set = all_words_set self.words2idx = words2idx def prob(self, word): return self.words2idx.get(word, 0) @staticmethod def one_edit(word): letters = string.ascii_lowercase splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes 
+ transposes + replaces + inserts) def known(self, words): return set(words).intersection(self.all_words_set) def candidates(self, word): return self.known([word]).union(self.known(self.one_edit(word))) def correct(self, word): cs = self.candidates(word) return word if len(cs) == 0 else min(cs, key=lambda w: self.prob(w)) misspelling_handler = HandleMisspellings( all_words_set=set(list(emb_glove_paragram.keys())), words2idx={w: i for (i, w) in enumerate(emb_glove_paragram.keys())} ) # - s = time.time() t = 0 misspelling_handler.correct('dang3r') # ## Embbedding coverage def get_emb_vocab_coverage(vocab, emb) -> (Counter, Counter): oov = Counter() # out-of-vocab inv = Counter() # in-vocab oov_uniq_num = inv_uniq_num = 0.0 oov_all_num = inv_all_num = 0.0 for w in tqdm(vocab): if w in emb or misspelling_handler.correction(w) in emb: inv[w] = vocab[w] inv_uniq_num += 1 inv_all_num += vocab[w] else: oov[w] = vocab[w] oov_uniq_num += 1 oov_all_num += vocab[w] cov_uniq = 100.0 * round(inv_uniq_num / len(vocab), 5) cov_all = 100.0 * round(inv_all_num / (inv_all_num + oov_all_num), 5) print(f"oov_uniq: {oov_uniq_num}; inv_uniq: {inv_uniq_num}; vocab_size: {len(vocab)}") print("embeddings-vocabulary coverage (unique): %.3f%%" % cov_uniq) print("embeddings-vocabulary coverage (all text): %.3f%%" % cov_all) return oov, inv oov, inv = get_emb_vocab_coverage(train_vocab, emb_glove_paragram) ",".join([x + f"({y})" for (x, y) in oov.most_common(50)]) oov, inv = get_emb_vocab_coverage(test_vocab, emb_paragram) ",".join([x + f"({y})" for (x, y) in oov.most_common(10)]) oov_thrd = [x for (x, y) in oov.most_common() if y > 0] len([w for w in oov_thrd if w in emb_wiki]) # --- # ## Attention class Attention(nn.Module): def __init__(self, feature_dim, step_dim, with_bias=False): super(Attention, self).__init__() self.with_bias = with_bias self.feature_dim = feature_dim self.step_dim = step_dim weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = 
nn.Parameter(weight, requires_grad=True) if with_bias: self.bias = nn.Parameter(torch.zeros(step_dim), requires_grad=True) def forward(self, x): feature_dim = self.feature_dim step_dim = self.step_dim eij = torch.mm( x.contiguous().view(-1, feature_dim), # (B * step_dim) x feature_dim self.weight # feature_dim x 1 ).view(-1, step_dim) if self.with_bias: eij = eij + self.bias eij = torch.tanh(eij) # B x step_dim a = torch.exp(eij) a = a / (torch.sum(a, dim=1, keepdim=True) + 1e-10) # B x step_dim weighted_input = x * torch.unsqueeze(a, -1) # B x step_dim x feature_dim # sum over step_dim return torch.sum(weighted_input, dim=1) a = Attention(2*70, 70, True) x = torch.zeros((5, 70, 2*70)) y = a(x) y.size() # ## Model class Net(nn.Module): def __init__(self, emb_matrix, hidden_size): super(Net, self).__init__() num_words, emb_size = emb_matrix.shape # sentence maxlen self.hidden_size = hidden_size self.embedding = nn.Embedding(num_words, emb_size) self.embedding.weight = nn.Parameter(torch.tensor(emb_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.bidir_lstm1 = nn.LSTM( input_size=emb_size, hidden_size=self.hidden_size, num_layers=1, bidirectional=True, batch_first=True ) self.lstm1_attention = Attention( feature_dim=2 * self.hidden_size, step_dim=self.hidden_size, with_bias=True ) self.bidir_lstm2 = nn.LSTM( input_size=2 * self.hidden_size, hidden_size=self.hidden_size, num_layers=1, bidirectional=True, batch_first=True ) self.lstm2_attention = Attention( feature_dim=2 * self.hidden_size, step_dim=self.hidden_size, with_bias=True ) self.fc1 = nn.Linear(4 * 2 * self.hidden_size, 2 * self.hidden_size) self.fc2 = nn.Linear(2 * self.hidden_size, 1) nn.init.orthogonal_(self.fc1.weight) nn.init.zeros_(self.fc1.bias) self.dropout_emb = nn.Dropout2d(0.1) self.dropout_rnn = nn.Dropout(0.4) self.dropout_fc = nn.Dropout(0.1) self.relu = nn.ReLU() def forward(self, x): # x: B x sen_maxlen emb = self.dropout_emb(self.embedding(x)) # B x sen_maxlen x 
emb_size out_lstm1, _ = self.bidir_lstm1(emb) # B x sen_maxlen x (2*sen_maxlen) out_lstm1_atn = self.lstm1_attention(out_lstm1) # B x (2*sen_maxlen) out_lstm2, _ = self.bidir_lstm2(self.dropout_rnn(out_lstm1)) # B x sen_maxlen x (2*sen_maxlen) out_lstm2_atn = self.lstm2_attention(out_lstm2) # B x (2*sen_maxlen) # pooling max_pool, _ = torch.max(out_lstm2, dim=1) # B x (2*sen_maxlen) avg_pool = torch.mean(out_lstm2, dim=1) # B x (2*sen_maxlen) # concatenate results out = torch.cat((out_lstm1_atn, out_lstm2_atn, max_pool, avg_pool), dim=1) # B x (4 * 2*sen_maxlen) out = self.fc2(self.dropout_fc(self.relu(self.fc1(out)))).unsqueeze(0) # 1 x B x 1 return out # + bs = 5 x = torch.zeros((bs, 70), dtype=torch.long) m = Net(emb_matrix=np.zeros((1000,300)), hidden_size=70) y = m(x) y.size() # + submission1 = test_df[['qid']].copy() submission1.head() submission2 = pd.read_csv('../input/sample_submission.csv') submission2.head() all(submission1[['qid']] == submission2[['qid']])
quora/kernels/explore.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.7 64-bit ('venv') # metadata: # interpreter: # hash: d6fb9991202371a20b4b1fa688d203658d075847d950dccdda3b59d184d9b3f9 # name: python3 # --- # + import sys from gensim.models.phrases import Phrases, Phraser from gensim.models import Word2Vec import nltk from nltk.tokenize import wordpunct_tokenize from unidecode import unidecode # - # # Création d'un objet qui *streame* les lignes d'un fichier pour économiser de la RAM class MySentences(object): """Tokenize and Lemmatize sentences""" def __init__(self, filename): self.filename = filename def __iter__(self): for line in open(self.filename, encoding='utf-8', errors="backslashreplace"): yield [unidecode(w.lower()) for w in wordpunct_tokenize(line)] # # Chargement et traitement des phrases du corpus infile = f"../data/sents.txt" sentences = MySentences(infile) # Les 3 cellules qui suivent servent à montrer le résultat, mais ne les excécutez pas lorsque vous analysez le corpus entier. # Car lorsque le volume de texte est grand, il vaut mieux utiliser un générateur (comme MySentences ci-dessus) qui économise la RAM en streamant les phrases depuis le disque dur. sentences = [s for s in sentences] len(sentences) sentences[81] # # Détection des bigrams # # Article intéressant sur le sujet : https://towardsdatascience.com/word2vec-for-phrases-learning-embeddings-for-more-than-one-word-727b6cf723cf bigram_phrases = Phrases(sentences) # L'object `phrases` peut être vu comme un large dictionnaire d'expressions multi-mots associées à un score, le *PMI-like scoring*. Ce dictionnaire est construit par un apprentissage sur base d'exemples. 
# See the references below:
# - https://arxiv.org/abs/1310.4546
# - https://en.wikipedia.org/wiki/Pointwise_mutual_information

type(bigram_phrases.vocab)

# It contains many keys, which are all the terms observed in the corpus

len(bigram_phrases.vocab.keys())

# Let us take a key at random:

key_ = list(bigram_phrases.vocab.keys())[144]
print(key_)

# The dictionary gives the score of this co-occurrence:

bigram_phrases.vocab[key_]

# Once the `Phrases` instance has been trained, it can concatenate the
# bigrams in sentences when relevant.

# %time bigram_phrases[sentences[78]]

# # Converting the `Phrases` into a `Phraser` object
#
# `Phraser` is an alias for `gensim.models.phrases.FrozenPhrases`, see
# https://radimrehurek.com/gensim/models/phrases.html.
#
# The `Phraser` is a *light* version of `Phrases`, more efficient for
# transforming sentences by concatenating bigrams.

bigram_phraser = Phraser(phrases_model=bigram_phrases)

# The `Phraser` is an object that converts certain unigrams of a list into
# bigrams when they have been identified as relevant.

# %time bigram_phraser[sentences[78]]

# # Extracting trigrams

# We repeat the operation, this time feeding in the list of bigrams in order
# to extract the trigrams.

trigram_phrases = Phrases(bigram_phraser[sentences])

trigram_phraser = Phraser(phrases_model=trigram_phrases)

# # Creating a corpus of unigrams, bigrams, trigrams

corpus = list(trigram_phraser[bigram_phraser[sentences]])

# # Training a Word2Vec model on this corpus

# %%time
model = Word2Vec(
    corpus,       # the corpus of n-grams we just built
    size=32,      # dimensionality of the word vectors (aka. vector_size in gensim >= 4)
    window=5,     # context size: 5 words before and after the focus word
    min_count=5,  # ignore words appearing fewer than 5 times in the corpus
    workers=4,    # parallelize training of the model over 4 threads
    iter=5        # passes of the neural network over the data to fit the parameters by gradient descent (aka. epochs)
)

# ## Remark
#
# You can see here that the training of the model is parallelized (over 4 workers).
#
# When training is parallelized, 4 "separate" models are trained, each on roughly a quarter of the sentences.
#
# The results are then aggregated into a single model.
#
# We cannot predict which worker gets which sentence, because of the vagaries of parallelization (e.g. one worker being slower, etc.).
#
# As a result, the values may vary slightly from one training run to another.
#
# But overall, the results remain consistent.

outfile = "../data/bulletins.model"  # plain literal; the f-string had no placeholder
model.save(outfile)
module4/s4_model_builder.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; statements and runtime strings are preserved and
# executable formatting is restored.
#
# Explores the Orchid (.NET) well-measurement APIs: loads two .ifrac test
# projects, samples measured-depth (MD-KB) values along one well trajectory,
# converts them to subsurface locations, and tabulates well-head locations.

from collections import namedtuple
import pathlib
import random

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

from orchid import (project as op,
                    project_loader as opl,
                    reference_origins as origin)

import toolz.curried as toolz

from Orchid.FractureDiagnostics import WellReferenceFrameXy, DepthDatum
from Orchid.FractureDiagnostics.RatioTypes import ProppantConcentration, SlurryRate
import UnitsNet
from System import Array, Int32

# Integration-test projects to load (field name -> .ifrac file name).
project_filenames = {
    'bakken': 'frankNstein_Bakken_UTM13_FEET.ifrac',
    'montney': 'Project-frankNstein_Montney_UTM13_METERS.ifrac',
}

test_data_path = pathlib.Path('c:/src/Orchid.IntegrationTestData/')
project_path_names = toolz.valmap(lambda fn: test_data_path.joinpath(fn), project_filenames)
project_path_names

# Load the native .NET project object for each field.
projects = toolz.valmap(
    lambda pn: opl.ProjectLoader(str(pn)).native_project(), project_path_names)
projects

project_units = toolz.valmap(lambda p: p.ProjectUnits, projects)
project_units


@toolz.curry
def to_project_unit(unit, nq):
    """Convert a .NET quantity `nq` to `unit`."""
    return nq.ToUnit(unit)


@toolz.curry
def net_quantity_to_tuple(nq):
    """Split a .NET quantity into its (value, unit) pair."""
    return nq.Value, nq.Unit


def get_project_wells(proj):
    """Return the wells collection of a native project."""
    return proj.Wells.Items


def collect_well_names(wells):
    """Index wells by their display name."""
    return {w.Name: w for w in wells}


# field name -> {well name -> well object}
project_wells = toolz.pipe(
    projects,
    toolz.valmap(get_project_wells),
    toolz.valmap(collect_well_names),
)
project_wells


@toolz.curry
def summarize_well_measurements(well):
    """Summarize two reference elevations of a well as display strings."""
    return {
        'KB above ground': str(well.KellyBushingHeightAboveGroundLevel),
        # FIX: removed a stray trailing comma that was inside str(...) in the
        # original (harmless but accidental).
        'GL above sea level': str(well.GroundLevelElevationAboveSeaLevel),
    }


well_measurements = toolz.pipe(
    project_wells, toolz.valmap(toolz.valmap(summarize_well_measurements)))
well_measurements

# Short display labels for the .NET reference-frame / depth-datum enums.
frames = {
    WellReferenceFrameXy.AbsoluteStatePlane: 'Plane',
    WellReferenceFrameXy.Project: 'Project',
    WellReferenceFrameXy.WellHead: 'Well',
}
datums = {
    DepthDatum.GroundLevel: 'Ground',
    DepthDatum.KellyBushing: 'Kelly',
    DepthDatum.SeaLevel: 'Sea',
}

selected_field_name = 'bakken'
selected_well_name = 'Demo_1H'
selected_frame = WellReferenceFrameXy.WellHead
selected_datum = DepthDatum.KellyBushing

selected_well = toolz.get_in([selected_field_name, selected_well_name], project_wells)
selected_well

selected_well_md_kb_values = selected_well.Trajectory.GetMdKbArray()
len(selected_well_md_kb_values)

# First and last measured-depth values bound the trajectory.
trajectory_bounds_measurements = toolz.pipe(
    selected_well_md_kb_values,
    toolz.get([0, -1]),
)
toolz.pipe(trajectory_bounds_measurements, toolz.map(str), list)

trajectory_bounds = toolz.pipe(
    trajectory_bounds_measurements, toolz.map(lambda v: v.Value), list)
trajectory_bounds


def sample(bounds):
    """Draw one uniform value between the (low, high) bounds."""
    return random.uniform(*bounds)


sample_size = 4
md_kb_at = toolz.pipe(range(sample_size),
                      toolz.map(lambda _: sample(trajectory_bounds)),
                      sorted,
                      list)
md_kb_at


def make_net_quantity_value(v):
    """Wrap a Python number as a UnitsNet QuantityValue."""
    return UnitsNet.QuantityValue.op_Implicit(v)


def make_net_feet(v):
    """Build a UnitsNet Length in feet."""
    return UnitsNet.Length.FromFeet(make_net_quantity_value(v))


def make_net_meters(v):
    """Build a UnitsNet Length in meters."""
    return UnitsNet.Length.FromMeters(make_net_quantity_value(v))


@toolz.curry
def make_net_quantity(make_quantity_func, value):
    """Curried adapter so a quantity factory can be mapped over values."""
    return make_quantity_func(value)


# The Bakken project stores lengths in feet.
net_quantity_func = make_net_feet
sample_md_kb_measurements = toolz.pipe(
    md_kb_at,
    toolz.map(make_net_quantity(net_quantity_func)),
    list
)
toolz.pipe(
    sample_md_kb_measurements,
    toolz.map(str),
    list,
)

sample_values = Array[UnitsNet.Length](sample_md_kb_measurements)
for i in range(len(sample_values)):
    print(str(sample_values[i]))

SimplePoint = namedtuple('SimplePoint', ['x', 'y', 'z'])

raw_points = selected_well.GetLocationsForMdKbValues(
    sample_values, selected_frame, selected_datum)
points = toolz.pipe(
    raw_points,
    toolz.map(lambda p: SimplePoint(
        p.X.ToUnit(project_units[selected_field_name].LengthUnit),
        p.Y.ToUnit(project_units[selected_field_name].LengthUnit),
        p.Depth.ToUnit(project_units[selected_field_name].LengthUnit))),
    list,
)
for point in points:
    print(f'SubsurfacePoint({str(point.x)}, {str(point.y)}, {str(point.z)})')


def abbreviation(u):
    """Short unit label for a UnitsNet length unit, e.g. 'ft'."""
    return UnitsNet.Length.GetAbbreviation(u)


def format_length(v):
    """Render a length quantity as '<4 sig. figs> <unit>'."""
    return f'{v.Value:#.4g} {abbreviation(v.Unit)}'


@toolz.curry
def summarize_point(e):
    """Build one summary row from a (measurement, point) pair."""
    sample_measurement, point = e
    return {
        'field': selected_field_name,
        'well': selected_well_name,
        # NOTE(review): key reads 'mb_kb' in the original (probably meant
        # 'md_kb'); kept unchanged because it names a DataFrame column.
        'mb_kb': f'{format_length(sample_measurement)}',
        'frame': frames[selected_frame],
        'datum': datums[selected_datum],
        'x': f'{format_length(point.x)}',
        'y': f'{format_length(point.y)}',
        'z': f'{format_length(point.z)}',
    }


point_summary = toolz.pipe(
    zip(sample_md_kb_measurements, points),
    toolz.map(summarize_point),
    list,
)
point_summary

pd.DataFrame(data=point_summary)

# Tabulate every well-head location across both projects.
whl_data = []


def combine_data(whl_loc):
    """Render one well-head coordinate as 'value unit'."""
    return str(whl_loc.Value) + ' ' + abbreviation(whl_loc.Unit)


for proj_name in project_wells:
    for wellname, well in project_wells[proj_name].items():
        whl = well.WellHeadLocation
        whl_data.append((proj_name, wellname,
                         combine_data(whl[0]),
                         combine_data(whl[1]),
                         combine_data(whl[2])))

whl_df = pd.DataFrame(whl_data, columns=['Project', 'Well', 'Easting', 'Northing', 'Depth'])
whl_df.head(whl_df.shape[0])
features/notebooks/well_measurements.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; statements and runtime strings are preserved and
# executable formatting is restored.
#
# Data reading, cleaning, and organizing for the Instacart orders dataset:
# samples ~5% of the large order tables, merges them with the product /
# aisle / department lookups, and writes combined frames back to CSV.

# imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random

# Reading the data
aisles = pd.read_csv('data/aisles.csv')
department = pd.read_csv('data/departments.csv')

# Read a random ~5% of the big order tables. The skiprows callable keeps the
# header row (i == 0) and each data row with probability p.
# NOTE(review): sampling is unseeded, so each run draws a different sample;
# seed `random` for reproducibility if needed.
p = 0.05
order_prior = pd.read_csv('data/order_products__prior.csv', header=0,
                          skiprows=lambda i: i > 0 and random.random() > p)
p = 0.05
orders = pd.read_csv('data/orders.csv', header=0,
                     skiprows=lambda i: i > 0 and random.random() > p)

products = pd.read_csv('data/products.csv')

p = 0.05
order_train = pd.read_csv('data/order_products__train.csv', header=0,
                          skiprows=lambda i: i > 0 and random.random() > p)

aisles.head(2)
department.head(2)
order_prior.head(2)
orders.head(2)
products.head(2)
order_train.head(2)
order_prior.shape
orders.shape
order_train.shape
order_prior.shape

# check the orders dataset for null values
orders.isnull().sum()

# Missing values in 'days_since_prior_order' mean the customer made no prior
# purchases; those rows are dropped.
orders.dropna(axis=0, inplace=True)

# Split the orders dataframe into prior, train, and test subsets.
orders['eval_set'].value_counts(normalize=True)  # breakdown of orders dataframe

orders_test_filtered = orders['eval_set'] == 'test'
orders_test = orders[orders_test_filtered]
orders_test.to_csv('data/orders_test.csv')  # saved for modeling

orders_filtered_prior = orders['eval_set'] == 'prior'
orders_prior_details = orders[orders_filtered_prior]

orders_filtered_train = orders['eval_set'] == 'train'
orders_train_details = orders[orders_filtered_train]

# Merge products with prior order lines.
prod_orders_prior = pd.merge(products, order_prior, on='product_id')
prod_orders_prior.head(2)
prod_orders_prior.shape  # check if all rows remain
len(prod_orders_prior['order_id'].unique())

# Merge products with train order lines.
prod_orders_train = pd.merge(products, order_train, on='product_id')
prod_orders_train.shape

# Concatenate the train and prior order lines.
orders_train_prior = pd.concat([prod_orders_train, prod_orders_prior],
                               ignore_index=True, sort=False)
orders_train_prior.shape

# quantity column: one unit per product row
orders_train_prior.loc[orders_train_prior['product_name'].notna(), 'quantity'] = 1
# normalize product names: lowercase and strip commas
orders_train_prior['product_name'] = orders_train_prior['product_name'].str.lower()
orders_train_prior['product_name'] = orders_train_prior['product_name'].str.replace(",", "")
orders_train_prior.to_csv('data/orders_train_prior.csv', index=False)

# Attach order details to the prior order lines.
all_orders_prior = pd.merge(prod_orders_prior, orders_prior_details, on='order_id')
all_orders_prior['product_name'] = all_orders_prior['product_name'].str.lower()
all_orders_prior['product_name'] = all_orders_prior['product_name'].str.replace(",", "")
all_orders_prior.to_csv('data/all_orders_prior.csv', index=False)

# Attach order details to the train order lines.
all_orders_train = pd.merge(prod_orders_train, orders_train_details, on='order_id')
all_orders_train['product_name'] = all_orders_train['product_name'].str.lower()
all_orders_train['product_name'] = all_orders_train['product_name'].str.replace(",", "")
all_orders_train.to_csv('data/all_orders_train.csv', index=False)

# Combine all train and prior orders.
all_orders = pd.concat([all_orders_train, all_orders_prior],
                       ignore_index=True, sort=False)

# quantity column on the combined frame
all_orders.loc[all_orders['product_name'].notna(), 'quantity'] = 1

# Join the aisle and department lookups.
all_orders = pd.merge(all_orders, aisles, on='aisle_id')
all_orders = pd.merge(all_orders, department, on='department_id')
all_orders['product_name'] = all_orders['product_name'].str.lower()
all_orders['product_name'] = all_orders['product_name'].str.replace(",", "")
all_orders.to_csv('data/all_orders.csv', index=False)
cleaning/reading_cleaning_data.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; statements and runtime strings are preserved and
# executable formatting is restored.
#
# 03.02 - SUPERVISED ALGORITHMS: demos of linear, decision-tree, random
# forest, and naive-Bayes classifiers on synthetic 2-D datasets.

# !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py
import init; init.init(force_download=False); init.get_weblink()

from sklearn.datasets import *
import numpy as np
from local.lib import mlutils
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import Image

# SUPERVISED ALGORITHMS — we are given X and y.
from sklearn.tree import DecisionTreeClassifier

X, y = make_moons(400, noise=0.1)
X.shape, y.shape

## KEEPOUTPUT
plt.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color="red", label="class 0")
plt.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color="blue", label="class 1")
plt.legend(); plt.grid();

X[:10]
y[:10]

# Linear classifier
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X, y)
lr.score(X, y)

mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(lr.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)
plt.xlabel("col_0"); plt.ylabel("col_1");

# Decision trees
dt = DecisionTreeClassifier(max_depth=2)
dt.fit(X, y)
y_preds = dt.predict(X)
dt.score(X, y)

## KEEPOUTPUT
dt = DecisionTreeClassifier(max_depth=3); dt.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(dt.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)
plt.xlabel("col_0"); plt.ylabel("col_1");

# In the tree plot:
# - samples: nb of data points in the split
# - value: nb samples of each class in the split
# - class: class assigned to the split (the majority class in value)

## KEEPOUTPUT
from sklearn.tree import plot_tree
plt.figure(figsize=(10, 5))
plot_tree(dt, feature_names=["col_0", "col_1"], class_names=["red", "blue"], rounded=True);

## KEEPOUTPUT
dt = DecisionTreeClassifier(max_depth=10); dt.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(dt.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)
plt.xlabel("col1"); plt.ylabel("col2");

## KEEPOUTPUT
plt.figure(figsize=(15, 8))
plot_tree(dt, feature_names=["col_0", "col_1"], class_names=["red", "blue"], rounded=True);

X, y = make_circles(400, noise=0.05)

## KEEPOUTPUT
dt = DecisionTreeClassifier(max_depth=10); dt.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(dt.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)
plt.xlabel("col1"); plt.ylabel("col2");

## KEEPOUTPUT
plt.figure(figsize=(15, 8))
plot_tree(dt, feature_names=["col_0", "col_1"], class_names=["red", "blue"], rounded=True);

# Random forests
## KEEPOUTPUT
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=10, max_depth=10)
rf.fit(X, y)
# NOTE(review): the original printed rf.score(X, y) twice; one of the two was
# probably meant to be a held-out test score. Kept unchanged.
print(rf.score(X, y), rf.score(X, y))
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(rf.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)

## KEEPOUTPUT
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(max_depth=10, n_estimators=100)
rf.fit(X, y)
print(rf.score(X, y))
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(rf.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)

# Naive Bayes
## KEEPOUTPUT
from sklearn.naive_bayes import GaussianNB
gb = GaussianNB()
gb.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(gb.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)

## KEEPOUTPUT
X, y = make_circles(300, noise=.1)
gb = GaussianNB()
gb.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(gb.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)

## KEEPOUTPUT
X, y = make_blobs(300, centers=2, cluster_std=2)
# NOTE(review): the next line immediately overwrites the blobs dataset above.
X, y = make_moons(400, noise=0.1)
gb = GaussianNB()
gb.fit(X, y)
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(gb.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)

# note: if any of the variables is not independent, the method fails
## KEEPOUTPUT
mc = mlutils.Example_Bayes2DClassifier(mean0=[2.5, 2.5], cov0=[[.9, .9], [0.8, 1.1]],
                                       mean1=[1, 2.5], cov1=[[0.5, .8], [0.4, 0.9]])
X, y = mc.sample(200)
mlutils.plot_2Ddata_with_boundary(mc.predict, X, y,
                                  line_width=3, line_color="green", dots_alpha=.3)
plt.title(" $\hat{\epsilon}=%.3f$" % mc.score(X, y) + " $\epsilon=%.3f$" % mc.analytic_score())
plt.grid();

## KEEPOUTPUT
gb.fit(X, y)
print(gb.score(X, y))
mlutils.plot_2Ddata(X, y, dots_alpha=.3)
mlutils.plot_2D_boundary(gb.predict, np.min(X, axis=0), np.max(X, axis=0),
                         line_width=3, line_alpha=.7, label=None)
content/NOTES 03.02 - SUPERVISED ALGORITHMS.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; executable formatting is restored.
#
# Project Euler 17 — Number letter counts: if the numbers 1..1000 are written
# out in British English ("three hundred and forty-two"), how many letters
# are used in total? Spaces and hyphens are not counted.

# %%time
lowertext = """one two three four five six seven eight nine ten eleven twelve thirteen fourteen fifteen sixteen seventeen eighteen nineteen"""
highertext = """ten twenty thirty forty fifty sixty seventy eighty ninety hundred thousand"""

# Index 0 is a placeholder so lowerletters[n] is the letter count of the word
# for n (1..19); higherletters[1..9] are the tens words, [10] = 'hundred',
# [11] = 'thousand'.
lowerletters = [0] + [len(word) for word in lowertext.split()]
higherletters = [0] + [len(word) for word in highertext.split()]


def two_digits(i):
    """Letters for the last two digits of i (e.g. 42 -> 'forty two' -> 8)."""
    i = i % 100
    if i < 20:
        return lowerletters[i]  # e.g. 'thirteen'
    return higherletters[i // 10] + lowerletters[i % 10]  # e.g. 'twenty one'


def hundreds(i):
    """Letters for the hundreds part, incl. 'hundred' and the British 'and'.

    Returns 0 when i has no hundreds part (i % 1000 == 0).
    """
    i = i % 1000
    if i == 0:
        return 0
    if i % 100 == 0:  # e.g. 'three hundred' — no 'and'
        return lowerletters[i // 100] + higherletters[10]
    return lowerletters[i // 100] + higherletters[10] + 3  # +3 for 'and'


def thousands(i):
    """Letters for the thousands part (e.g. 1000 -> 'one thousand' -> 11)."""
    i = i % 10000
    return lowerletters[i // 1000] + higherletters[11]


def _letters(i):
    """Total letters for a single number i in 1..9999."""
    if i < 100:  # e.g. 'twenty one'
        return two_digits(i)
    if i < 1000:  # e.g. 'two hundred and thirty four'
        return hundreds(i) + two_digits(i)
    return thousands(i) + hundreds(i) + two_digits(i)


nletters = sum(_letters(i) for i in range(1, 1001))
print(nletters)
0001/Problem_17.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; statements and runtime strings are preserved and
# executable formatting is restored.
#
# "manage utils dir": walks a project tree, builds DataFrames of directory /
# file counts, runs toy regressions and classifier comparisons on them, and
# draws histograms / pie charts of file extensions using project helpers
# from utils_stats.

print(__doc__)

# Critical imports
import warnings; warnings.filterwarnings("ignore")

# Imports through 'from' syntax
from itertools import islice
from pprint import pprint
from sklearn import preprocessing

# Standard imports
import copy
import os
import sys
import shutil
import time

# Imports through 'as' syntax
import numpy as np
import pandas as pd

# Imports for handling graphics
# %matplotlib inline
import matplotlib as mpl
from matplotlib import pyplot as plt
import chart_studio.plotly.plotly as py
import seaborn as sns; sns.set()

# Custom imports (project-local helpers)
from utils_stats.project_stats_utils import *
from utils_stats.fit_utils import *

# Set seed for code repeatability
np.random.seed(0)

# Fetch datasets
ROOT_DIR_PATH = ".."

resources_list = dir_traversal_by_os_walk(root_dir_path=ROOT_DIR_PATH, verbose=0)
df = get_df_from_list_of_os_walk(resources_list)
df.head(5)

df_indexed = get_df_from_list_of_os_walk_numeric_indexed(
    resources_list=resources_list, columns="dirs,files".split(","))
assert df_indexed is not None, "df_indexed is None"
df_indexed.info()
df_indexed.head(5)

criteria = (df_indexed["dirs"] != 0) & (df_indexed["files"] != 0)
df_indexed[criteria]

df_indexed_v2 = get_df_from_list_of_os_walk_numeric_indexed_v2(resources_list=resources_list)
assert df_indexed_v2 is not None, "df_indexed_v2 is None"
df_indexed_v2.info()
df_indexed_v2.head(5)

# sns.pairplot(df_indexed_v2, hue='???', size=1.5)
sns.pairplot(df_indexed_v2, height=1.5)

# Perform Machine Learning
criteria = (df_indexed_v2["dirs"] != 0) & (df_indexed_v2["files"] != 0)
# df_tmp = df_indexed[criteria]
df_tmp = df_indexed_v2
y = df_tmp["files"].values
X = df_tmp[["dirs", "dirs_size"]].values

# ax = sns.scatterplot(x="dirs", y="files", data=df_tmp)
linear_regression_custom(X[:, 0], y, randomize_data=True)
err_list = classifier_comparison(X, y, start_clf=0, stop_clf=10, verbose=0, record_errors=True)
pprint(err_list)

# Show Some Graphs (Histograms & Pie Charts)
df = get_df_from_list_of_os_walk_numeric(resources_list=resources_list)
assert df is not None, "df is None"
df.info()
# print(df.head(df.shape[0]))
df.head(5)

df[["dirs", "files"]].agg(['sum', 'max', 'min', 'mean', 'std'])
df[["dirs", "files"]].describe(include='all')

res_stats = df[["dirs", "files"]].describe(include='all')
loc_max: int = res_stats.index.get_loc('max')
loc_min: int = res_stats.index.get_loc('min')
max_val = int(max(res_stats.loc[res_stats.index[loc_max]]))
min_val = int(min(res_stats.loc[res_stats.index[loc_min]]))
bins = range(min_val, max_val)
df[["dirs", "files"]].hist(bins=bins)
type(res_stats.index)


def file2ext(file_name: str) -> str:
    """Return the extension of `file_name` (with leading dot), or '-' if none."""
    tmp_res: str = os.path.basename(file_name)
    filename, file_extension = os.path.splitext(tmp_res)
    if len(file_extension) == 0:
        return "-"
    return file_extension


df = get_df_from_list_of_os_walk(resources_list=resources_list,
                                 columns="root,dirs,files,files size", verbose=0)
df.info()

# type(df[["files"]].applymap(file2ext))
pd.unique(df[["files"]].applymap(file2ext)["files"].values)
# type(df[["files"]].applymap(file2ext)["files"].value_counts())
df[["files"]].applymap(file2ext)["files"].value_counts().to_frame().T

ext_df: pd.DataFrame = df[["files"]].applymap(file2ext)

meta_data_img = {'title': 'Hist', 'ylabel': 'Freq', 'xlabel': 'Ext'}
show_pie_by_variable_from_df(ext_df, variable_name="files",
                             fig_name="pie_chart.png", meta_data_img=meta_data_img)

meta_data_img = {'title': 'Hist', 'ylabel': 'Freq', 'xlabel': 'Ext'}
show_histogram_by_variable_from_df(ext_df, variable_name="files",
                                   fig_name="plain_hist.png", meta_data_img=meta_data_img)

meta_data_img = {'title': f'Hist | Normalize', 'ylabel': 'Freq', 'xlabel': 'Ext'}
show_histogram_by_variable_from_df(ext_df, variable_name="files",
                                   rescale_data_tech="Normalize",
                                   fig_name="pie_chart.png", meta_data_img=meta_data_img)

meta_data_img = {'title': 'Hist | MinMaxScaler', 'ylabel': 'Freq', 'xlabel': 'Ext'}
show_histogram_by_variable_from_df(ext_df, variable_name="files",
                                   rescale_data_tech="MinMaxScaler",
                                   fig_name="pie_chart.png", meta_data_img=meta_data_img)

meta_data_img = {'title': 'Hist | StandardScaler', 'ylabel': 'Freq', 'xlabel': 'Ext'}
show_histogram_by_variable_from_df(ext_df, variable_name="files",
                                   rescale_data_tech="StandardScaler",
                                   fig_name="pie_chart.png", meta_data_img=meta_data_img)

# FIX: in the stored copy this call was garbled — a commented-out argument
# line ran into the live argument list. Reconstructed as one coherent call,
# keeping the old variant as a comment.
show_histograms_by_scaler_tech(
    ext_df,
    variable_name="files",
    # rescale_data_techs=["StandardScaler", "MinMaxScaler",], fig_name="all_hists_chart.png")
    rescale_data_techs=["StandardScaler", "MinMaxScaler", "Normalize"],
    figsize=(10, 10),
    fig_name="all_hists_chart.png",
    show_default=True)

# References — Scikit-Learn:
# Objects:
# - (StandardScaler) https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# Examples:
# - (Ordinary Least Squares and Ridge Regression Variance) https://scikit-learn.org/stable/auto_examples/linear_model/plot_ols_ridge_variance.html#sphx-glr-auto-examples-linear-model-plot-ols-ridge-variance-py
# - (Classifier comparison) https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#sphx-glr-auto-examples-classification-plot-classifier-comparison-py
pittsburgh-bridges-data-set-analysis/project_stats/tempalte_project_dataset_stats-v1.0.0.ipynb
# Reconstructed from a jupytext "light" notebook whose line structure was
# collapsed in storage; executable formatting is restored. Original Spanish
# markdown prose is kept verbatim; review notes added in English are marked
# FIX / NOTE(review).
#
# Course notebook on Python dictionaries (hash tables), sets, set-based
# similarity measures, and a small vocabulary analysis of Gutenberg texts.

# Nota: se requiere revisar el material referente a Python y los videos de
# Algoritmos y Estructuras de Datos

# # Estructura de datos: Diccionarios
# Autores: <NAME>, <NAME>, <NAME>, <NAME>
#
# Los diccionarios en Python estan implementados como tablas hash.
# Como se vió en notebooks anteriores, la sintaxis es mediante llaves y `:`,
# así como también mediante la función `dict`.

d = {'nombre': 'Juan', 'edad': 22, 'numeros': [1, 2, 3]}
print(d)
d2 = dict(nombre='Pedro', edad=28)
print(d2)

# Internamente, el interprete de Python creará una tabla hash, asociando la
# llave con su valor.

# # Recuperar valores
# Para acceder a los valores asociados a cada llave, el procedimiento es
# acceder, claro esta, por medio de la llave. El primer paso es determinar la
# posición o celda dentro de la tabla hash donde se encuentra o debe
# encontrarse el par llave-valor. Para esto, se aplica la función `hash` a la
# llave, la cual mapea dicha llave a un número entero. Esta función es
# determinista, i.e., para una misma llave siempre regresará el mismo valor,
# y para asegurar el desempeño, la distribución de ese valor no esta asociado
# con la distribución misma de los datos. En general, el hash puede ser un
# número muy largo, e incluso negativo, por lo que debe mapearse a una celda
# válida en la tabla hash (diccionario); éste último mapeo puede ser tan
# simple como el residuo de la división del valor hash entre el tamaño de la
# tabla o una transformación lineal para encontrar el índice en la tabla; y
# la estrategía precisa dependerá de las características de los valores
# mapeados. La esperanza es que cada celda en la tabla tenga ningún o un
# número constante (y pequeño) de valores asignados. Con esto también podemos
# inferir que se siempre se requerirá una tabla con muchas más celdas que
# datos asignados.

print(d2["nombre"])

# ### La función `hash` se puede aplicar sobre valores inmutables y los
# mapeará a un entero, y ese valor de entero es poco probable que aparezca
# para otras llaves diferentes.
edadvar = "edad"
edadvar1 = "edad1"
# FIX: the original was missing the closing parenthesis of print(...).
print("hash-edad-literal: {}\nhash-edad-variable: {}".format(hash('edad'), hash(edadvar)))

# ### Aun los datos con pequeñas variaciones deberían ser mapeados a
# diferentes enteros, e incluso sin realación entre ellos
edadvar1 = "1111111111111edad"
edadvar2 = "1111111111111edad0"
print("hash-edad1: {}\nhash-edad2: {}".format(hash(edadvar1), hash(edadvar2)))

# ## Asignar valores
# Al asignar valores, se ejecuta la función `hash` sobre la llave; y se
# coloca en la posición indicada (después de pasarlo a un índice valido en la
# tabla); es posible que la tabla tenga que crecer si la llave no existía.
# Hay que tener en cuenta que si hay colisiones, es decir, que otras llaves
# diferentes sean mapeados a la misma celda de la tabla hash, entonces se
# debe aplicar algún mecánismo de solución de la colisión.
d = {'nombre': 'Juan', 'edad': 22}
print(d)
d["edad"] = 25
print(d)
d["nacionalidad"] = "MX"
print(d)

# ## Borrar pares-valor
# Para borrar una entrada en la tabla hash, primero se debe determinar en que
# posición se encuentra dicho valor, para esto internamente se aplica la
# función `hash` y se mapea a una celda valida, luego si existe se procederá
# a borrar la referencia del par llave-valor. Es posible que la tabla se
# reduzca de tamaño.
print(d)
del d['edad']
print(d)

# # Membresía
# Verificar si una llave existe dentro del diccionario es muy similar a un
# acceso a los datos, sin embargo, solo contestará con un valor booleano en
# lugar de acceder a los datos.
# FIX: the original read `'f' in d4`, but `d4` is never defined anywhere in
# the notebook (NameError); `d` is the dictionary in scope.
'f' in d

# # Conteo
# Los diccionarios son una estructura fundamental en Python, y habrá muchas
# tareas que se pueden resolver con ellos. Por ejemplo, la ocurrencia de cada
# cáracter en un texto se puede contar como sigue:
s = "Hola mundo hola todos"
dat = {}
for c in s:
    if c in dat:
        dat[c] = dat[c] + 1
    else:
        dat[c] = 1
print(dat)
print(list(dat.keys()))
print(list(dat.values()))

# ### Por ejemplo, en el paquete `collections` se encuentran los contadores,
# que adicionalmente tienen otras funciones asociadas
from collections import Counter
cnt = Counter(s)
print(cnt)
print(cnt.most_common(3))

# ### Podemos graficar las frecuencias como un histograma
# %matplotlib inline
import matplotlib.pyplot as plt

k = list(dat.keys())
v = list(dat.values())
print(k, v)
x_pos = [i for i, _ in enumerate(k)]
plt.xticks(x_pos, k)
plt.bar(x_pos, v, align='center')

# # Conjuntos: set
# Un conjunto se representa como un diccionario sin valor asociado, esto
# quiere decir que funciona por medio de la función `hash` y que cualquier
# valor _hasheable_ puede ser utilizado en el diccionario.
A = set(['aaa', '1', 'b', 3, '4', 'c', 'a', 'a', 'a'])  # inicializando desde una colección
B = set(['aa', '1', 'bb', 3, '5', 'c', 'a'])
print("A:", A)
print("B:", B)

# ### Recordando, al igual que los diccionarios, los conjuntos no soportan
# valores duplicados
AA = set("anita lava la tina")
print(AA)

# ### Los conjuntos soportan operaciones de unión e intersección eficientes;
# la eficiencia viene del uso de tablas hash en la implementación.
print("unión:", A.union(B))
print("intersección:", A.intersection(B))
print("diferencia:", A.difference(B))
print("membresía {} in {}: {}".format('aa', A, 'aa' in A))
print("membresía {} in {}: {}".format('aa', B, 'aa' in B))

# # Uso de estructuras para cálculo de similitudes
# Existen muchos algoritmos para análisis de datos que útilizan el concepto
# de función de similitud, esto es, de funciones que miden que tanto se
# parecen dos objetos entre sí. A continuación veremos como se pueden
# implementar algunas funciones de similitud utilizando las estructuras que
# se han visto.

# ### Coeficiente de Jaccard
# $$J(A,B) = \frac{|A \cap B|}{|A \cup B|}$$
def jaccard(A, B):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two sets."""
    tI = len(A.intersection(B))
    tU = len(A.union(B))
    return tI / tU


print(jaccard(set("casuchas"), set("casitas")))
print(jaccard(set("casuchas"), set("casuchas")))
print(jaccard(set("casuchas"), set("cartera")))


# ### Coeficiente de Dice
# $$D(A,B) = \frac{ 2 \times |A \cap B|}{|A| + |B|}$$
def dice(A, B):
    """Sørensen–Dice coefficient 2·|A ∩ B| / (|A| + |B|) of two sets."""
    tI = len(A.intersection(B))
    # FIX: the original returned tI / (len(A) + len(B)), missing the factor 2
    # of the formula above (identical sets scored 0.5 instead of 1).
    return 2 * tI / (len(A) + len(B))


print(dice(set("casuchas"), set("casitas")))
print(dice(set("casuchas"), set("casuchas")))
print(dice(set("casuchas"), set("cartera")))

# # Analizando el vocabulario de algunos libros del proyecto Gutenberg
# El análisis de colecciones de documentos no estructurados es una de las
# tareas de la ciencia de datos; en particular, una tarea relacionada con
# procesamiento de lenguaje natural. Como ejemplo, se utilizarán libros del
# proyecto Gutenberg (http://www.gutenberg.org).
#
# Lectura del texto en memoria:
# FIX: dropped a redundant f.close() — the `with` block already closes the file.
with open("celestina.txt", 'r', encoding='utf8') as f:
    text = f.readlines()
text[:2]

# ## Preprocesamiento de los datos
# Una etapa fundamental para el análisis es el preprocesamiento, que es un
# tratamiento a los datos para prepararlos como entradas a métodos más
# complejos.
# remover lineas vacias
text2 = []
with open("celestina.txt", 'r', encoding='utf8') as f:
    for l in f.readlines():
        if not l.strip():
            continue
        if l.strip().startswith('#'):
            continue
        text2.append(l.strip().split())
text2[:3]

# ## Distribución de los símbolos y conteo de términos
# La distribución de los símbolos es una propiedad de un texto que se puede
# obtener de la siguiente forma:
from collections import Counter

tt = []
for t in text2:
    for w in t:
        for c in w:
            tt.extend(c.lower())
print(tt[:10])
di = Counter(tt)
print(di.most_common(10))

# ### Histograma de los símbolos en _La Celestina_
# %matplotlib inline
import matplotlib.pyplot as plt

k = sorted(di.items(), key=lambda x: x[-1], reverse=True)
v = [_[-1] for _ in k]
k = [_[0] for _ in k]
x_pos = [i for i, _ in enumerate(k)]
plt.xticks(x_pos, k)
plt.bar(x_pos, v, align='center')

# ## Términos más frecuentes
# El vocabulario de un texto, palabras, se puede generar como sigue:
text2 = []
with open("celestina.txt", 'r', encoding='utf8') as f:
    for l in f.readlines():
        if not l.strip():
            continue
        if l.strip().startswith('#'):
            continue
        text2.append(l.strip().split())
text2[0:2]

from collections import Counter

tt = []
for t in text2:
    for w in t:
        tt.append(w.lower())
di = Counter(tt)
di.most_common(30)

# ### Graficando los 50 términos más comunes
k = di.most_common(50)
v = [_[-1] for _ in k]
k = [_[0] for _ in k]
x_pos = [i for i, _ in enumerate(k)]
plt.xticks(x_pos, k, rotation=45)
plt.bar(x_pos, v, align='center')

s1 = "vudú"
s1.replace('ú', 'u')

# # Ejercicios
# - Calcular el vocabulario del libro _Moby Dick_ (archivo `moby.txt`).
# - Graficar las 1000 palabras más frecuentes.
# - Calcular y graficar el histograma de las palabras por su longitud.
06-Hash-Sets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (Conda Def)
#     language: python
#     name: py3def
# ---

# # Stochastic quasi-Newton optimization
#
# This is an example IPython notebook showing the usage of the
# [stochQN](https://www.github.com/david-cortes/stochQN) package on the BibTeX
# dataset, which can be found under the
# [Extreme Classification Repository](http://manikvarma.org/downloads/XC/XMLRepository.html).
#
# ### Parsing the data:

# +
import ast
import re

import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer


def parse_data(file_name, return_wide=True, return_sp=False, convert_sp=False):
    """Parse a BibTeX extreme-classification data file.

    Each data line looks like ``"lab,lab,... col:val col:val ..."``; lines that
    do not start with a digit carry features only (empty label list).

    Parameters
    ----------
    file_name : str
        Path to the data file; its first line (a size header) is skipped.
    return_wide : bool
        If True, binarize the label lists into a dense indicator matrix.
    return_sp : bool
        If True, keep the features sparse instead of densifying them.
    convert_sp : bool
        With ``return_sp=True``, convert the per-row feature dicts to CSR.

    Returns
    -------
    tuple
        ``(features, labels)`` -- features as a dense ndarray, a CSR matrix,
        or a list of dicts; labels as an indicator matrix (``return_wide``)
        or a list of lists.
    """
    features = []
    labels = []
    with open(file_name, 'rt') as f:
        f.readline()  # skip the header line
        for l in f:
            # Raw strings avoid the invalid "\s" escape in plain literals, and
            # ast.literal_eval replaces eval(), which would execute arbitrary
            # expressions coming from the input file.
            if bool(re.search(r"^[0-9]", l)):
                g = re.search(r"^(([0-9]{1,2},?)+)\s(.*)$", l)
                labels.append([int(i) for i in g.group(1).split(",")])
                features.append(ast.literal_eval("{" + re.sub(r"\s", ",", g.group(3)) + "}"))
            else:
                l = l.strip()
                labels.append([])
                features.append(ast.literal_eval("{" + re.sub(r"\s", ",", l) + "}"))

    if not return_sp:
        features = pd.DataFrame.from_dict(features).fillna(0).values
    else:
        if convert_sp:
            features = np.array([(i, k, v)
                                 for i in range(len(features))
                                 for k, v in features[i].items()])
            features = coo_matrix((features[:, 2],
                                   (features[:, 0].astype('int32'),
                                    features[:, 1].astype('int32'))))
            features = csr_matrix(features)

    if return_wide:
        mlb = MultiLabelBinarizer()
        y = mlb.fit_transform(labels)
        return features, y
    else:
        return features, labels


X, Y = parse_data("Bibtex_data.txt", return_wide=True)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=1)
print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)
# -

# ### Optimizers and parameters:

# +
from stochqn import oLBFGS, adaQN

# scikit-learn made the logistic module private in 0.22; try the new path
# first so the notebook keeps working on recent versions.
try:
    from sklearn.linear_model._logistic import _multinomial_loss_grad
except ImportError:
    from sklearn.linear_model.logistic import _multinomial_loss_grad


### Functions for optimization
def grad_fun(w, X, Y, sample_weight=None, reg_param=None):
    """Gradient of the L2-regularized multinomial loss at ``w``."""
    if sample_weight is None:
        sample_weight = np.ones(X.shape[0])
    return _multinomial_loss_grad(w, X, Y, reg_param, sample_weight)[1]


def obj_fun(w, X, Y, sample_weight=None, reg_param=None):
    """L2-regularized multinomial loss at ``w``."""
    if sample_weight is None:
        sample_weight = np.ones(X.shape[0])
    return _multinomial_loss_grad(w, X, Y, reg_param, sample_weight)[0]


### Random initialization for the weights
np.random.seed(1)
w0 = np.random.normal(size=(X.shape[1] + 1) * Y.shape[1])
w_sgd = w0.copy()
w_rmsprop = w0.copy()

### L2 regularization strength
reg_param = 1e-1

### Shuffling the data
random_order = np.argsort(np.random.random(size=X_train.shape[0]))
X_train = X_train[random_order]
Y_train = Y_train[random_order]

### Step sizes for each optimizer - these haven't been tuned very well
step_size_sgd = 1e-1
step_size_rmsprop = 1e-2
step_size_olbfgs = 1e-2
step_size_adaqn = 1e-1

### Other parameters for RMSProp
reg_rmsprop = 1e-4
w_div_rmsprop = 0.9
sum_grad_sq_rmsprop = np.zeros(w_rmsprop.shape[0])

### Initializing optimizers from this package
adaqn = adaQN(x0=w0.copy(), step_size=step_size_adaqn, grad_fun=grad_fun,
              obj_fun=obj_fun, decr_step_size=None, rmsprop_weight=0.9)
olbfgs = oLBFGS(x0=w0.copy(), step_size=step_size_olbfgs, grad_fun=grad_fun,
                obj_fun=obj_fun, decr_step_size=None)
# -

# ### Running different optimizers on the same batches of data:

# +
lst_accessed_data_points = []
lst_valset_loss_sgd = []
lst_valset_loss_rmsprop = []
lst_valset_loss_olbfgs = []
lst_valset_loss_adaqn = []

test_weights = np.ones(X_test.shape[0])
batch_size = 50
nbatches = int(np.ceil(X_train.shape[0] / batch_size))

for batch in range(nbatches):
    ### Data for this batch
    batch_start = batch * batch_size
    batch_end = min(X_train.shape[0], (batch + 1) * batch_size)
    X_batch = X_train[batch_start:batch_end]
    Y_batch = Y_train[batch_start:batch_end]
    s_weigh = np.ones(X_batch.shape[0])

    ### Gradients for optimizers outside of this package
    gradient_batch_sgd = grad_fun(w_sgd, X_batch, Y_batch, s_weigh, reg_param)
    gradient_batch_rmsprop = grad_fun(w_rmsprop, X_batch, Y_batch, s_weigh, reg_param)

    #### Stochastic gradient descent update
    w_sgd -= step_size_sgd * gradient_batch_sgd

    #### RMSProp update
    sum_grad_sq_rmsprop = (w_div_rmsprop * sum_grad_sq_rmsprop
                           + (1 - w_div_rmsprop) * gradient_batch_rmsprop**2)
    w_rmsprop -= step_size_rmsprop * gradient_batch_rmsprop / np.sqrt(sum_grad_sq_rmsprop + reg_rmsprop)

    ### Stochastic quasi-Newtons
    adaqn.partial_fit(X_batch, Y_batch, s_weigh, additional_kwargs={"reg_param": reg_param})
    olbfgs.partial_fit(X_batch, Y_batch, s_weigh, additional_kwargs={"reg_param": reg_param})

    ### Track the validation loss of every optimizer after this batch
    lst_accessed_data_points.append(X_batch.shape[0])
    lst_valset_loss_sgd.append(obj_fun(w_sgd, X_test, Y_test, test_weights, reg_param))
    lst_valset_loss_rmsprop.append(obj_fun(w_rmsprop, X_test, Y_test, test_weights, reg_param))
    lst_valset_loss_adaqn.append(obj_fun(adaqn.x, X_test, Y_test, test_weights, reg_param))
    lst_valset_loss_olbfgs.append(obj_fun(olbfgs.x, X_test, Y_test, test_weights, reg_param))
# -

# ### Plotting the results:

# +
# %matplotlib inline
import matplotlib.pyplot as plt
from pylab import rcParams

loss_comp = pd.DataFrame({
    "Accessed Data Points": lst_accessed_data_points,
    "SGD": lst_valset_loss_sgd,
    "RMSProp": lst_valset_loss_rmsprop,
    "oLBFGS": lst_valset_loss_olbfgs,
    "adaQN": lst_valset_loss_adaqn,
})

txt_plot = "Logistic Regression fit in batches\nBibTeX dataset\n("
txt_plot += "{:,}".format(X_train.shape[0]) + " samples, " + str(X_train.shape[1]) + " features, "
txt_plot += str(Y.shape[1]) + " classes)\n"

rcParams['figure.figsize'] = 12, 8
lwd = 4
ax = plt.subplot(111)
plt.plot(loss_comp["SGD"], linewidth=lwd)
plt.plot(loss_comp["RMSProp"], linewidth=lwd)
plt.plot(loss_comp["oLBFGS"], linewidth=lwd)
plt.plot(loss_comp["adaQN"], linewidth=lwd)
nticks = 10
# Re-label the x axis from batch index to cumulative data points accessed.
tick_scal = loss_comp.shape[0] * batch_size / nticks
plt.xticks([int(i * loss_comp.shape[0] / nticks) for i in range(nticks)],
           [int(i * tick_scal) for i in range(nticks)])
ax.legend(fancybox=True, prop={'size': 15})
plt.ylabel('Multinomial Loss (test set)', size=18)
plt.xlabel('Accessed Data Points', size=15)
plt.title(txt_plot, size=20)
plt.ylim(0, 6 * 10**4)
plt.show()
example/example_stochqn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import ee

ee.Initialize()

from geetools import tools
import ipygee as ui
import geedatasets

# Work on a small sample: ten Landsat 8 surface-reflectance images.
l8 = geedatasets.landsat.Landsat8SR()
col = l8.collection().limit(10)

# ## Parametrize CLOUD_COVER from 0-100 to 0-1

parametrized = tools.imagecollection.parametrizeProperty(col, 'CLOUD_COVER', [0, 100], [0, 1])
cloud_cover = parametrized.toList(10).map(lambda img: ee.Image(img).get('CLOUD_COVER_PARAMETRIZED'))
ui.eprint(cloud_cover)

# ## Values out of range

# Source range narrower than the data (0-80), so some property values fall
# outside the target interval.
parametrized2 = tools.imagecollection.parametrizeProperty(col, 'CLOUD_COVER', [0, 80], [0, 1])
out_of_range = parametrized2.toList(10).map(lambda img: ee.Image(img).get('CLOUD_COVER_PARAMETRIZED'))
ui.eprint(out_of_range)
notebooks/imagecollection/parametrizeProperty.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Weather data and solar radiation
# Objectives:
#
# - Obtain weather data from EnergyPlus
# - Read weather data
# - Find solar radiation on a tilted surface
# - Visualize and discuss the data
#
# ## Obtain weather data from EnergyPlus files
#
# ### Download data file
#
# Download the weather file with extension .epw from
# http://climate.onebuilding.org or from https://energyplus.net/weather;
# for example: `FRA_Lyon.074810_IWEC.epw` or
# `FRA_AR_Lyon-Bron.AP.074800_TMYx.2004-2018`.
#
# ### Read weather data

# +
from dm4bem import read_epw, sol_rad_tilt_surf

filename = 'FRA_Lyon.074810_IWEC.epw'
# filename = 'FRA_AR_Lyon-Bron.AP.074800_TMYx.2004-2018.epw'
# -

# In a weather file, the data for each month may come from a different year.

data, meta = read_epw(filename, coerce_year=None)
month_year = data['month'].astype(str) + '-' + data['year'].astype(str)
print(f"Months - years in the dataset: {sorted(set(month_year))}")

# From the dataset, select the air temperature, the normal solar radiation and
# the diffuse solar radiation received during the 60 minutes prior to each
# timestamp. Map every timestamp onto the same year, e.g. 2000.

weather_data = data[["temp_air", "dir_n_rad", "dif_h_rad"]]
weather_data.index = weather_data.index.map(lambda t: t.replace(year=2000))

# Select a period together with the air temperature, the normal solar
# radiation and the diffuse solar radiation received during the 60 minutes
# prior to each timestamp.
# +
start_date = '2000-01-03 12:00:00'
end_date = '2000-01-03 18:00:00'

# Keep only the chosen window and drop the full dataset.
in_period = (weather_data.index >= start_date) & (weather_data.index < end_date)
weather_data = weather_data[in_period]
del data
# -

weather_data

# ### Solar radiation on a tilted surface
#
# Given a tilted wall and knowing the albedo of the surface in front of it,
# calculate the direct, diffuse and reflected radiation incident on the wall.

# +
surface_orientation = {'slope': 90,
                       'azimuth': 0,
                       'latitude': 45}
albedo = 0.2

rad_surf = sol_rad_tilt_surf(weather_data, surface_orientation, albedo)
rad_surf

# + [markdown] tags=[]
# ## Solar radiation on a tilted surface `sol_rad_tilt_sur`
#
# Let's consider a tilted surface having another surface (e.g., ground) in
# front of it. Given the weather data, the surface orientation, and the albedo
# of the ground in front of the surface, find the direct, diffuse and
# reflected solar radiation for this surface.

# +
import numpy as np
import pandas as pd

B = surface_orientation['slope']
Z = surface_orientation['azimuth']
L = surface_orientation['latitude']

# Transform degrees into radians.
B = B * np.pi / 180
Z = Z * np.pi / 180
L = L * np.pi / 180

n = weather_data.index.dayofyear

# + [markdown] tags=[]
# **Total solar radiation** is the amount of radiation received on a surface
# during the number of minutes preceding the time indicated:
#
# $$ G_{sr} = G_{dir} + G_{dif} + G_r$$
#
# where:
#
# - $G_{dir}$ **direct normal** or **beam radiation**: amount of solar
#   radiation received directly from the solar disk on a surface
#   perpendicular to the sun's rays, during the number of minutes preceding
#   the time indicated, Wh/m².
#
# - $G_{dif}$ **diffuse radiation**: amount of solar radiation received after
#   scattering by the atmosphere, Wh/m². Note: it does not include the diffuse
#   infrared radiation emitted by the atmosphere.
#
# - $G_r$ **total solar radiation coming by reflection** from the surface in
#   front of the wall (usually, ground), Wh/m².
# + [markdown] tags=[]
# ### Direct radiation, $G_{dir}$
#
# The direct radiation on the surface, $G_{dir}$, depends on the *direct
# normal (or beam) radiation*, $G_n$, and the *incidence angle*, $\theta$,
# between the solar beam and the normal to the wall [2] (§11.2.1).
#
# In order to calculate the *incidence angle*, $\theta$, we need:
#
# - $\phi$ *latitude*, the angle between the position and the Equator,
#   ranging from 0° at the Equator to 90° at the North Pole and -90° at the
#   South Pole. $-90 ^{\circ} \leq \phi \leq 90 ^{\circ}$
#
# - $\beta$ *slope*, the angle between the plane of the surface and the
#   horizontal. $\beta > 90 ^{\circ}$
#
# - $\gamma$ *azimuth*, the angle between the projection on a horizontal
#   plane of the normal to the surface and the local meridian; south is zero,
#   east negative, and west positive.
#   $-180 ^{\circ} \leq \gamma \leq 180 ^{\circ}$.
#
# - $\delta$ *declination angle*, the angle between the sun at noon (i.e.,
#   when the sun is on the local meridian) and the plane of the equator,
#   north positive [1](eq. 1.6.1a), [2](§11.2.1.1, eq. (78)):
# $$ \delta = 23.45 \sin \left( 360 \frac{284 + n}{365} \right)$$
# -

declination_angle = 23.45 * np.sin(360 * (284 + n) / 365 * np.pi / 180)
d = declination_angle * np.pi / 180

# + [markdown] tags=[]
# - $\omega$ *hour angle*, the angle between the sun and the local meridian
#   due to rotation of the earth around its axis at 15° per hour [1]:
#
# $$ \omega = 15\left ( hour + \frac{minute}{60} - 12 \right )$$
#
# where *hour* and *minute* is the solar time:
# -

hour = weather_data.index.hour
# NOTE(review): adding 60 minutes shifts every timestamp by one hour;
# presumably this compensates for the end-of-interval EPW timestamps --
# confirm against the dm4bem implementation.
minute = weather_data.index.minute + 60
hour_angle = 15 * ((hour + minute / 60) - 12)
h = hour_angle * np.pi / 180

# + [markdown] tags=[]
# $-180 ^{\circ} \leq \omega \leq 180 ^{\circ}$. $\omega < 0$ in the morning,
# $\omega = 0$ at noon, and $\omega > 0$ in the afternoon. Hour angle is used
# with the declination to give the direction of a point on the celestial
# sphere.
# + [markdown] tags=[]
# The **incidence angle**, $\theta$, is the angle between the solar beam on
# the surface and the normal to the surface [1](eq. 1.6.2), [2] (eq. 78):
#
# $$\theta = \arccos (\sin \delta \sin \phi \cos \beta - \sin \delta \cos \phi \sin \beta \cos \gamma + \cos \delta \cos \phi \cos \beta \cos \omega + \cos \delta \sin \phi \sin \beta \cos \gamma \cos \omega + \cos \delta \sin \beta \sin \gamma \sin \omega)$$
# -

theta = np.sin(d) * np.sin(L) * np.cos(B)
theta -= np.sin(d) * np.cos(L) * np.sin(B) * np.cos(Z)
theta += np.cos(d) * np.cos(L) * np.cos(B) * np.cos(h)
theta += np.cos(d) * np.sin(L) * np.sin(B) * np.cos(Z) * np.cos(h)
theta += np.cos(d) * np.sin(B) * np.sin(Z) * np.sin(h)
theta = np.array(np.arccos(theta))
# The sun is behind the surface when theta > 90°; cap it so cos(theta) >= 0.
theta[theta > (np.pi / 2)] = np.pi / 2

# + [markdown] tags=[]
# The **direct radiation**, $G_{dir}$ on the surface is:
#
# $$G_{dir} = G_{dir,n} \cos \theta$$
#
# where *direct normal radiation* or *beam radiation*, $G_n$, is the amount
# of solar radiation (in Wh/m²) received directly from the solar disk on the
# surface perpendicular to the sun's rays, during the number of minutes
# preceding the time indicated. It is given by weather data.

# + tags=[]
dir_rad = weather_data["dir_n_rad"] * np.cos(theta)
dir_rad[dir_rad < 0] = 0

# + [markdown] tags=[]
# ### Diffuse Radiation, $G_{dif}$
#
# The diffuse radiation on the wall is a function of its slope, $\beta$, and
# the isotropic diffuse solar radiation, $G_{dif,h}$,
# [2](§1.2.1.2, eq. 79, p. 31):
#
# $$ G_{dif} = G_{dif,h} (1 + \cos \beta) / 2$$
# -

dif_rad = weather_data["dif_h_rad"] * (1 + np.cos(B)) / 2

# + [markdown] tags=[]
# ### Solar radiation reflected by the ground
#
# Considering the radiation reflected by the ground as isotropic, the
# reflected radiation that gets onto the wall is a function of its slope,
# albedo and total horizontal radiation [2](§11.2.1.3).
#
# The normal horizontal radiation is [2] (eq. 80):
#
# $$ G_{dir,h} = G_{dn} \sin \gamma$$

# + tags=[]
# gamma here is the solar altitude angle: sin(altitude) =
# cos(d) cos(L) cos(h) + sin(d) sin(L); clamp tiny/negative values to keep
# the horizontal projection non-negative.
gamma = np.cos(d) * np.cos(L) * np.cos(h)
gamma += np.sin(d) * np.sin(L)
gamma = np.array(np.arcsin(gamma))
gamma[gamma < 1e-5] = 1e-5
dir_h_rad = weather_data["dir_n_rad"] * np.sin(gamma)

# + [markdown] tags=[]
# The total radiation received by reflection is:
#
# $$G_r = (G_{dir,h} + G_{dif,h}) albedo (1 - \cos \beta)/2$$
# -

ref_rad = (dir_h_rad + weather_data["dif_h_rad"]) * albedo
# Isotropic ground-view factor is (1 - cos(B)) / 2 as in the formula above;
# the original computed 1 - cos(B)/2, which over-weights the reflected term.
ref_rad *= (1 - np.cos(B)) / 2

# + [markdown] tags=[]
# ### Definitions
#
# $G_{dir,n}$ **Direct normal** or **beam radiation**. Amount of solar
# radiation in Wh/m² received directly from the solar disk on a surface
# perpendicular to the sun's rays, during the number of minutes preceding the
# time indicated.
#
# $G_{dif,h}$ **Diffuse horizontal radiation**. Amount of solar radiation in
# Wh/m² received after scattering by the atmosphere. This definition
# distinguishes the diffuse solar radiation from infrared radiation emitted
# by the atmosphere.
#
# **Total Solar Radiation**. Total amount of direct and diffuse solar
# radiation in Wh/m² received on a surface during the number of minutes
# preceding the time indicated.
#
# **Global radiation.** Total solar radiation given on a horizontal surface.
#
# **Solar Time.** Time based on the apparent position of the sun in the sky
# with noon the time when the sun crosses the observer meridian.
#
# ### Definitions for angles (in degrees)
#
# $\phi$ **Latitude.** Angle between the position and the Equator, ranging
# from 0° at the Equator to 90° at the North Pole and -90° at the South Pole.
# $-90 ^{\circ} \leq \phi \leq 90 ^{\circ}$
#
# $\beta$ **Slope.** Angle between the plane of the surface and the
# horizontal. $\beta > 90 ^{\circ}$
#
# $\gamma$ **Azimuth.** Angle between the projection on a horizontal plane of
# the normal to the surface and the local meridian; south is zero, east
# negative, and west positive. $-180 ^{\circ} \leq \gamma \leq 180 ^{\circ}$.
#
# $\delta$ **Declination.** Angle between the sun at noon (i.e., when the sun
# is on the local meridian) and the plane of the equator, north positive
# [1] (eq. 1.6.1a):
#
# $$ \delta = 23.45 \sin \left( 360 \frac{284 + n}{365} \right)$$
#
# where $n$ is the day of the year.
# $-23.45 ^{\circ} \leq \delta \leq 23.45 ^{\circ}$. Declination is used with
# hour angle to give the direction of a point on the celestial sphere.
#
# $\omega$ **Hour angle.** Angle between the sun and the local meridian due
# to rotation of the earth around its axis at 15° per hour [1]:
#
# $$ \omega = 15\left ( hour + \frac{minute}{60} - 12 \right )$$
#
# where *hour* and *minute* is the solar time.
# $-180 ^{\circ} \leq \omega \leq 180 ^{\circ}$; $\omega < 0$ in the morning,
# $\omega = 0$ at noon, and $\omega > 0$ in the afternoon. Hour angle is used
# with the declination to give the direction of a point on the celestial
# sphere.
#
# $\theta$ **Incidence.** Angle between the solar beam on the surface and the
# normal to the surface [1] (eq. 1.6.2):
#
# $$\theta = \arccos (\sin \delta \sin \phi \cos \beta - \sin \delta \cos \phi \sin \beta \cos \gamma + \cos \delta \cos \phi \cos \beta \cos \omega + \cos \delta \sin \phi \sin \beta \cos \gamma \cos \omega + \cos \delta \sin \beta \sin \gamma \sin \omega)$$
#
# -

# ## References
#
# 1. J. A. Duffie, W. A. Beckman, N. Blair (2020) Solar Engineering of
#    Thermal Processes, 5th ed. John Wiley & Sons, Inc. ISBN 9781119540281
#
# 2. Réglementation Thermique 2005. Méthode de calcul Th-CE. Annexe à
#    l'arrêté du 19 juillet 2006
t01/t01_test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Practice Notebook - Putting It All Together
# Hello, coders! Below we have code similar to what we wrote in the last
# video. Go ahead and run the following cell that defines our
# `get_event_date`, `current_users` and `generate_report` methods.

# +
def get_event_date(event):
    """Sort key: the date attribute of an event."""
    return event.date


def current_users(events):
    """Replay *events* in date order; return {machine: set of active users}."""
    events.sort(key=get_event_date)
    machines = {}
    for event in events:
        users = machines.setdefault(event.machine, set())
        if event.type == "login":
            users.add(event.user)
        elif event.type == "logout":
            # discard() ignores logouts for users who were never logged in.
            users.discard(event.user)
    return machines


def generate_report(machines):
    """Print one "machine: user, user" line per machine with active users."""
    for machine, users in machines.items():
        if users:
            user_list = ", ".join(users)
            print("{}: {}".format(machine, user_list))
# -

# No output should be generated from running the custom function definitions
# above. To check that our code is doing everything it's supposed to do, we
# need an `Event` class. The code in the next cell below initializes our
# `Event` class. Go ahead and run this cell next.

class Event:
    """A single login/logout event on a machine."""

    def __init__(self, event_date, event_type, machine_name, user):
        self.date = event_date
        self.type = event_type
        self.machine = machine_name
        self.user = user

# Ok, we have an `Event` class that has a constructor and sets the necessary
# attributes. Next let's create some events and add them to a list by running
# the following cell.
events = [
    Event('2020-01-21 12:45:56', 'login', 'myworkstation.local', 'jordan'),
    Event('2020-01-22 15:53:42', 'logout', 'webserver.local', 'jordan'),
    Event('2020-01-21 18:53:21', 'login', 'webserver.local', 'lane'),
    Event('2020-01-22 10:25:34', 'logout', 'myworkstation.local', 'jordan'),
    Event('2020-01-21 08:20:01', 'login', 'webserver.local', 'jordan'),
    Event('2020-01-23 11:24:35', 'logout', 'mailserver.local', 'chris'),
]

# Now we've got a bunch of events. Let's feed these events into our
# `custom_users` function and see what happens.

users = current_users(events)
print(users)

# Uh oh. The code in the previous cell produces an error message. This is
# because we have a user in our `events` list that was logged out of a
# machine he was not logged into. Do you see which user this is? Make edits
# to the first cell containing our custom function definitions to see if you
# can fix this error message. There may be more than one way to do so.
# <br><br>
# Remember when you have finished making your edits, rerun that cell as well
# as the cell that feeds the `events` list into our `custom_users` function
# to see whether the error message has been fixed. Once the error message has
# been cleared and you have correctly outputted a dictionary with machine
# names as keys, your custom functions are properly finished. Great!

# Now try generating the report by running the next cell.

generate_report(users)

# Whoop whoop! Success! The error message has been cleared and the desired
# output is produced. You are all done with this practice notebook.
# Way to go!

# # Final Project - Word Cloud
# For this project, you'll create a "word cloud" from a text by writing a
# script. This script needs to process the text, remove punctuation, ignore
# case and words that do not contain all alphabets, count the frequencies,
# and ignore uninteresting or irrelevant words. A dictionary is the output of
# the `calculate_frequencies` function. The `wordcloud` module will then
# generate the image from your dictionary.
# For the input text of your script, you will need to provide a file that
# contains text only. For the text itself, you can copy and paste the
# contents of a website you like. Or you can use a site like
# [Project Gutenberg](https://www.gutenberg.org/) to find books that are
# available online. You could see what word clouds you can get from famous
# books, like a Shakespeare play or a novel by <NAME>. Save this as a .txt
# file somewhere on your computer.
# <br><br>
# Now you will need to upload your input file here so that your script will
# be able to process it. To do the upload, you will need an uploader widget.
# Run the following cell to perform all the installs and imports for your
# word cloud script and uploader widget. It may take a minute for all of this
# to run and there will be a lot of output messages. But, be patient. Once
# you get the following final line of output, the code is done executing.
# Then you can continue on with the rest of the instructions for this
# notebook.
# <br><br>
# **Enabling notebook extension fileupload/extension...**
# <br>
# **- Validating: <font color =green>OK</font>**

# +
# Here are all the installs and imports you will need for your word cloud
# script and uploader widget
# !pip install wordcloud
# !pip install fileupload
# !pip install ipywidgets
# !jupyter nbextension install --py --user fileupload
# !jupyter nbextension enable --py fileupload

import wordcloud
import numpy as np
from matplotlib import pyplot as plt
from IPython.display import display
import fileupload
import io
import sys
# -

# Whew! That was a lot. All of the installs and imports for your word cloud
# script and uploader widget have been completed.
# <br><br>
# **IMPORTANT!** If this was your first time running the above cell
# containing the installs and imports, you will need save this notebook now.
# Then under the File menu above, select Close and Halt. When the notebook
# has completely shut down, reopen it. This is the only way the necessary
# changes will take effect.
# <br><br>
# To upload your text file, run the following cell that contains all the code
# for a custom uploader widget. Once you run this cell, a "Browse" button
# should appear below it. Click this button and navigate the window to locate
# your saved text file.

# +
# This is the uploader widget
def _upload():
    """Show a file-upload widget and stash the uploaded text in `file_contents`."""
    widget = fileupload.FileUploadWidget()

    def on_data(change):
        # Runs once the browser has delivered the file bytes.
        global file_contents
        decoded = io.StringIO(change['owner'].data.decode('utf-8'))
        filename = change['owner'].filename
        print('Uploaded `{}` ({:.2f} kB)'.format(
            filename, len(decoded.read()) / 2 ** 10))
        file_contents = decoded.getvalue()

    widget.observe(on_data, names='data')
    display(widget)


_upload()
# -

# The uploader widget saved the contents of your uploaded file into a string
# object named *file_contents* that your word cloud script can process. This
# was a lot of preliminary work, but you are now ready to begin your script.

# Write a function in the cell below that iterates through the words in
# *file_contents*, removes punctuation, and counts the frequency of each
# word. Oh, and be sure to make it ignore word case, words that do not
# contain all alphabets and boring words like "and" or "the". Then use it in
# the `generate_from_frequencies` function to generate your very own word
# cloud!
# <br><br>
# **Hint:** Try storing the results of your iteration in a dictionary before
# passing them into wordcloud via the `generate_from_frequencies` function.
def calculate_frequencies(file_contents):
    """Count interesting-word frequencies in *file_contents* and build a word cloud.

    Non-alphabetic characters are stripped, case is ignored, and common stop
    words are discarded before counting.

    Returns the rendered word-cloud image as a numpy array
    (``WordCloud.to_array``).
    """
    # List of uninteresting words to skip while counting.
    uninteresting_words = {"the", "a", "to", "if", "is", "it", "of", "and", "or", "an",
                           "as", "i", "me", "my", "we", "our", "ours", "you", "your",
                           "yours", "he", "she", "him", "his", "her", "hers", "its",
                           "they", "them", "their", "what", "which", "who", "whom",
                           "this", "that", "am", "are", "was", "were", "be", "been",
                           "being", "have", "has", "had", "do", "does", "did", "but",
                           "at", "by", "with", "from", "here", "when", "where", "how",
                           "all", "any", "both", "each", "few", "more", "some", "such",
                           "no", "nor", "too", "very", "can", "will", "just"}

    # Keep only letters and whitespace (drops punctuation and digits), then
    # tokenize. Every remaining token is purely alphabetic, so no second
    # isalpha() check is needed.
    cleaned = "".join(ch for ch in file_contents if ch.isalpha() or ch.isspace())

    frequencies = {}
    for word in cleaned.split():
        key = word.lower()
        if key not in uninteresting_words:
            frequencies[key] = frequencies.get(key, 0) + 1

    # wordcloud
    cloud = wordcloud.WordCloud()
    cloud.generate_from_frequencies(frequencies)
    return cloud.to_array()

# If you have done everything correctly, your word cloud image should appear
# after running the cell below. Fingers crossed!

# +
# Display your wordcloud image
myimage = calculate_frequencies(file_contents)
plt.imshow(myimage, interpolation='nearest')
plt.axis('off')
plt.show()
# -

# If your word cloud image did not appear, go back and rework your
# `calculate_frequencies` function until you get the desired output.
# Definitely check that you passed your frequency count dictionary into the
# `generate_from_frequencies` function of `wordcloud`. Once you have
# correctly displayed your word cloud image, you are all done with this
# project. Nice work!
Crash Course on Python/.ipynb_checkpoints/Module 6-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: conda_python3 # language: python # name: conda_python3 # --- # # Use SageMaker Distributed Model Parallel with Amazon SageMaker to Launch Training Job with Model Parallelization # # SageMaker Distributed Model Parallel is a model parallelism library for training large deep learning models that were previously difficult to train due to GPU memory limitations. SageMaker Distributed Model Parallel automatically and efficiently splits a model across multiple GPUs and instances and coordinates model training, allowing you to increase prediction accuracy by creating larger models with more parameters. # # Use this notebook to configure Sagemaker Distributed Model Parallel to train a model using TensorFlow and [Amazon SageMaker Python SDK](https://sagemaker.readthedocs.io/en/stable/overview.html#train-a-model-with-the-sagemaker-python-sdk). # # # ### Additional Resources # If you are a new user of Amazon SageMaker, you may find the following helpful to understand how SageMaker uses Docker to train custom models. # * To learn more about using Amazon SageMaker with your own training image, see [Use Your Own Training Algorithms # ](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html). # # * To learn more about using Docker to train your own models with Amazon SageMaker, see [Example Notebooks: Use Your Own Algorithm or Model](https://docs.aws.amazon.com/sagemaker/latest/dg/adv-bring-own-examples.html). # # ## Amazon SageMaker Initialization # # Run the following cells to initialize the notebook instance. Get the SageMaker execution role used to run this notebook. 
# Install the experiment-tracking helper and a current SageMaker SDK first.
# These are shell commands, not Python -- run them from a notebook cell
# (prefixed with "!") or a terminal:
#
#   pip install sagemaker-experiments
#   pip install sagemaker --upgrade

# +
# %%time
import sagemaker
from sagemaker import get_execution_role
from sagemaker.tensorflow import TensorFlow
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
import boto3
from time import gmtime, strftime

role = (
    get_execution_role()
)  # provide a pre-existing role ARN as an alternative to creating a new role
print(f"SageMaker Execution Role:{role}")

session = boto3.session.Session()
# -

# ## Prepare your training script
#
# Run the following cells to view example-training scripts for TensorFlow
# versions 2.3. The `tf2.py` is pure model parallelism and `tf2_hvd.py` is
# data/model parallelism using Horovod.

# Run this cell to see an example of a training script that you can use to
# configure SageMaker Distributed Model Parallel with TensorFlow 2.3
# !cat utils/tf2.py

# Run this cell to see an example of a training script that you can use to
# configure SageMaker Distributed Model Parallel using Horovod with
# TensorFlow 2.3
# !cat utils/tf2_hvd.py

# ## Define SageMaker Training Job
#
# Next, you will use SageMaker Estimator API to define a SageMaker Training
# Job. You will use an
# [`Estimator`](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html)
# to define the number and type of EC2 instances Amazon SageMaker uses for
# training, as well as the size of the volume attached to those instances.
#
# You must update the following:
# * `processes_per_host`
# * `entry_point`
# * `instance_count`
# * `instance_type`
# * `base_job_name`
#
# In addition, you can supply and modify configuration parameters for the
# SageMaker Distributed Model Parallel library. These parameters will be
# passed in through the `distributions` argument, as shown below.
#
# ### Update the Type and Number of EC2 Instances Used
#
# Pick your `entry_point` from one of the example scripts: `tf2.py`,
# `tf2_hvd.py`
#
# Specify your `processes_per_host`; for `tf2.py` only use 2, for
# `tf2_hvd.py` use at least 4. Note that it must be a multiple of your
# partitions, which by default is 2.
#
# The instance type and number of instances you specify in `instance_type`
# and `instance_count` respectively will determine the number of GPUs Amazon
# SageMaker uses during training. Explicitly, `instance_type` will determine
# the number of GPUs on a single instance and that number will be multiplied
# by `instance_count`.
#
# You must specify values for `instance_type` and `instance_count` so that
# the total number of GPUs available for training is equal to `partitions`
# in `parameters` of your model parallel distributions argument in the
# Estimator API.
#
# If you use `tf2_hvd.py`, the total number of model replicas your training
# job can support will be equal to the total number of GPUs you specify,
# divided by `partitions`. Therefore, if you use Horovod for data
# parallelization, specify the total number of GPUs to be the desired number
# of model replicas times `partitions`: `total-model-replicas` x
# `partitions`.
#
# To look up instance types, see
# [Amazon EC2 Instance Types](https://aws.amazon.com/sagemaker/pricing/).
#
# ### Uploading Checkpoint During Training or Resuming Checkpoint from Previous Training
# We also provide a custom way for users to upload checkpoints during
# training or resume checkpoints from previous training. We have integrated
# this into our `tf2.py` example script. Please see the functions
# `aws_s3_sync`, `sync_local_checkpoints_to_s3`, and
# `sync_s3_checkpoints_to_local`. For the purpose of this example, we are
# only uploading a checkpoint during training, by using
# `sync_local_checkpoints_to_s3`.
#
# After you have updated `entry_point`, `instance_count`, `instance_type`
# and `base_job_name`, run the following to create an estimator.
# +
sagemaker_session = sagemaker.session.Session(boto_session=session)
mpioptions = "-verbose -x orte_base_help_aggregate=0 "

# choose an experiment name (only need to create it once)
experiment_name = "SM-MP-DEMO"

# Load the experiment if it exists, otherwise create it.
all_experiment_names = [exp.experiment_name for exp in Experiment.list()]
if experiment_name in all_experiment_names:
    customer_churn_experiment = Experiment.load(
        experiment_name=experiment_name,
        sagemaker_boto_client=boto3.client("sagemaker"),
    )
else:
    customer_churn_experiment = Experiment.create(
        experiment_name=experiment_name,
        sagemaker_boto_client=boto3.client("sagemaker"),
    )

# Create a trial for the current run, timestamped so names stay unique.
trial = Trial.create(
    trial_name="SMD-MP-demo-{}".format(strftime("%Y-%m-%d-%H-%M-%S", gmtime())),
    experiment_name=customer_churn_experiment.experiment_name,
    sagemaker_boto_client=boto3.client("sagemaker"),
)

smd_mp_estimator = TensorFlow(
    entry_point="tf2.py",  # Pick your train script
    source_dir="utils",
    role=role,
    framework_version="2.3.1",
    py_version="py37",
    instance_type="ml.p3.16xlarge",
    sagemaker_session=sagemaker_session,
    instance_count=1,
    distribution={
        "smdistributed": {
            "modelparallel": {
                "enabled": True,
                "parameters": {
                    "microbatches": 2,
                    "partitions": 2,
                    "pipeline": "interleaved",
                    "optimize": "memory",
                    # "horovod": True,  # Set to True if using the horovod script
                },
            }
        },
        "mpi": {
            "enabled": True,
            "processes_per_host": 2,  # Pick your processes_per_host
            "custom_mpi_options": mpioptions,
        },
    },
    base_job_name="SMD-MP-demo",
)
# -

# Finally, you will use the estimator to launch the SageMaker training job.

smd_mp_estimator.fit(
    experiment_config={
        "ExperimentName": customer_churn_experiment.experiment_name,
        "TrialName": trial.trial_name,
        "TrialComponentDisplayName": "Training",
    }
)
training/distributed_training/tensorflow/model_parallel/mnist/tensorflow_smmodelparallel_mnist.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="5HExLQrE4ZxR" colab_type="text" # <h1><font color='blue'> 8E and 8F: Finding the Probability P(Y==1|X)</font></h1> # + [markdown] id="4LuKrFzC4ZxV" colab_type="text" # <h2><font color='Geen'> 8E: Implementing Decision Function of SVM RBF Kernel</font></h2> # + [markdown] id="1wES-wWN4ZxX" colab_type="text" # <font face=' Comic Sans MS' size=3>After we train a kernel SVM model, we will be getting support vectors and their corresponsing coefficients $\alpha_{i}$ # # Check the documentation for better understanding of these attributes: # # https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html # <img src='https://i.imgur.com/K11msU4.png' width=500> # # As a part of this assignment you will be implementing the ```decision_function()``` of kernel SVM, here decision_function() means based on the value return by ```decision_function()``` model will classify the data point either as positive or negative # # Ex 1: In logistic regression After traning the models with the optimal weights $w$ we get, we will find the value $\frac{1}{1+\exp(-(wx+b))}$, if this value comes out to be < 0.5 we will mark it as negative class, else its positive class # # Ex 2: In Linear SVM After traning the models with the optimal weights $w$ we get, we will find the value of $sign(wx+b)$, if this value comes out to be -ve we will mark it as negative class, else its positive class. # # Similarly in Kernel SVM After traning the models with the coefficients $\alpha_{i}$ we get, we will find the value of # $sign(\sum_{i=1}^{n}(y_{i}\alpha_{i}K(x_{i},x_{q})) + intercept)$, here $K(x_{i},x_{q})$ is the RBF kernel. If this value comes out to be -ve we will mark $x_{q}$ as negative class, else its positive class. 
# # RBF kernel is defined as: $K(x_{i},x_{q})$ = $exp(-\gamma ||x_{i} - x_{q}||^2)$ # # For better understanding check this link: https://scikit-learn.org/stable/modules/svm.html#svm-mathematical-formulation # </font> # + [markdown] id="z830CfMk4Zxa" colab_type="text" # ## Task E # + [markdown] id="MuBxHiCQ4Zxc" colab_type="text" # > 1. Split the data into $X_{train}$(60), $X_{cv}$(20), $X_{test}$(20) # # > 2. Train $SVC(gamma=0.001, C=100.)$ on the ($X_{train}$, $y_{train}$) # # > 3. Get the decision boundry values $f_{cv}$ on the $X_{cv}$ data i.e. ` `$f_{cv}$ ```= decision_function(```$X_{cv}$```)``` <font color='red'>you need to implement this decision_function()</font> # + id="fCgMNEvI4Zxf" colab_type="code" colab={} import numpy as np import pandas as pd from sklearn.datasets import make_classification import numpy as np from sklearn.svm import SVC # + id="ANUNIqCe4Zxn" colab_type="code" colab={} X, y = make_classification(n_samples=5000, n_features=5, n_redundant=2, n_classes=2, weights=[0.7], class_sep=0.7, random_state=15) # + [markdown] id="tHie1zqH4Zxt" colab_type="text" # ### Pseudo code # # clf = SVC(gamma=0.001, C=100.)<br> # clf.fit(Xtrain, ytrain) # # <font color='green'>def</font> <font color='blue'>decision_function</font>(Xcv, ...): #use appropriate parameters <br> # &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<font color='green'>for</font> a data point $x_q$ <font color='green'>in</font> Xcv: <br> # &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;<font color='grey'>#write code to implement $(\sum_{i=1}^{\text{all the support vectors}}(y_{i}\alpha_{i}K(x_{i},x_{q})) + intercept)$, here the values $y_i$, $\alpha_{i}$, and $intercept$ can be obtained from the trained model</font><br> # <font color='green'>return</font> <font color='grey'><i># the decision_function output for all the data points in the Xcv</i></font> # # fcv = decision_function(Xcv, ...) 
<i># based on your requirement you can pass any other parameters </i> # # <b>Note</b>: Make sure the values you get as fcv, should be equal to outputs of clf.decision_function(Xcv) # # + id="h43kDT3M41u5" colab_type="code" colab={} # you can write your code here # + [markdown] id="c0bKCboN4Zxu" colab_type="text" # <h2><font color='Geen'> 8F: Implementing Platt Scaling to find P(Y==1|X)</font></h2> # + [markdown] id="nMn7OEN94Zxw" colab_type="text" # Check this <a href='https://drive.google.com/open?id=133odBinMOIVb_rh_GQxxsyMRyW-Zts7a'>PDF</a> # <img src='https://i.imgur.com/CAMnVnh.png'> # # + [markdown] id="e0n5EFkx4Zxz" colab_type="text" # ## TASK F # + [markdown] id="t0HOqVJq4Zx1" colab_type="text" # # > 4. Apply SGD algorithm with ($f_{cv}$, $y_{cv}$) and find the weight $W$ intercept $b$ ```Note: here our data is of one dimensional so we will have a one dimensional weight vector i.e W.shape (1,)``` # # > Note1: Don't forget to change the values of $y_{cv}$ as mentioned in the above image. you will calculate y+, y- based on data points in train data # # > Note2: the Sklearn's SGD algorithm doesn't support the real valued outputs, you need to use the code that was done in the `'Logistic Regression with SGD and L2'` Assignment after modifying loss function, and use same parameters that used in that assignment. # <img src='https://i.imgur.com/zKYE9Oc.png'> # if Y[i] is 1, it will be replaced with y+ value else it will replaced with y- value # # > 5. 
For a given data point from $X_{test}$, $P(Y=1|X) = \frac{1}{1+exp(-(W*f_{test}+ b))}$ where ` `$f_{test}$ ```= decision_function(```$X_{test}$```)```, W and b will be learned as metioned in the above step # + [markdown] id="oTY7z2bd4Zx2" colab_type="text" # __Note: in the above algorithm, the steps 2, 4 might need hyper parameter tuning, To reduce the complexity of the assignment we are excluding the hyerparameter tuning part, but intrested students can try that__ # + [markdown] id="CM3odN1Z4Zx3" colab_type="text" # # If any one wants to try other calibration algorithm istonic regression also please check these tutorials # # 1. http://fa.bianp.net/blog/tag/scikit-learn.html#fn:1 # # 2. https://drive.google.com/open?id=1MzmA7QaP58RDzocB0RBmRiWfl7Co_VJ7 # # 3. https://drive.google.com/open?id=133odBinMOIVb_rh_GQxxsyMRyW-Zts7a # # 4. https://stat.fandom.com/wiki/Isotonic_regression#Pool_Adjacent_Violators_Algorithm #
Linear Models/8E_F_LR_SVM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Filtering Signals # # In this notebook, you will get more familiar with filtering data. In the first part, we will go over some examples of creating signals, combining them, and then applying filters to separate the components. # # ## Filter Types # # Filters are separated into two types: lowpass and highpass. The distinction is based on which frequency band the filters tend to keep. Lowpass filters allow frequencies lower than the cutt-off frequency to pass through, whereas highpass filters perform the opposite operation by allowing frequencies higher than the cut-off frequency and cutting off lower frequencies. # # If you want to read more about filter design in analog and digital domains, I highly encourage you to read the free ebook provided here: # # [DSPRelated.com - Introduction to Digital Filters](https://www.dsprelated.com/freebooks/filters/) # Importing useful libraries: # NumPy for calculations # SciPy for filtering operations # Matplotlib for plotting import numpy as np import scipy.signal as sig import matplotlib.pyplot as plt # + # Let's make some signals, sampled at 1000Hz fs = 1000 # sampling frequency dt = 1/fs # sampling interval T = 1 # duration of 1 second t = np.linspace(0,1,fs) # time vector # signal #1 is a sine wave of f1 = 10 Hz and phase of 0 rad alpha1=0.5 f1 = 10 phi1 = 0 x1 = alpha1*np.sin(2*np.pi*f1*t + phi1) # signal #2 is a sine wave of f2 = 25 Hz and phase of pi/4 rad alpha2 = 0.8 f2 = 25 phi2 = np.pi/4 x2 = alpha2*np.sin(2*np.pi*f2*t + phi2) # signal #3 is a sine wave of f3 = 100 Hz and phase of 3pi/2 rad alpha3=0.69 f3 = 100 phi3 = 3*np.pi/2 x3 = alpha3*np.sin(2*np.pi*f3*t + phi3) # signal y is the superposition of all 3 signals y = x1+x2+x3 # And let's plot the signals plt.figure(num=1,figsize=[16,10]) 
plt.subplot(411)
plt.plot(t,x1,color='blue')
plt.xlim([0,1])
plt.ylim([-1,1])
plt.ylabel('X1')

plt.subplot(412)
plt.plot(t,x2,color='green')
plt.xlim([0,1])
plt.ylim([-1,1])
plt.ylabel('X2')

plt.subplot(413)
plt.plot(t,x3,color='red')
plt.xlim([0,1])
plt.ylim([-1,1])
plt.ylabel('X3')

plt.subplot(414)
plt.plot(t,y,color='magenta')
plt.xlim([0,1])
plt.ylabel('X1+X2+X3')
plt.xlabel('Time (s)')
plt.show()
# -

# The magenta signal is the superposition of all three signals. As you can see, this signal is erratic and you can't really see the different components that comprise the full signal easily. What we can do, is perform a spectral analysis! So let's plot the FFT of the combined signal.
#
# For more information on the FFT, see the SciPy documentation [here](https://docs.scipy.org/doc/scipy/reference/tutorial/fft.html#discrete-sine-transforms).

# +
from scipy.fft import fft,fftfreq

N = len(y) # number of FFT points (length of signal)
Y = fft(y) # calculate the FFT
fv = fftfreq(N, dt)[:N//2] # make the frequency vector (positive half only)

# 2/N rescales the FFT magnitude to single-sided amplitude, so the peaks
# read off directly as the sine amplitudes alpha1..alpha3.
plt.figure(num=4,figsize=[8,7])
plt.plot(fv, 2.0/N * np.abs(Y[0:N//2]))
plt.xlim([0,150])
plt.ylim([0,1])
plt.xlabel('Frequency (Hz)')
plt.ylabel('|Y(f)|')
plt.grid()
plt.show()
# -

# Here we go, three distinct peaks, centered at the frequencies of the individual components, with their respective amplitudes. So we can tell that our combined signal $y(t)=x_1(t)+x_2(t)+x_3(t)$ has three frequency components at the frequencies 10, 25, and 100 Hz, with amplitudes of 0.5, 0.8, and 0.69 respectively.

# ## Filtering the signal
#
# In this section, we will use the SciPy module's function _butter()_ to make our filters. If you would like to learn more about the process, the documentation is provided [here](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html).
#
# ### Lowpass filtering
#
# Now that we've identified the three frequency components of our signal $y(t)$, let's make a filter to cut the 100 Hz frequency out.
We will use the following functions: # + # Let's define a function for low-pass filtering using a digital Butterworth filter def butter_lowpass(cutoff, fs, order=5): fnyq = 0.5 * fs normal_cutoff = cutoff / fnyq b, a = sig.butter(order, normal_cutoff, btype='low', output='ba', fs=fs, analog=False) return b, a # And a second function for high-pass filtering def butter_highpass(cutoff, fs, order=5): fnyq = 0.5 * fs normal_cutoff = cutoff / fnyq b, a = sig.butter(order, normal_cutoff, btype='high', output='ba', fs=fs, analog=False) return b, a # This function filters the input data and returns the filtered sequence def butter_filter(data, cutoff, fs, order=5, filtype=''): if filtype.lower()=='lowpass': b, a = butter_lowpass(cutoff, fs, order=order) elif filtype.lower()=='highpass': b, a = butter_highpass(cutoff, fs, order=order) y = sig.filtfilt(b, a, data) return y # + # Make a low-pass filter order = 6 fc = 40 # desired cutoff frequency of the filter, Hz # Get the filter coefficients so we can check its frequency response. b,a = butter_lowpass(fc, fs, order) # Plot the frequency response. w, h = sig.freqz(b, a, worN=8000) plt.figure(figsize=(16,8)) plt.subplot(311) plt.plot(0.5*fs*w/np.pi, np.abs(h), 'b') plt.plot(fc, 0.5*np.sqrt(2), 'ko') plt.axvline(fc, color='k') plt.xlim(0, 0.5*fs) plt.title("Lowpass Filter Frequency Response") plt.xlabel('Frequency [Hz]') plt.grid() # Filter the data, and plot both the original and filtered signals. 
yf = butter_filter(y, fc, fs, order, 'lowpass')

plt.subplot(312)
plt.plot(t, y, 'b-', label='data')
plt.plot(t, yf, 'r-', linewidth=2, label='filtered data')
plt.xlabel('Time [sec]')
plt.xlim([0,1])
plt.grid()
plt.legend()

# Let's calculate the FFT of the filtered signal and plot the frequency components
N = len(yf) # number of FFT points (length of signal)
Yf = fft(yf) # calculate the FFT
fv = fftfreq(N, dt)[:N//2] # make the frequency vector

plt.subplot(313)
plt.plot(fv, 2.0/N * np.abs(Yf[0:N//2]))
plt.xlim([0,150])
plt.ylim([0,1])
plt.xlabel('Frequency (Hz)')
plt.ylabel('|Y(f)|')
plt.grid()

plt.subplots_adjust(hspace=0.75)
plt.show()
# -

# The first figure shows the frequency response of the $6^{th}$ order lowpass Butterworth filter. At 100Hz, the filter completely attenuates the unwanted frequency, as shown in the third figure. The middle panel shows the comparison of the unfiltered signal (in <span style="color:blue">blue</span>) while the filtered version is shown in <span style="color:red">red</span>.

# ### Highpass filtering
#
# Let's do the opposite: let's remove all frequencies _below_ 100 Hz. As an exercise, fill in the missing code in the cell below:

# +
# Make a high-pass filter
# (the `...` placeholders below are intentional — they are the exercise)
order = 6
fc = ... # desired cutoff frequency of the filter, Hz

# Get the filter coefficients so we can check its frequency response.
b,a = ...

# Filter the data.
yf = ...

# Calculate the FFT of the filtered signal
N = len(yf) # number of FFT points (length of signal)
Yf = fft(yf) # calculate the FFT
fv = fftfreq(N, dt)[:N//2] # make the frequency vector

# +
# Plot everything, as before

# Filter's frequency response.
w, h = sig.freqz(b, a, worN=8000)
plt.figure(figsize=(16,8))
plt.subplot(311)
plt.plot(0.5*fs*w/np.pi, np.abs(h), 'b')
plt.plot(fc, 0.5*np.sqrt(2), 'ko')
plt.axvline(fc, color='k')
plt.xlim(0, 0.5*fs)
# BUGFIX: this panel shows the HIGH-pass response; it was titled "Lowpass".
plt.title("Highpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
plt.grid()

# Plot both the original and filtered signals.
plt.subplot(312) plt.plot(t, y, 'b-', label='data') plt.plot(t, yf, 'r-', linewidth=2, label='filtered data') plt.xlabel('Time [sec]') plt.xlim([0,1]) plt.grid() plt.legend() # Plot the frequency spectrum of the signal plt.subplot(313) plt.plot(fv, 2.0/N * np.abs(Yf[0:N//2])) plt.xlim([0,150]) plt.ylim([0,1]) plt.xlabel('Frequency (Hz)') plt.ylabel('|Y(f)|') plt.grid() plt.subplots_adjust(hspace=0.75) plt.show() # - # # Hopefully, you were able to solve the exercise above and you filtered out the lower frequencies in the signal! If so, you may notice that the filtered signal is very similar to just $x_3$. Is that the case? # # # ### Bandpass filtering # # Now you know how to use filters to attenuate high and low frequencies out of a signal. In real-life scenarios, things are usually a little more complex. Let's say we have a signal that is affected by high-frequency noise. Which filter would you use? What if you had low-frequency noise? Can you imagine how we would attenuate all noise, both low- and high-frequency, while keeping signals in a specific band unaffected? Let's see two examples. # #### Method \#1 # # We will use the bandpass method provided in the _butter()_ function. 
# +
# Let's add a new frequency to our signal at 200 Hz
alpha4 = 1
phi4 = 0
f4 = 200
x4 = alpha4*np.sin(2*np.pi*f4*t + phi4)

yn = y+x4

# +
# Let's define a function for band-pass filtering as before
def butter_bandpass(cutoff, fs, order=5):
    """Design a digital band-pass Butterworth filter.

    Parameters
    ----------
    cutoff : array_like of length 2
        Low and high cut-off frequencies [fcl, fch] in Hz.
    fs : float
        Sampling frequency in Hz.
    order : int, optional
        Filter order (default 5).

    Returns
    -------
    b, a : ndarray
        Numerator and denominator polynomials of the IIR filter.

    Raises
    ------
    ValueError
        If `cutoff` does not contain exactly two frequencies.
    """
    if len(cutoff)!=2:
        # BUGFIX: the original printed a message and returned -1, which made
        # the caller crash later with a confusing unpacking TypeError.
        raise ValueError('error: please define frequencies as [fcl, fch] in Hz!')
    fnyq = 0.5 * fs
    # np.asarray lets plain Python lists/tuples work as well as ndarrays
    normal_cutoff = np.asarray(cutoff) / fnyq
    b, a = sig.butter(order, normal_cutoff, btype='band', analog=False)
    return b, a

# Modify this function so that it filters the input data and returns the filtered sequence even as a bandpass
def butter_filter(data, cutoff, fs, order=5, filtype=''):
    """Zero-phase Butterworth filtering; filtype is 'lowpass', 'highpass' or 'bandpass'."""
    if filtype.lower()=='lowpass':
        b, a = butter_lowpass(cutoff, fs, order=order)
    elif filtype.lower()=='highpass':
        b, a = butter_highpass(cutoff, fs, order=order)
    elif filtype.lower()=='bandpass':
        b, a = butter_bandpass(cutoff, fs, order=order)
    else:
        # Fail loudly instead of hitting a NameError on undefined b, a below.
        raise ValueError("filtype must be 'lowpass', 'highpass' or 'bandpass'")
    y = sig.filtfilt(b, a, data)
    return y

# +
# Make a band-pass filter
order = 6
fc = np.array([64,256]) # desired cutoff frequencies of the filter, Hz

# Get the filter coefficients so we can check its frequency response.
b,a = butter_bandpass(fc, fs, order)

# Filter the data.
yfn1 = butter_filter(yn, fc, fs, order, filtype='bandpass')

# Calculate the FFT of the filtered signal
N = len(yfn1) # number of FFT points (length of signal)
Yfn1 = fft(yfn1) # calculate the FFT
fv = fftfreq(N, dt)[:N//2] # make the frequency vector

# +
# Plot everything, as before

# Filter's frequency response.
wbp, hbp = sig.freqz(b, a, worN=8000)
plt.figure(figsize=(16,8))
plt.subplot(311)
plt.plot(0.5*fs*wbp/np.pi, np.abs(hbp), 'b')
plt.plot(fc[0], 0.5*np.sqrt(2), 'ko')
plt.plot(fc[1], 0.5*np.sqrt(2), 'ko')
plt.axvline(fc[0], color='k')
plt.axvline(fc[1], color='k')
plt.xlim(0, 0.5*fs)
# BUGFIX: this panel shows the BAND-pass response; it was titled "Lowpass".
plt.title("Bandpass Filter Frequency Response")
plt.xlabel('Frequency [Hz]')
plt.grid()

# Plot both the original and filtered signals.
plt.subplot(312) plt.plot(t, yn, 'b-', label='data') plt.plot(t, yfn1, 'r-', linewidth=2, label='filtered data') plt.xlabel('Time [sec]') plt.xlim([0,1]) plt.grid() plt.legend() # Plot the frequency spectrum of the signal plt.subplot(313) plt.plot(fv, 2.0/N * np.abs(Yfn1[0:N//2])) plt.xlim([0,300]) plt.ylim([0,1]) plt.xlabel('Frequency (Hz)') plt.ylabel('|Y(f)|') plt.grid() plt.subplots_adjust(hspace=0.75) plt.show() # - # #### Method \#2 # # An alternative would be to apply two separate filters in succession: a low-pass filter at our low cutoff frequency $f^c_l$, followed by a high-pass filter at our high cutoff frequency $f^c_h$. For this part, fill in the code below to perform this exact procedure. You can use the provided functions from above, or write your own (_advanced_). # + fch = ... # highpass cutoff frequency fcl = ... # lowpass cutoff frequency fc = np.array([fcl, fch]) # Low-pass filtering of the data ylp = butter_filter(..., fc[0], fs, order, 'lowpass') # High-pass filtering of the data yhp = butter_filter(..., fc[1], fs, order, 'highpass') # Final signal (simple rename) yfn2 = yhp # Calculate the FFT of the filtered signal N = ... # number of FFT points (length of signal) Yfn2 = ... # calculate the FFT fv = fftfreq(N, dt)[:N//2] # make the frequency vector # + # Plot the double-filtered signal and compare it to the previously bandpass filtered signal # Filter's frequency response. 
# Design all three filters so their responses can be compared directly.
blp,alp = butter_lowpass(fc[0], fs, order)
bhp,ahp = butter_highpass(fc[1], fs, order)
bbp,abp = butter_bandpass(fc, fs, order)

wlp, hlp = sig.freqz(blp, alp, worN=8000)
whp, hhp = sig.freqz(bhp, ahp, worN=8000)
wbp, hbp = sig.freqz(bbp, abp, worN=8000)

plt.figure(figsize=(16,8))
plt.subplot(311)
plt.plot(0.5*fs*wbp/np.pi, np.abs(hbp), 'b-', label='bandpass')
plt.plot(0.5*fs*wlp/np.pi, np.abs(hlp), 'g--', label='lowpass')
plt.plot(0.5*fs*whp/np.pi, np.abs(hhp), 'r--', label='highpass')
plt.plot(fc[0], 0.5*np.sqrt(2), 'ko')  # -3 dB points at both band edges
plt.plot(fc[1], 0.5*np.sqrt(2), 'ko')
plt.axvline(fc[0], color='k', linestyle='dashed')
plt.axvline(fc[1], color='k', linestyle='dashed')
plt.xlim(0, 0.5*fs)
# BUGFIX: the panel overlays all three responses; it was titled
# "Lowpass Filter Frequency Response".
plt.title("Filter Frequency Responses")
plt.xlabel('Frequency [Hz]')
plt.grid()
plt.legend()

# Plot both the original and filtered signals.
plt.subplot(312)
plt.plot(t, yfn1, 'b-', linewidth=2, label='filtered data (band-pass)')
plt.plot(t, yfn2, 'r--', linewidth=2, label='filtered data (low+high)')
plt.xlabel('Time [sec]')
plt.xlim([0.5,0.75])
plt.grid()
plt.legend()

# Plot the frequency spectrum of the signal
plt.subplot(313)
plt.plot(fv, 2.0/N * np.abs(Yfn1[0:N//2]), 'b-')
plt.plot(fv, 2.0/N * np.abs(Yfn2[0:N//2]), 'r--')
plt.xlim([75,225])
plt.ylim([0,1])
plt.xlabel('Frequency (Hz)')
plt.ylabel('|Y(f)|')
plt.grid()

plt.subplots_adjust(hspace=0.5)
plt.show()
lessons/Jupyter Notebooks/Tutorial on filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # !pip install geopandas # !pip install descartes import numpy as np import pandas as pd import matplotlib.pyplot as plt import descartes import geopandas as gpd from geopandas import GeoDataFrame from shapely.geometry import Point, Polygon #import adjustText as aT import geopandas as gpd polygon_file = '../data/CAMS_ZIPCODE_STREET_SPECIFIC.shp' polygons = gpd.read_file(polygon_file) polygons.head() polygons = polygons.to_crs(epsg=4326) polygons.head() polygons.iloc[0].geometry # + # For google maps, we should change lat <-> lon def swap_xy(geom): def swap_xy_coords(coords): for x, y in coords: yield (y, x) ring = geom.exterior shell = type(ring)(list(swap_xy_coords(ring.coords))) holes = list(geom.interiors) for pos, ring in enumerate(holes): holes[pos] = type(ring)(list(swap_xy_coords(ring.coords))) return type(geom)(shell, holes) google_maps_locations = polygons.geometry.apply(swap_xy) # - google_maps_locations
samir_test.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="WTJX0pFCQpSH" # # Loop # # --- # Many nummerical problem become easier when using loop. # + [markdown] id="i-iZvqKlUTUZ" # Famous numerical problem # $$2^x=x^2$$ which has two real-solutions $x=2$ and $x=4$. The equation is equivalent to $$ x=\frac{2\ln(x)}{\ln2} $$ or $$ x=\sqrt{2^x} $$ # # We can use the following code to safely secure the answer $x$ using numerical method. # + [markdown] id="Z4QrQk9ABGpm" # **First, we can try the following iterations** # $$x_{i+1}=\frac{2\ln{x_i}}{\ln{2}}$$ # + id="ZZARyv5HYI26" import math x=5 while True: x=2*math.log(x)/math.log(2) _=2*math.log(x)/math.log(2) if abs(x-_)<1e-15: break print(x) # + [markdown] id="ucLFMd6UB2_1" # We can observe how the value of $x$ approach the root of equation by plotting the graph. # + id="JVafYhJ1BwRO" import matplotlib.pyplot as plt for x in [2,2.5,3,3.5,4,4.5,5,5.5,6]: x_list=[x] while True: x=2*math.log(x)/math.log(2) x_list.append(x) _=2*math.log(x)/math.log(2) if abs(x-_)<1e-3: break plt.plot(x_list) plt.xlabel('Number of Iterations') plt.ylabel('Current Value of x') plt.show() # + [markdown] id="39cbK4KgGwyM" # Now, let's see what happens if we use the sequence # $$x_{i+1}=\sqrt{2^{x_i}}$$ # in the iteration # + id="y23iAtTkZROI" x=3.8 while True: x=math.sqrt(2**x) _=math.sqrt(2**x) if abs(x-_)<1e-15: break print(x) # + id="Flyn6NaVHnkP" for x in [-1,0,1,1.5,2,2.5,3,3.5,4]: x_list=[x] while True: x=math.sqrt(2**x) x_list.append(x) _=math.sqrt(2**x) if abs(x-_)<1e-3: break plt.plot(x_list) plt.xlabel('Number of Iterations') plt.ylabel('Current Value of x') plt.show() # + [markdown] id="Gt2lBd_rdPDq" # This example shows that, we can also use basics operation to solve some numerical problems that are hard to solve # $$\sqrt{3}=?$$ # First, we see that this is equivalent 
to find the positive root of $$x^2 -3=0$$ # we can write # $$x=\sqrt{3}$$ # We cannot use the previous method to find $\sqrt{3}$ (really?) # + [markdown] id="YH5NFyySQtaT" # ## for loop: definite iterations # ___ # We use <a>for</a> when the elements for iteration are present. For statement will implement the sets operations under the <a>for</a> block for each elements in the given iterable object. (<a>list, string, dict, tuple, range</a>). The order of element in which the loop excute follows the order of elements in that iterable object. # ### Structure of <a>For Loop</a> # For loop has the following structure, # # # # ``` # for item in (iterable)object: # action 1 # action 2 # action 3 # . # . # . # action n # ``` # # **Unindentations** imply the action outside the loop and termination of *loop block*. # # ``` # for item in (iterable)object: # action in loop 1 # action in loop 2 # action in loop 3 # ## This is where the loop block end # action outside # ``` # # **Else** can be used with for loop. The code inside *else* block is excuted after the loop finish all the elements in iterable object. # # ``` # for item in (iterable)object: # action in loop 1 # action in loop 2 # action in loop 3 # ## This is where the loop block end # else: # action_else 1 # action_else 2 # . # . # . # action outside # ``` # # + id="fJWAmWVYHGpY" for i in range(5): print(' looping ....') print('I am outside the loop') # + [markdown] id="kM6DW8zIkUbQ" # For <a>list</a> object, the iterations are done in the same order as the arrangement of **list** # + id="E6KCStvoRBUK" x = [1,'s', True, [1,2,3]] for i in x: print(i) # + [markdown] id="gziRSN7FSCJD" # This example shows how we can find the sum of 1 to 100 # + id="qiIhTyTeRaO6" sum=0 for i in range(0,101): sum=sum+i print(sum) # + [markdown] id="1mlgkEsUSRvq" # Two examples below shows how we can find the sum of even integer up until 100 by loop. 
# + id="8XBfEr-6SLWe" sum=0 for i in range(0,101,2): sum=sum+i print('This is the iterations of number ',i,' and the total summation is ',sum) print('Summation of this interations yields ',sum) # + id="73_N7Q3yShMG" sum=0 for i in range(0,101): if i%2==0: sum=sum+i print('This is the iterations of number ',i,' and the total summation is ',sum) print('Summation of this interations yields ',sum) # + [markdown] id="HYuis6qJWEfX" # Here is the example to calculate the fibonacci number using for loop<br> # # # Fibonacci number is defined by recurrence # $$ F(n) = # \begin{cases} # 1 & \quad n =1,2\\ # F(n)=F(n-1)+F(n-2) & \quad \text{for } n >2 # \end{cases} # $$ # # The closed form of this recurrent relation is # # $$F(n)=\frac{\left({\frac{1+\sqrt{5}}{2}}\right)^n-\left({\frac{1-\sqrt{5}}{2}}\right)^n}{\sqrt{5}}$$ # We can find all Fibonacci number without advance knowledge in mathematics using loop as follows # + id="AhiZTnIfVQft" N=int(input('n= ')) if N >2: a=1 b=1 for i in range(N-2): fibo=a+b a=b b=fibo else: fibo=1 print(fibo) # + [markdown] id="VufC6mgzQ1HR" # ## while loop: indefinite iterations # ___ # # Unlike <a>for</a>, the loop <a>while</a> can be used without iterable object. However, this comes with consequence that *infinite* loop may occurs. # # ### Structure of <a>while loop</a> # ``` # while boolean (value or statement): # action in loop 1 # action in loop 2 # action in loop 3 # . # . # . # ## This is where the loop block end # action outside # ``` # **Unindentation** and **else** can be used the same way as for loop. # + id="ec2DJQo6TTTE" cellView="both" x=0 while True: print('Hello World '+str(x)) x=x+1 # + id="uepafnyDQzgo" x=0 while x<100: x=x+1 print(x) else: print('done') # + [markdown] id="6m17o9ZjPcRW" # # Continue and Break # ___ # In some case, we need to skip some operation in the loop if some statement is True. Sometimes, the loop should be terminated when time pass. 
In Python, we have<a>continue</a> and <a>break</a> keywords for this kind of manipulation. # + [markdown] id="Hj0A3xiIObfj" # ## Continue # <a>continue</a> is a keyword in both types of loop.<a>continue</a> inside *for* block will skip all the action within that *iteration*. For example # + id="Nti1FO1aQMJn" for i in range(5): print(i) print('Yahoo') # + id="psA9gOKWQiJS" for i in range(5): print(i) if i == 2: continue print(i+100) # This comes after continue so it wont be printed out print('Yahoo') # + [markdown] id="tmbg8TOorlc2" # ## Break # <a>break</a> is a keyword that **terminates**(no kidding) the loop. # + id="Z1gh0C4NqE9w" x=0 while x<10: x=x+1 if x==7: break print(x) else: print('done') # + id="3Bd09SxhtbPX" for i in range(5): print(i) if i == 2: break print('Yahoo') # + [markdown] id="LDwJp-IJ4u1d" # # Function # ___ # # + [markdown] id="-9RztcJ9OXTj" # ## Definition of Function # In python, like other programming language, we can define additional function to be called when needed. The syntax for function definition is # ``` # def function_name(arg1,arg2,...): # action 1 ## # action 2 # . # . # . # ``` # # arg1, arg2, arg3 are variables or values used as inputs for the function. For example, # + id="BTHR8hNTtt9m" def square(value): print(value**2) # + id="I77uacyIt1id" square(3) # + [markdown] id="fRaju75Ft4uT" # ## Variable Space # Unlike loop, the variables used in function is *local*. # + id="b4hZ0xKFQUqG" x=0 def test(): x=1 print(x) # + id="uWsN-s65QbkM" test() # + id="gBgPX1k8QfkT" x # + id="cTPP7S7mZjc5" def test(): global x x=1 print(x) # + id="ZpA-jmzYZrLw" test() # + id="qQq7uPxqZtEB" x # + [markdown] id="sBTrwZKtQr5H" # ## Return Vs None-Return Function # In function definition, the keywords <a>return</a> defines the value of that function. If there is no <a>return</a> in the function, it value is defined to be <a>None</a> # # **The values for return can be of any type.** Function can return function or any other object. 
# + id="WPuxWjge4t3a" def returnfn(): return 3 print(returnfn()) # + id="wzkddDnv5XVh" def nonreturnfn(): pass print(nonreturnfn()) # + id="hxIVVVRIS7D-" def ff(): def a(): pass return a # + id="yricxOnsTF0_" ff() # + id="4wFb7Wd4SWE1" 1+returnfn() # + id="W0bxSW8nSdLQ" 1+nonreturnfn() # + id="4qP4fO_aTmhx" x=returnfn() # + id="cx2jPbdYTpuQ" x # + id="aagXUtdhTu2I" print(x) # + id="YiqIEuCw6sl_" x=nonreturnfn() # + id="Tk852SNPc0Ge" x # + id="d27szHOhTwoY" print(x) # + [markdown] id="JrfqCHB-T8lj" # ## Passing argument to function # + id="Fh2bL0_t6u6I" def mod2_addition(x,y): return (x+y)%2 # + id="pyDLXTSZMeA7" mod2_addition(3,4) # + id="ZIrFgHklUKMH" mod2_addition(3,y=4) # + id="mq4PZpKSUVve" mod2_addition(x=3,y=4) # + id="lR_coviyUYHi" mod2_addition(x=3,4) # + [markdown] id="dF3K-nQLUgVC" # ## Default value # + id="sQu43QjEMga0" def modulo_addition(x,y,mod=2): return (x+y)%mod # + id="kkKlU34Fcnte" modulo_addition(2,3) # + id="gURB8mf0UIuj" modulo_addition(5,6) # + id="HrpxvwQtUrsn" modulo_addition(5,6,7)
Coding_Hour3.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ![](../../images/featuretools.png) # # # Featuretools Implementation with Dask # # A calculation of Deep Feature Synthesis from the Automated Loan Repayment notebook running on a single core takes about 25 hours on an AWS EC2 machine. Clearly we need a better approach for practical implementations of calculating a large feature matrix, one that allows us to use all cores of whatever machine we are using. # # Featuretools does have support for parallel processing if you have multiple cores (which nearly every laptop now does), but it currently sends the entire EntitySet to each process, which means you might exhaust the memory on any one core. For example, the AWS machine has 8 GB per core, which might seem like a lot until you realize the EntitySet takes up about 11 GB and setting `n_jobs=-1` will cause an out of memory error. Therefore, we cannot use the parallel processing in Featuretools and instead have to engineer our own implementation with Dask. # # Fortunately, options such as [Dask](https://dask.pydata.org/en/latest/) make it easy to take advantage of multiple cores on our own machine. In this notebook, we'll see how to run Deep Feature Synthesis in about 3 hours on a personal laptop with 8 cores and 16 GB of RAM. # # ![](../../images/dask_logo.png) # # # ## Roadmap # # Following is our plan of action for implementing Dask with Featuretools # # 1. Convert `object` data types to `category` # * This reduces memory consumption significantly # 2. Create 104 partitions of data and save to disk # * Each partition will contain all data for 1/104 of the clients # * Each partition can be used to __independently__ make an EntitySet and then a feature matrix # 3. Write a function to take a partition and create an `EntitySet` # 4. 
Write a function to take an `EntitySet` and calculate a `feature_matrix` that is saved to disk # 5. Use Dask to parallelize 3. and 4. to create 104 feature matrices saved on disk # 6. (Optionally) read in the individual feature matrices and combine into a single feature matrix # # The general idea is to __take advantage of all our system resources by breaking one large problem into many smaller ones.__ Each of these smaller problems can be completed on one processor which means we can run multiple (8) of these problems at a time. # # At the end, we'll have a working implementation of Dask that lets us take full advantage of our computing resources. While a naive approach to this problem would just be renting a larger machine, that won't solve our problem because the bottleneck is not RAM, but using multiple cores at a time. __Often, a better approach than getting more computating resources is to find a way to use the resources we do have as efficiently as possible.__ # + # pandas and numpy for data manipulation import pandas as pd import numpy as np # featuretools for automated feature engineering import featuretools as ft import featuretools.variable_types as vtypes # Utilities import sys import psutil import os from timeit import default_timer as timer # - # ## Convert Data Types # # The first step is to convert all the data types we can. Using `category` instead of `object` can significantly reduced memory usage if the number of unique categories is much less than the number of observations. For more on the `category` type in Pandas, look at [the documentation.](https://pandas.pydata.org/pandas-docs/stable/categorical.html) # # While this isn't specific to Dask, it's a good practice in general. The function below can be modified for different problems as required. 
def convert_types(df):
    """Convert pandas data types for memory reduction.

    Downcasts columns in place and returns the (mutated) dataframe:
    ID columns -> int32, low-cardinality object columns -> category,
    {0, 1} columns -> bool, float64 -> float32, int64 -> int32.
    """
    # Iterate through each column
    for c in df:

        # Convert ids and booleans to integers
        # NOTE(review): missing ids become 0 before the cast -- confirm that
        # downstream joins treat 0 as "no id" rather than as a real key.
        if ('SK_ID' in c):
            df[c] = df[c].fillna(0).astype(np.int32)

        # Convert objects to category (only pays off when the number of
        # unique values is smaller than the number of rows)
        elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):
            df[c] = df[c].astype('category')

        # Booleans mapped to integers (a column of exactly {0, 1})
        elif set(df[c].unique()) == {0, 1}:
            df[c] = df[c].astype(bool)

        # Float64 to float32 (halves memory at the cost of precision)
        elif df[c].dtype == float:
            df[c] = df[c].astype(np.float32)

        # Int64 to int32
        elif df[c].dtype == int:
            df[c] = df[c].astype(np.int32)

    return df

# Now we'll read in the datasets and apply the convert types function.

# +
# Read in the datasets and replace the anomalous values
# (365243 is Home Credit's sentinel for "missing" in day-offset columns)
app_train = pd.read_csv('../input/application_train.csv').replace({365243: np.nan})
app_test = pd.read_csv('../input/application_test.csv').replace({365243: np.nan})
bureau = pd.read_csv('../input/bureau.csv').replace({365243: np.nan})
bureau_balance = pd.read_csv('../input/bureau_balance.csv').replace({365243: np.nan})
cash = pd.read_csv('../input/POS_CASH_balance.csv').replace({365243: np.nan})
credit = pd.read_csv('../input/credit_card_balance.csv').replace({365243: np.nan})
previous = pd.read_csv('../input/previous_application.csv').replace({365243: np.nan})
installments = pd.read_csv('../input/installments_payments.csv').replace({365243: np.nan})

# Test rows get a NaN target so train and test can share one frame
app_test['TARGET'] = np.nan

# Join together training and testing
# NOTE(review): DataFrame.append was removed in pandas 2.0; under a modern
# pandas this needs pd.concat([app_train, app_test], ignore_index=True, sort=True).
app = app_train.append(app_test, ignore_index = True, sort = True)
number_clients = app.shape[0]

# Need `SK_ID_CURR` in every dataset
bureau_balance = bureau_balance.merge(bureau[['SK_ID_CURR', 'SK_ID_BUREAU']],
                                      on = 'SK_ID_BUREAU', how = 'left')

print(f"""Total memory before converting types: \
{round(np.sum([x.memory_usage().sum() / 1e9 for x in [app, bureau, bureau_balance, cash, credit, previous, installments]]), 2)} gb.""")

# Convert types to reduce memory usage
app = convert_types(app)
bureau = convert_types(bureau)
bureau_balance = convert_types(bureau_balance)
cash = convert_types(cash)
credit = convert_types(credit)
previous = convert_types(previous)
installments = convert_types(installments)

print(f"""Total memory after converting types: \
{round(np.sum([x.memory_usage().sum() / 1e9 for x in [app, bureau, bureau_balance, cash, credit, previous, installments]]), 2)} gb.""")

# Set the index for locating (every table keyed by client id so the
# partitioning step below can subset with index.isin)
for dataset in [app, bureau, bureau_balance, cash, credit, previous, installments]:
    dataset.set_index('SK_ID_CURR', inplace = True)

# +
# Demonstrate the object -> category saving on a single column
print('Object memory usage.')
print(bureau['CREDIT_TYPE'].astype('object').memory_usage() / 1e9, 'gb')

print('Category memory usage.')
print(bureau['CREDIT_TYPE'].astype('category').memory_usage() / 1e9, 'gb')

print('Length of data: ', bureau.shape[0])
print('Number of unique categories: ', bureau['CREDIT_TYPE'].nunique())
# -

# We can see the significant difference in memory usage depending on the data type. Since we are looking to get the most from our machine, any step that can reduce computational overhead is beneficial.

# # Partitioning Data
#
# Next, we partition the data into 104 separate datasets based on the client id, `SK_ID_CURR` and save the partitions to disk. Every partition will contain the data associated with a subset of the clients and therefore will have 7 smaller csv files. __What is important here is that each partition contains all the data needed to make a feature matrix for the clients and therefore the feature matrix calculations are independent of one another.__
#
# * Each partition by itself contains all the data needed to make an `EntitySet` for the clients
# * This `EntitySet` can then be used to create a feature matrix
# * Partitioning and saving the raw data allows for more flexibility when we create the entity set and feature matrix
#
# 104 partitions was chosen after some trial and error guided by 4 general ideas:
#
# 1. We want more tasks than workers
# 2.
#    The tasks must be small enough that they don't exhaust the memory of an individual worker
# 3. More tasks will decrease the variance in completion time for each task
# 4. The number of tasks should be a multiple of the number of workers
#
# As with many applications in machine learning, experimentation is often the best way to find what works.

def create_partition(user_list, partition):
    """Creates and saves a dataset with only the users in `user_list`.

    Writes the 7 per-client csv files into ../input/partitions/p<partition+1>.
    A no-op if the partition directory already exists (resumable).
    """
    # Make the directory; skip work already done on a previous run
    directory = '../input/partitions/p%d' % (partition + 1)
    if os.path.exists(directory):
        return
    else:
        os.makedirs(directory)

    # Subset based on user list (every table was indexed by SK_ID_CURR above)
    app_subset = app[app.index.isin(user_list)].copy().reset_index()
    bureau_subset = bureau[bureau.index.isin(user_list)].copy().reset_index()

    # Drop SK_ID_CURR from bureau_balance, cash, credit, and installments
    # (reset_index(drop=True) discards the client-id index for these tables)
    bureau_balance_subset = bureau_balance[bureau_balance.index.isin(user_list)].copy().reset_index(drop = True)
    cash_subset = cash[cash.index.isin(user_list)].copy().reset_index(drop = True)
    credit_subset = credit[credit.index.isin(user_list)].copy().reset_index(drop = True)
    previous_subset = previous[previous.index.isin(user_list)].copy().reset_index()
    installments_subset = installments[installments.index.isin(user_list)].copy().reset_index(drop = True)

    # Save data to the directory
    app_subset.to_csv('%s/app.csv' % directory, index = False)
    bureau_subset.to_csv('%s/bureau.csv' % directory, index = False)
    bureau_balance_subset.to_csv('%s/bureau_balance.csv' % directory, index = False)
    cash_subset.to_csv('%s/cash.csv' % directory, index = False)
    credit_subset.to_csv('%s/credit.csv' % directory, index = False)
    previous_subset.to_csv('%s/previous.csv' % directory, index = False)
    installments_subset.to_csv('%s/installments.csv' % directory, index = False)

    # Progress report every 10th partition
    if partition % 10 == 0:
        print('Saved all files in partition {} to {}.'.format(partition + 1, directory))

# +
# Break into 104 chunks: 103 full chunks plus one remainder chunk
chunk_size = app.shape[0] // 103

# Construct an id list: one list of client ids per partition
id_list = [list(app.iloc[i:i+chunk_size].index) for i in range(0, app.shape[0], chunk_size)]

# +
from itertools import chain

# Sanity check that we have not missed any ids
print('Number of ids in id_list: {}.'.format(len(list(chain(*id_list)))))
print('Total length of application data: {}.'.format(len(app)))

# +
start = timer()
for i, ids in enumerate(id_list):
    # Create a partition based on the ids
    create_partition(ids, i)
end = timer()
print(f'Partitioning took {round(end - start)} seconds.')
# -

# __I already had the partitions made, but running the above cell took 1300 seconds (21 minutes) the first time.__
#
# We can independently generate the feature matrix for each partition because the partition contains all the data for that group of clients. Moreover, each subset of data is small enough for the feature matrix calculation to fit entirely on one core.

# #### Load in Feature Definitions
#
# We already calculated the feature definitions in the Automated Loan Repayment notebook so we can read them in. This avoids the need to have to recalculate the features on each partition. Instead of using `ft.dfs`, if we have the feature names, we can use `ft.calculate_feature_matrix` and pass in the `EntitySet` and the feature names. More importantly, it ensures that we create __the exact same set of features for each partition.__

feature_defs = ft.load_features('../input/features.txt')
print(len(feature_defs))

# For each feature matrix, we'll make 1820 features.

# #### Variable Types
#
# In the Automated notebook, we specified the variable types when adding entities to the entityset. However, since we already properly defined the data types for each column, Featuretools will now infer the correct variable type. For example, while before we have Booleans mapped to integers which would be interpreted as numeric, now the Booleans are represented as Booleans and hence will be correctly inferred by Featuretools.
# # Function to Create EntitySet from Partition
#
# The next function takes a single partition of data and makes an `EntitySet`. We won't save these entitysets to disk, but instead will keep them in memory while calculating the feature matrices. Therefore, if we want to make any changes to the `EntitySet`, such as adding in interesting values or seed features, we can alter this function and remake the `EntitySet` without having to rewrite all the Entity Sets on disk. Writing the entity sets to disk would be another option if we are sure that they won't ever change. We are not going to change the raw data which is why I choose to save the data partitions to the hard drive.

def entityset_from_partition(path):
    """Create an EntitySet from a partition of data specified as a path.

    Returns a dictionary with the entityset and the number used for
    saving the feature matrix.
    """
    # NOTE(review): this slice assumes `path` starts with exactly
    # '../input/partitions/p' (21 characters); it breaks silently if the
    # base directory ever changes -- parsing the basename would be safer.
    partition_num = int(path[21:])

    # Read in data (the 7 csvs written by create_partition)
    app = pd.read_csv('%s/app.csv' % path)
    bureau = pd.read_csv('%s/bureau.csv' % path)
    bureau_balance = pd.read_csv('%s/bureau_balance.csv' % path)
    previous = pd.read_csv('%s/previous.csv' % path)
    credit = pd.read_csv('%s/credit.csv' % path)
    installments = pd.read_csv('%s/installments.csv' % path)
    cash = pd.read_csv('%s/cash.csv' % path)

    # Empty entityset
    es = ft.EntitySet(id = 'clients')

    # Entities with a unique index
    es = es.entity_from_dataframe(entity_id = 'app', dataframe = app, index = 'SK_ID_CURR')
    es = es.entity_from_dataframe(entity_id = 'bureau', dataframe = bureau, index = 'SK_ID_BUREAU')
    es = es.entity_from_dataframe(entity_id = 'previous', dataframe = previous, index = 'SK_ID_PREV')

    # Entities that do not have a unique index (make_index adds a surrogate key)
    es = es.entity_from_dataframe(entity_id = 'bureau_balance', dataframe = bureau_balance,
                                  make_index = True, index = 'bureaubalance_index')
    es = es.entity_from_dataframe(entity_id = 'cash', dataframe = cash,
                                  make_index = True, index = 'cash_index')
    es = es.entity_from_dataframe(entity_id = 'installments', dataframe = installments,
                                  make_index = True, index = 'installments_index')
    es = es.entity_from_dataframe(entity_id = 'credit', dataframe = credit,
                                  make_index = True, index = 'credit_index')

    # Relationship between app_train and bureau
    r_app_bureau = ft.Relationship(es['app']['SK_ID_CURR'], es['bureau']['SK_ID_CURR'])

    # Relationship between bureau and bureau balance
    r_bureau_balance = ft.Relationship(es['bureau']['SK_ID_BUREAU'], es['bureau_balance']['SK_ID_BUREAU'])

    # Relationship between current app and previous apps
    r_app_previous = ft.Relationship(es['app']['SK_ID_CURR'], es['previous']['SK_ID_CURR'])

    # Relationships between previous apps and cash, installments, and credit
    r_previous_cash = ft.Relationship(es['previous']['SK_ID_PREV'], es['cash']['SK_ID_PREV'])
    r_previous_installments = ft.Relationship(es['previous']['SK_ID_PREV'], es['installments']['SK_ID_PREV'])
    r_previous_credit = ft.Relationship(es['previous']['SK_ID_PREV'], es['credit']['SK_ID_PREV'])

    # Add in the defined relationships
    es = es.add_relationships([r_app_bureau, r_bureau_balance, r_app_previous,
                               r_previous_cash, r_previous_installments, r_previous_credit])

    return ({'es': es, 'num': partition_num})

# Let's test the function to make sure it can make an `EntitySet` from a data partition.

es1_dict = entityset_from_partition('../input/partitions/p1')

es1_dict['es']

# The function works as intended. The next step is to write a function that can take a single `EntitySet` and the `features` we want to build, and make a feature matrix. (`entityset_from_partition` returns a dictionary with the partition number so we can save the feature matrix based on this number.)

# # Function to Create Feature Matrix from EntitySet
#
# With the entity set and the feature names, generating the feature matrix is a one-liner in Featuretools. Since we are going to use Dask for parallelizing the operation, we'll set the number of jobs to 1.
# The `chunk_size` is an extremely important parameter, and I'd suggest experimenting with this to find the optimal value. What I've found works best is setting the `chunk_size` to the length of the entire dataset provided it can all fit in memory. The best choice may depend on the exact problem.
#
# The last step in the function is to save the feature matrix to disk using the name of the partition of data. Using the `feature_defs` ensures that we create the exact same set of features for each partition.

def feature_matrix_from_entityset(es_dict, feature_defs, return_fm = False):
    """Run deep feature synthesis from an entityset and feature definitions.

    Saves feature matrix based on partition.  `es_dict` is the
    {'es': EntitySet, 'num': partition_number} dict produced by
    entityset_from_partition; set `return_fm=True` to also get the
    dataframe back (used when testing a single partition).
    """
    # Extract the entityset
    es = es_dict['es']

    # Calculate the feature matrix and save
    # n_jobs=1 because Dask handles the parallelism across partitions;
    # chunk_size is the full partition so each call computes in one pass.
    feature_matrix = ft.calculate_feature_matrix(feature_defs,
                                                 entityset=es,
                                                 n_jobs = 1,
                                                 verbose = 0,
                                                 chunk_size = es['app'].df.shape[0])

    feature_matrix.to_csv('../input/fm/p%d_fm.csv' % es_dict['num'], index = True)

    if return_fm:
        return feature_matrix

# Below we test the function using the entityset from the first partition.

# +
import warnings
warnings.filterwarnings('ignore', category = FutureWarning)

start = timer()
fm1 = feature_matrix_from_entityset(es1_dict, feature_defs, return_fm = True)
end = timer()

fm1.shape
# -

print(f'Computing one feature matrix took {round(end - start, 2)} seconds.')

# __We now have both parts needed to go from a data partition on disk to a feature matrix made using 1/104 of the data.__ All we have to do is repeat this operation 104 times and we will have all of our features. Since we have eight cores, we can make eight feature matrices at once (your number may differ). This gets around the fundamental bottleneck in the calculation: __running on a single core is inefficient__ especially when we have 8 available.
#
# To actually run this in parallel, we use the Dask library.

# # Dask
#
# We will use the Dask to parallelize the calculation of feature matrices. First, we'll import and set up a `Client` using processes, which will create one worker for each core on the machine. The memory limit of each worker will be the total system memory (16 gb) divided by the number of cores (8).
#
# Then we'll use the `db.from_sequence` method to create a "Dask bag" from the partition paths. A [Dask bag](http://dask.pydata.org/en/latest/bag-overview.html) is just a set of operations that we want to run in parallel. We then `map` the paths to the `entityset_from_partition` function which will create the `EntitySets`. These in turn are `map`ped to the `feature_matrix_from_entityset` to make the `feature_matrix` for one of the 104 partitions. (A map is a method that takes a function and a list of inputs and applies the function to each input).
#
# Below we clear the system memory for a full run of Dask.

# +
import gc

# Free up all system memory: the full dataframes are no longer needed
# because each worker re-reads its partition from disk
gc.enable()
del app, bureau, bureau_balance, previous, credit, cash, installments
gc.collect()
# -

# The code below starts up 8 workers, each using one of our cores. The memory limit per worker will be the total system memory divided by the number of cores. We use `processes` instead of threads because we are doing computationally heavy work and because each calculation is __independent__ meaning the workers do not need to talk with one another.
#
# (The issue with Python and threads is that threads share memory - processes do not - and because of the [Global Interpreter Lock](https://wiki.python.org/moin/GlobalInterpreterLock) there are few operations that can run in parallel using threads.) For more on the topic of processes, threads, and the global interpreter lock in Python, I recommend [this article](https://medium.com/@bfortuner/python-multithreading-vs-multiprocessing-73072ce5600b).
# + import dask.bag as db from dask.distributed import Client # Use all 8 cores client = Client(processes = True) # - client.ncores() # ## Visualizations of Dask # # After starting a `Client`, if you have `Bokeh` installed, you can navigate to http://localhost:8787/ to view the status of the workers. Doing this on my machine (8 cores with 16 gb total RAM) gives me: # # ![](../images/process_workers.png) # # Right now we aren't taxing our system very much! # Next, let's create a list of paths of our partitions. paths = ['../input/partitions/p%d' % i for i in range(1, 105)] paths[:8] # We made the partitions small enough that none of the feature matrices will be too large for an individual worker. # # The next step is the heart of the code. We create a "Dask bag" from the paths, map this to the `EntitySet` creating function, and then map the result to the `feature_matrix` create and save function. The `EntitySet` is never saved and only exists in working memory. The cell below does not actually execute the code, but only creates the `bag` of tasks that Dask will then be able to allocate to our workers. # + # Create a bag object b = db.from_sequence(paths) # Map entityset function b = b.map(entityset_from_partition) # Map feature matrix function b = b.map(feature_matrix_from_entityset, feature_defs = feature_defs) b # - # The cell below carries out the computation. Nothing is returned since each feature matrix is saved as a `csv`. # + overall_start = timer() b.compute() overall_end = timer() print(f"Total Time Elapsed: {round(overall_end - overall_start, 2)} seconds.") # - # If we look at the task graph in the Bokeh dashboard we can see all the tasks that and the sequence in which they have to be completed. From the structure of this [Directed Acyclic Graph](https://en.wikipedia.org/wiki/Directed_acyclic_graph), it's clear that this problem is highly parallizable! 
# In computer science terms, this is referred to as [embarrassingly parallel](http://www.cs.iusb.edu/~danav/teach/b424/b424_23_embpar.html) because the tasks do not need to communicate with one another. This also lets us run on processes which do not share memory.
#
# ![](../images/taskgraph_inprogress.png)

# If you have Bokeh installed, you can see system information during the run.
#
# For example, we can look at the task stream which shows the workers and all the tasks:
#
# ![](../images/taskstream_inprogress.png)
#
# All 8 of our cores are being utilized with a total of 208 tasks to complete.

# The profile tab shows the amount of time taken by each operation:
#
# ![](../images/profile2.png)

# As the operations complete, the Task Graph updates.
#
# ![](../images/taskgraph_completing.png)

# ## Optional Final Feature Matrix
#
# If we want one final matrix, we can read in the individual feature matrices and join them together. This could be done in Dask using threads, but it just as easily can be done in pure Python with Pandas.

# Base directory for feature matrices
base = '../input/fm/'
fm_paths = [base + p for p in os.listdir(base) if 'fm.csv' in p]

# First we read in the dataframes and place them in a list.

# +
read_start = timer()
fms = [pd.read_csv(path) for path in fm_paths]
read_end = timer()

print(f'Reading in {len(fms)} feature matrices took {round(read_end - read_start)} seconds.')
# -

# Then we concatenate all the dataframes in the list along the first axis - meaning that we add the rows to each other.

# +
concat_start = timer()
# Row-wise concatenation: partitions are disjoint by client id
feature_matrix = pd.concat(fms, axis = 0)
concat_end = timer()

print('Final Feature Matrix Shape:', feature_matrix.shape)
# -

print(f"Concatenation time: {round(concat_end - concat_start, 2)} seconds.")

# The final feature matrix is exactly the expected shape: the number of clients in `app` by the number of features.

# If you don't already have the feature matrix, you can use the following line to save it to disk. This is now ready for feature selection and modeling!

# feature_matrix.reset_index(inplace = True)
# feature_matrix.to_csv('../input/feature_matrix.csv', index = False)

feature_matrix.head()

# # Conclusions
#
# __Using Dask to run the operations in parallel on all our cores reduced the total time to get the feature matrix from 25 hours to under 3 hours.__ This notebook shows that sometimes instead of always trying to get a bigger machine, we need to think about how to use the resources we have as efficiently as possible.
#
# For this problem, to run it in parallel, we had to break one large problem into several smaller ones as follows:
#
# 1. Partition the data into subsets based on the clients
# 2. Write a function to generate an `EntitySet` from a partition
# 3. Write a function to create a `feature_matrix` from an `EntitySet`
# 4. Set up Dask to use all 8 cores to make a feature matrix from 8 partitions at once
# 5. Save the resulting feature matrices to disk
# 6. (Optional) read in the individual feature matrices to create one final feature matrix.
#
# __Parallel processing allows us to take full advantage of our system's resources and the same framework developed in this notebook can be applied to other data science and machine learning problems as well: break a large problem up into manageable smaller ones. Now, not only can we use Featuretools to automatically generate thousands of relevant features, but we can use Dask to run the calculation in parallel and get the most out of our available system.__
Loan Repayment/notebooks/Featuretools on Dask.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] colab_type="text" id="vTKpNmoM6x-C"
# Download Data

# + colab={} colab_type="code" id="TZW55ko46x-S"
import requests


def download_file_from_google_drive(id, destination):
    """Download a (possibly large) file from Google Drive to `destination`.

    `id` is the Drive file id taken from a shareable link.  Large files
    trigger Google's virus-scan interstitial, which is bypassed by
    re-requesting with the confirmation token found in the cookies.
    (The parameter name `id` shadows the builtin, but is kept unchanged
    for backward compatibility with keyword callers.)
    """
    URL = "https://docs.google.com/uc?export=download"

    session = requests.Session()

    # First request: either the file itself or the confirmation page
    response = session.get(URL, params = { 'id' : id }, stream = True)
    token = get_confirm_token(response)

    if token:
        # Second request with the confirmation token streams the real file
        params = { 'id' : id, 'confirm' : token }
        response = session.get(URL, params = params, stream = True)

    save_response_content(response, destination)


def get_confirm_token(response):
    """Return Google's download-warning cookie value, or None if absent."""
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value

    return None


def save_response_content(response, destination):
    """Stream the response body to `destination` in 32 KB chunks."""
    CHUNK_SIZE = 32768

    with open(destination, "wb") as f:
        for chunk in response.iter_content(CHUNK_SIZE):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)


if __name__ == "__main__":
    # Fix: previously two placeholder variables were assigned here and then
    # ignored, with the real values hard-coded in the call below.  Assign
    # the real values and pass the variables through instead.
    file_id = "0BxYys69jI14kYVM3aVhKS1VhRUk"  # UTKFace archive on Google Drive
    destination = "UTKFace.tar.gz"            # local path to write
    download_file_from_google_drive(file_id, destination)

# + [markdown] colab_type="text" id="ugSwgj_96x-h"
# Install dependencies

# + colab={} colab_type="code" id="ptgpJNpp6x-l" outputId="e9ca6cc9-eb76-455b-f43b-4403d5a4180e"
# !pip install opencv-python
# !apt update && apt install -y libsm6 libxext6 libxrender-dev

# + colab={} colab_type="code" id="6NbCULwU6x-5" outputId="9ec54204-60bf-4f97-9924-a3898aba67c2"
# !pip install requests

# + colab={} colab_type="code" id="blO_1WP76x_G" outputId="bc519947-392f-4eec-8678-7ac102e11d86"
# !pip install keras

# + colab={} colab_type="code" id="beu_x-Fs6x_T" outputId="764c5be4-8794-46e5-8387-68efa86a363b"
# !pip install imageio

# + colab={} colab_type="code" id="JyBxrVVk6x_i"
# outputId="d870d9e5-f5fd-4772-d040-4e34d26a6271"
# !pip install imageio

# + colab={} colab_type="code" id="DD-jawqe6x_v" outputId="c9093b45-9612-4b55-91f7-ab80ca6bc2ef"
pip install git+https://www.github.com/keras-team/keras-contrib.git

# + [markdown] colab_type="text" id="X1LkVQpD6x_8"
# Arrange data

# + colab={} colab_type="code" id="2_3lCHYB6x__"
# !gunzip UTKFace.tar.gz
# !tar -xf UTKFace.tar

# + colab={} colab_type="code" id="_JfYbv4c6yAM" outputId="1cea861c-56a4-4f1f-9dae-a3703c6c5b2d"
# !mkdir data
# !mkdir data/trainA data/trainB
# !mkdir data/trainArace data/trainBrace

# + colab={} colab_type="code" id="mk1paMTx6yAY" outputId="e0d0504c-f92a-45b9-8cb2-1a7d31c205c2"
import os, shutil

# Sort UTKFace images into the four training domains.  Filenames encode
# "age_gender_race_date.jpg"; domain A/B = young/old, Arace/Brace = race 0/1.
images = os.listdir("UTKFace")
for f in images:
    try:
        val = f.split("_")
        age = int(val[0])
        race = int(val[2])
        if(age >=20 and age<=30):
            shutil.copy("UTKFace/"+f, "data/trainA")
        if(age >=50 and age<=60):
            shutil.copy("UTKFace/"+f, "data/trainB")
        if race==0:
            shutil.copy("UTKFace/"+f, "data/trainArace")
        if race==1:
            shutil.copy("UTKFace/"+f, "data/trainBrace")
    # NOTE(review): bare except silently skips any file whose name does not
    # parse (or any copy error) and just prints the filename -- intentional
    # best-effort, but a narrower `except (ValueError, IndexError)` would be safer.
    except:
        print(f)

# + [markdown] colab_type="text" id="IgJodimN6yAk"
# Dataloader and preprocessing for input images

# + colab={} colab_type="code" id="1BwXir1W6yAp"
import scipy
from glob import glob
import numpy as np
from skimage.transform import resize
import imageio


class DataLoader():
    """Loads and preprocesses UTKFace images for the two domain pairs.

    Images are scaled to `img_res` and normalised from [0, 255] to [-1, 1]
    (the range expected by a tanh generator output).
    """
    def __init__(self, dataset_name, img_res=(128, 128)):
        # `dataset_name` is the root data directory (e.g. 'data')
        self.dataset_name = dataset_name
        self.img_res = img_res

    def load_data(self, domain, batch_size=1, is_testing=False, is_race=False):
        """Sample `batch_size` random images from one domain ('A' or 'B')."""
        if is_race:
            # race domains live in data/trainArace and data/trainBrace
            path = glob('./%s/%s%s%s/*' % (self.dataset_name, "train", domain, "race"))
            data_type = "train"+"race"+domain
        else:
            path = glob('./%s/%s%s/*' % (self.dataset_name, "train", domain))
            #data_type = "train%s" % domain if not is_testing else "test%s" % domain

        batch_images = np.random.choice(path, size=batch_size)

        imgs = []
        # NOTE(review): debug print left in -- dumps the sampled file paths
        # on every call.
        print(domain, batch_images)
        for img_path in batch_images:
            img = self.imread(img_path)
            if not is_testing:
                img = resize(img, self.img_res)

                # random horizontal flip for train-time augmentation
                if np.random.random() > 0.5:
                    img = np.fliplr(img)
            else:
                img = resize(img, self.img_res)
            imgs.append(img)

        # [0, 255] -> [-1, 1]
        imgs = np.array(imgs)/127.5 - 1.

        return imgs

    def load_batch(self, batch_size=1, is_testing=False, is_race=False):
        """Yield aligned (imgs_A, imgs_B) batches for one epoch.

        n_batches is limited by the smallest of the four domain folders so
        the age and race loaders stay in step across an epoch.
        """
        path_A = glob('./%s/%sA/*' % (self.dataset_name, "train"))
        path_B = glob('./%s/%sB/*' % (self.dataset_name, "train"))
        path_Arace = glob('./%s/%s/*' % (self.dataset_name, "trainArace"))
        path_Brace = glob('./%s/%s/*' % (self.dataset_name, "trainBrace"))
        print(len(path_A), len(path_B), len(path_Arace), len(path_Brace))
        self.n_batches = int(min(len(path_A), len(path_B), len(path_Arace), len(path_Brace)) / batch_size)
        if is_race:
            path_A, path_B = path_Arace, path_Brace

        total_samples = self.n_batches * batch_size

        # Sample n_batches * batch_size from each path list so that model sees all
        # samples from both domains
        path_A = np.random.choice(path_A, total_samples, replace=False)
        path_B = np.random.choice(path_B, total_samples, replace=False)

        # NOTE(review): range(n_batches - 1) drops the last batch each epoch
        # (presumably to avoid a short batch) -- confirm this is intended.
        for i in range(self.n_batches-1):
            batch_A = path_A[i*batch_size:(i+1)*batch_size]
            batch_B = path_B[i*batch_size:(i+1)*batch_size]
            imgs_A, imgs_B = [], []
            for img_A, img_B in zip(batch_A, batch_B):
                img_A = self.imread(img_A)
                img_B = self.imread(img_B)

                img_A = resize(img_A, self.img_res)
                img_B = resize(img_B, self.img_res)

                # paired random flip at train time
                if not is_testing and np.random.random() > 0.5:
                        img_A = np.fliplr(img_A)
                        img_B = np.fliplr(img_B)

                imgs_A.append(img_A)
                imgs_B.append(img_B)

            imgs_A = np.array(imgs_A)/127.5 - 1.
            imgs_B = np.array(imgs_B)/127.5 - 1.

            yield imgs_A, imgs_B

    def load_img(self, path):
        """Load a single image as a 1-element batch in [-1, 1]."""
        img = self.imread(path)
        img = resize(img, self.img_res)
        img = img/127.5 - 1.
        return img[np.newaxis, :, :, :]

    def get_img(self, img):
        # Normalise an already-loaded image array to model input range
        img = resize(img, self.img_res)
        img = img/127.5 - 1.
        return img

    def revert_img(self, img, new_res):
        # Inverse of the input normalisation: [-1, 1] -> [0, 255] at new_res.
        # NOTE(review): resizes BEFORE rescaling, whereas the module-level
        # revert_img below resizes AFTER -- confirm which order is intended.
        img = resize(img, new_res)
        img = (img)*0.5 + 0.5
        img = img*255
        img = img.astype(np.float32)
        return img

    def imread(self, path):
        # NOTE(review): np.float was deprecated in NumPy 1.20 and removed in
        # 1.24 -- this needs `float` (or np.float64) under a modern NumPy.
        return imageio.imread(path, as_gray=False, pilmode="RGB").astype(np.float)


def revert_img(img, new_res):
    """Module-level de-normaliser: [-1, 1] -> [0, 255], then resize."""
    img = (img)*0.5 + 0.5
    img = img*255
    img = resize(img, new_res)
    img = img.astype(np.float32)
    return img

# + [markdown] colab_type="text" id="sQp-OmMC6yA0"
# CycleGan code.
# Follow the tricks mentioned in the article

# + colab={} colab_type="code" id="wtKK9-G46yA4"
from __future__ import print_function, division
import scipy, os
import scipy.misc
from keras.datasets import mnist
from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import relu
from keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose
from keras.models import Sequential, Model
from keras.optimizers import Adam
import datetime
import matplotlib.pyplot as plt
import sys
import numpy as np
import os
import keras
import shutil, os, random
from keras.models import load_model


class CycleGAN():
    """Conditional CycleGAN translating faces between two domain pairs
    (young/old and race 0/race 1), selected by a label plane."""
    def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        # extra single-channel plane concatenated to the generator input to
        # select which domain pair (age vs race) is being translated
        self.condition_shape = (self.img_rows, self.img_cols, 1)

        # Configure data loader
        self.dataset_name = 'data'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))

        # Calculate output shape of D (PatchGAN): 4 stride-2 conv layers
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        # Loss weights
        self.lambda_cycle = 0.1                    # Cycle-consistency loss
        self.lambda_id = 0.1 * self.lambda_cycle   # Identity loss

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminators (weights restored from
        # checkpoints when present, so training can resume)
        self.d_A = self.build_discriminator()
        if os.path.exists("d_A.h5"):
            self.d_A.load_weights("d_A.h5")
        self.d_A.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.d_Arace = self.build_discriminator()
        if os.path.exists("d_Arace.h5"):
            self.d_Arace.load_weights("d_Arace.h5")
        self.d_Arace.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.d_B = self.build_discriminator()
        if os.path.exists("d_B.h5"):
            # BUG(review): loads the d_B checkpoint into d_A -- should be
            # self.d_B.load_weights("d_B.h5"); d_B silently resumes untrained.
            self.d_A.load_weights("d_B.h5")
        self.d_B.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])
        self.d_Brace = self.build_discriminator()
        if os.path.exists("d_Brace.h5"):
            # BUG(review): same copy-paste error -- should be
            # self.d_Brace.load_weights("d_Brace.h5").
            self.d_A.load_weights("d_Brace.h5")
        self.d_Brace.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])

        #-------------------------
        # Construct Computational
        #   Graph of Generators
        #-------------------------

        # Build the generators
        self.g_AB = self.build_generator()
        if os.path.exists("g_AB.h5"):
            # BUG(review): loads GENERATOR weights into discriminator d_A --
            # should be self.g_AB.load_weights("g_AB.h5") (and would fail at
            # runtime anyway since the architectures differ).
            self.d_A.load_weights("g_AB.h5")
        self.g_BA = self.build_generator()
        if os.path.exists("g_BA.h5"):
            # BUG(review): same error -- should be self.g_BA.load_weights("g_BA.h5").
            self.d_A.load_weights("g_BA.h5")

        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)
        # condition plane selecting the age vs race task
        label = Input(shape=self.condition_shape)

        # Translate images to the other domain
        fake_B = self.g_AB([img_A, label])
        fake_A = self.g_BA([img_B, label])
        # Translate images back to original domain
        reconstr_A = self.g_BA([fake_B, label])
        reconstr_B = self.g_AB([fake_A, label])
        # Identity mapping of images
        img_A_id = self.g_BA([img_A, label])
        img_B_id = self.g_AB([img_B, label])

        # For the combined model we will only train the generators
        self.d_A.trainable = False
        self.d_B.trainable = False
        self.d_Arace.trainable = False
        self.d_Brace.trainable = False

        # Discriminators determines validity of translated images
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)
        valid_Arace = self.d_Arace(fake_A)
        valid_Brace = self.d_Brace(fake_B)

        # Combined model trains generators to fool
discriminators self.combined = Model(inputs=[img_A, img_B, label], outputs=[ valid_A, valid_B, valid_Arace, valid_Brace, reconstr_A, reconstr_B, img_A_id, img_B_id ]) if os.path.exists("combined.h5"): self.combined.load_weights("combined.h5") self.combined.compile(loss=['mse', 'mse', 'mse', 'mse', 'mae', 'mae', 'mae', 'mae'], loss_weights=[ 1, 1, 1, 1, self.lambda_cycle, self.lambda_cycle, self.lambda_id, self.lambda_id ], optimizer=optimizer) def build_generator(self): """Resnet Generator""" def conv2d(layer_input, filters=16, strides=1, name=None, f_size=4): d = Conv2D(filters, kernel_size=f_size, strides=strides, padding='same', name=name)(layer_input) d = InstanceNormalization(name=name+"_bn")(d) d = Activation('relu')(d) return d def residual(layer_input, filters=16, strides=1, name=None, f_size=3): d = conv2d(layer_input, filters=filters, strides=strides, name=name, f_size=f_size) d = Conv2D(filters, kernel_size=f_size, strides=strides, padding='same', name=name+"_2")(d) d = InstanceNormalization(name=name+"_bn2")(d) d = keras.layers.add([d, layer_input]) return d def conv2d_transpose(layer_input, filters=16, strides=1, name=None, f_size=4): u = Conv2DTranspose(filters, strides=strides, name=name, kernel_size=f_size, padding='same')(layer_input) u = InstanceNormalization(name=name+"_bn")(u) u = Activation('relu')(u) return u # Image input c0 = Input(shape=self.img_shape) cl = Input(shape=self.condition_shape) concat_layer= Concatenate()([c0, cl]) c1 = conv2d(concat_layer, filters=self.gf, strides=1, name="g_e1", f_size=7) c2 = conv2d(c1, filters=self.gf*2, strides=2, name="g_e2", f_size=3) c3 = conv2d(c2, filters=self.gf*4, strides=2, name="g_e3", f_size=3) r1 = residual(c3, filters=self.gf*4, name='g_r1') r2 = residual(r1, self.gf*4, name='g_r2') r3 = residual(r2, self.gf*4, name='g_r3') r4 = residual(r3, self.gf*4, name='g_r4') r5 = residual(r4, self.gf*4, name='g_r5') r6 = residual(r5, self.gf*4, name='g_r6') r7 = residual(r6, self.gf*4, name='g_r7') r8 = 
residual(r7, self.gf*4, name='g_r8') r9 = residual(r8, self.gf*4, name='g_r9') d1 = conv2d_transpose(r9, filters=self.gf*2, f_size=3, strides=2, name='g_d1_dc') d2 = conv2d_transpose(d1, filters=self.gf, f_size=3, strides=2, name='g_d2_dc') output_img = Conv2D(self.channels, kernel_size=7, strides=1, padding='same', activation='tanh')(d2) return Model(inputs=[c0, cl], outputs=[output_img]) def build_discriminator(self): def d_layer(layer_input, filters, f_size=4, normalization=True): """Discriminator layer""" d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input) d = LeakyReLU(alpha=0.2)(d) if normalization: d = InstanceNormalization()(d) return d img = Input(shape=self.img_shape) d1 = d_layer(img, self.df, normalization=False) d2 = d_layer(d1, self.df*2) d3 = d_layer(d2, self.df*4) d4 = d_layer(d3, self.df*8) validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4) return Model(img, validity) def train(self, epochs, batch_size=1, sample_interval=50): start_time = datetime.datetime.now() # Adversarial loss ground truths valid = np.ones((batch_size,) + self.disc_patch) fake = np.zeros((batch_size,) + self.disc_patch) case1 = np.ones(self.condition_shape) case2 = np.zeros(self.condition_shape) for epoch in range(epochs): race_data = self.data_loader.load_batch(batch_size, is_race=True) for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)): # ---------------------- # Train Discriminators # ---------------------- # Translate images to opposite domain case1stack = np.array([case1]*len(imgs_A)) fake_B = self.g_AB.predict([imgs_A, case1stack]) fake_A = self.g_BA.predict([imgs_B, case1stack]) # Train the discriminators (original images = real / translated = Fake) dA_loss_real = self.d_A.train_on_batch(imgs_A, valid) dA_loss_fake = self.d_A.train_on_batch(fake_A, fake) dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake) dB_loss_real = self.d_B.train_on_batch(imgs_B, valid) dB_loss_fake = 
self.d_B.train_on_batch(fake_B, fake) dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake) # Total disciminator loss d_loss = 0.5 * np.add(dA_loss, dB_loss) validA = self.d_Arace.predict(imgs_A) validB = self.d_Brace.predict(imgs_B) # ------------------ # Train Generators # ------------------ # Train the generators g_loss = self.combined.train_on_batch([imgs_A, imgs_B, case1stack], [valid, valid, validA, validB, imgs_A, imgs_B, imgs_A, imgs_B]) elapsed_time = datetime.datetime.now() - start_time if batch_i%50==0: # Plot the progress print ("[Age Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " \ % ( epoch, epochs, batch_i, self.data_loader.n_batches, d_loss[0], 100*d_loss[1], g_loss[0], np.mean(g_loss[1:3]), np.mean(g_loss[3:5]), np.mean(g_loss[5:6]), elapsed_time)) imgs_A, imgs_B = next(race_data) case2stack = np.array([case2]*len(imgs_A)) fake_B = self.g_AB.predict([imgs_A, case2stack]) fake_A = self.g_BA.predict([imgs_B, case2stack]) # Train the discriminators (original images = real / translated = Fake) dA_loss_real = self.d_Arace.train_on_batch(imgs_A, valid) dA_loss_fake = self.d_Arace.train_on_batch(fake_A, fake) dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake) dB_loss_real = self.d_Brace.train_on_batch(imgs_B, valid) dB_loss_fake = self.d_Brace.train_on_batch(fake_B, fake) dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake) # Total disciminator loss d_loss = 0.5 * np.add(dA_loss, dB_loss) validA = self.d_A.predict(imgs_A) validB = self.d_B.predict(imgs_B) # ------------------ # Train Generators # ------------------ # Train the generators g_loss = self.combined.train_on_batch([imgs_A, imgs_B, case2stack], [validA, validB, valid, valid, imgs_A, imgs_B, imgs_A, imgs_B]) elapsed_time = datetime.datetime.now() - start_time if batch_i%50==0: # Plot the progress # print ("[Race Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s " \ % ( epoch, 
epochs, batch_i, self.data_loader.n_batches, d_loss[0], 100*d_loss[1], g_loss[0], np.mean(g_loss[1:3]), np.mean(g_loss[3:5]), np.mean(g_loss[5:6]), elapsed_time)) # If at save interval => save generated image samples if batch_i % sample_interval == 0: self.sample_images(epoch, batch_i, 1) self.g_AB.save_weights("g_AB.h5") self.g_BA.save_weights("g_BA.h5") self.d_A.save_weights("d_A.h5") self.d_B.save_weights("d_B.h5") self.d_Arace.save_weights("d_Arace.h5") self.d_Brace.save_weights("d_Brace.h5") self.combined.save_weights("combined.h5") def sample_images(self, epoch, batch_i, ctype): os.makedirs('images/%s' % self.dataset_name, exist_ok=True) r, c = 2, 3 imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=False, is_race=False) imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=False, is_race=False) # Translate images to the other domain case1 = np.ones(self.condition_shape) case2 = np.zeros(self.condition_shape) case = case1 casestack = np.array([case]*len(imgs_A)) fake_B = self.g_AB.predict([imgs_A, casestack]) fake_A = self.g_BA.predict([imgs_B, casestack]) # Translate back to original domain reconstr_A = self.g_BA.predict([fake_B, casestack]) reconstr_B = self.g_AB.predict([fake_A, casestack]) gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B]) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 titles = ['Original', 'Translated', 'Reconstructed'] fig, axs = plt.subplots(r, c) cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt]) axs[i, j].set_title(titles[j]) axs[i,j].axis('off') cnt += 1 fig.savefig("images/%s/%d_%d.png" % (self.dataset_name, epoch, batch_i)) plt.close() case = case2 casestack = np.array([case]*len(imgs_A)) imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=False, is_race=True) imgs_B = self.data_loader.load_data(domain="B", batch_size=1, is_testing=False, is_race=True) # Translate images to the other domain fake_B = 
self.g_AB.predict([imgs_A, casestack]) fake_A = self.g_BA.predict([imgs_B, casestack]) # Translate back to original domain reconstr_A = self.g_BA.predict([fake_B, casestack]) reconstr_B = self.g_AB.predict([fake_A, casestack]) gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B]) # Rescale images 0 - 1 gen_imgs = 0.5 * gen_imgs + 0.5 titles = ['Original', 'Translated', 'Reconstructed'] fig, axs = plt.subplots(r, c) cnt = 0 for i in range(r): for j in range(c): axs[i,j].imshow(gen_imgs[cnt]) axs[i, j].set_title(titles[j]) axs[i,j].axis('off') cnt += 1 fig.savefig("images/%s/race%d_%d.png" % (self.dataset_name, epoch, batch_i)) plt.close() def run_20_to_50(self, image): imgs_A = self.data_loader.load_data(domain="A", batch_size=1, is_testing=True) fake_B = self.g_AB.predict(imgs_A) gan = CycleGAN() gan.train(epochs=50, batch_size=2, sample_interval=10) # + [markdown] colab_type="text" id="9S7wlTx56yBE" # Face Detection # + colab={} colab_type="code" id="Y0B1gsLu6yBJ" # !wget https://github.com/spmallick/learnopencv/raw/master/FaceDetectionComparison/models/opencv_face_detector_uint8.pb # !wget https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/opencv_face_detector.pbtxt # + colab={} colab_type="code" id="FM5qs6bl6yBV" # !wget https://statics.sportskeeda.com/editor/2018/09/4c606-1536825356-800.jpg # !mv 4c606-1536825356-800.jpg big3.jpg # + colab={} colab_type="code" id="Ece72ZX16yBp" import cv2 # %matplotlib inline from matplotlib import pyplot as plt from PIL import Image def detectFaceOpenCVDnn(net, frame, ctype): frameOpencvDnn = frame.copy() frameHeight = frameOpencvDnn.shape[0] frameWidth = frameOpencvDnn.shape[1] blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (frameHeight, frameWidth), [104, 117, 123], False, False) net.setInput(blob) detections = net.forward() bboxes = [] for i in range(detections.shape[2]): confidence = detections[0, 0, i, 2] if confidence > conf_threshold: x1 = int(detections[0, 0, i, 
3] * frameWidth) y1 = int(detections[0, 0, i, 4] * frameHeight) x2 = int(detections[0, 0, i, 5] * frameWidth) y2 = int(detections[0, 0, i, 6] * frameHeight) bboxes.append([x1, y1, x2, y2]) if not(x1<30 or y1<30 or x2>frameWidth-30 or y2>frameHeight-30): y1, y2 = y1-20, y2+20 x1, x2 = x1-20, x2+20 else: continue crop_img = frameOpencvDnn[y1:y2, x1:x2] crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB).astype("float32") cv2.imwrite("cropped"+str(i)+".jpg", crop_img) inp = np.array([gan.data_loader.get_img(crop_img)]) case1 = np.ones(gan.condition_shape) case2 = np.zeros(gan.condition_shape) if ctype==0: case = case1 else: case = case2 case1stack = np.array([case]*1) old_img = gan.g_AB.predict([inp, case1stack]) new_img = revert_img(old_img[0], (y2-y1, x2-x1)) new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2BGR).astype("float32") frameOpencvDnn[y1:y2, x1:x2] = new_img scipy.misc.imsave("old"+str(i)+".jpg", new_img) return frameOpencvDnn, bboxes conf_threshold = 0.8 modelFile = "opencv_face_detector_uint8.pb" configFile = "opencv_face_detector.pbtxt" net = cv2.dnn.readNetFromTensorflow(modelFile, configFile) frame = cv2.imread("big3.jpg") outOpencvDnn, bboxes = detectFaceOpenCVDnn(net,frame,0) cv2.imwrite("big3_old.jpg", outOpencvDnn) outOpencvDnn, bboxes = detectFaceOpenCVDnn(net,frame,1) cv2.imwrite("big3_black.jpg", outOpencvDnn) # -
Cycle_gan_keras_conditional.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Tutorial: Telco customer churn

# This tutorial works on the dataset from the Kaggle competition
# https://www.kaggle.com/blastchar/telco-customer-churn, where the goal is to
# predict behavior to retain customers by analyzing all relevant customer
# data and developing focused customer retention programs.

# +
import numpy as np
import pandas as pd

from optbinning import BinningProcess
# -

# Download the dataset from the link above and load it.

df = pd.read_csv("data/kaggle/WA_Fn-UseC_-Telco-Customer-Churn.csv",
                 sep=",", engine="c")
df.head()

# A pandas.DataFrame is used directly as input, an option supported since
# version 0.4.0.

# +
variable_names = list(df.columns[:-1])
X = df[variable_names]
y = df["Churn"].values
# -

# Encode the dichotomic target ("Yes"/"No") as 1/0 integers.

y = (y == "Yes").astype(int)

# The dichotomic variable SeniorCitizen is treated as nominal (categorical).

categorical_variables = ["SeniorCitizen"]

# Instantiate a ``BinningProcess`` with the variable names and the list of
# numerical variables to be considered categorical, then fit it on ``X``
# and ``y``.
#
# ##### Variable selection criteria
# The ``selection_criteria`` parameter keeps the top 10 highest-IV variables
# with IV in [0.025, 0.7] and quality score >= 0.01, discarding
# non-predictive and low-quality variables.

selection_criteria = {
    "iv": {"min": 0.025, "max": 0.7, "strategy": "highest", "top": 10},
    "quality_score": {"min": 0.01},
}

binning_process = BinningProcess(
    variable_names,
    categorical_variables=categorical_variables,
    selection_criteria=selection_criteria,
)
binning_process.fit(X, y)

# #### Binning process statistics
# Information about the problem and timing statistics.

binning_process.information(print_level=2)

# Basic statistics for each binned variable.

binning_process.summary()

# ``get_binned_variable`` retrieves an optimal binning object for detailed
# analysis.  "InternetService" is the customer's internet service provider
# (DSL, Fiber optic, No); fiber-optic customers are more likely to churn.

optb = binning_process.get_binned_variable("InternetService")

optb.binning_table.build()

optb.binning_table.plot(metric="event_rate")

# "tenure" is the number of months the customer has stayed with the company;
# churn probability clearly decreases as the contract permanence increases.

optb = binning_process.get_binned_variable("tenure")

optb.binning_table.build()

optb.binning_table.plot(metric="event_rate")

# #### Transformation
# Variables that survived the selection criteria.

binning_process.get_support(names=True)

# Transform the original dataset to Weight of Evidence; only the selected
# variables appear in the transformed dataframe.

X_transform = binning_process.transform(X, metric="woe")

X_transform
doc/source/tutorials/tutorial_binning_process_telco_churn.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Unsupervised classification

# The purpose of this practice is doing an unsupervised classification using
# the example viewed in class and some extra data, which has been chosen by
# ourselves.
#
# The algorithm used for this aim is K-means, with which the clusters'
# centroids must be obtained.

# Downloading required libraries
# !pip install numpy
# !pip install pandas

# Importing the required libraries
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# FIX: `sklearn.datasets.samples_generator` was a private module, deprecated
# and removed in scikit-learn 0.24; the stable public path is
# `sklearn.datasets` (make_blobs is unused below, but the import must resolve).
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans

# +
# Small 2-D toy dataset of 8 points.
matrix_data = {'x': [4, 3, 1, 5, 0, 2, 4, 2],
               'y': [4, 5, 2, 5, 1, 2, 5, 1]}

data = pd.DataFrame(matrix_data, columns=['x', 'y'])
print(data)
# -

# Fit K-means with 4 clusters and show the fitted centroids.
kmeans = KMeans(n_clusters=4).fit(data)
centroids = kmeans.cluster_centers_
print(centroids)

# +
f, ax = plt.subplots(figsize=(7, 5))
colores = ['r', 'g', 'b']
# NOTE(review): this loop redraws the same scatter three times and overlays
# the centroids in three colours (only the last, 'b', remains visible);
# kept as-is to preserve the original figure output.
for i in range(3):
    plt.scatter(data['x'], data['y'], c=kmeans.labels_.astype(float),
                s=50, alpha=0.5)
    plt.scatter(centroids[:, 0], centroids[:, 1], c=colores[i], s=50)
ax.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
           s=100, color='black', label='Centroids')
ax.set_title("Agrupamiento s/modelo")
ax.legend()
plt.show()

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from scipy.io import loadmat
# %matplotlib inline


def find_closest_centroids(X, centroids):
    """Assign each sample in X to the index of its nearest centroid.

    X         -- (m, d) array-like of samples (ndarray or DataFrame).
    centroids -- (k, d) array-like of centroid coordinates.

    Returns an (m,) float array where entry i is the index of the centroid
    closest (squared Euclidean distance) to sample i.
    """
    # FIX: the original indexed with X[i, :], which raises when X is a
    # pandas DataFrame — exactly what the call below passes.  Coercing to
    # ndarrays keeps the old behavior for arrays and fixes the DataFrame case.
    X = np.asarray(X)
    centroids = np.asarray(centroids)
    m = X.shape[0]
    k = centroids.shape[0]
    idx = np.zeros(m)
    for i in range(m):
        # FIX: np.inf instead of the magic sentinel 1000000, so the result
        # is correct even for distances larger than the old sentinel.
        min_dist = np.inf
        for j in range(k):
            dist = np.sum((X[i, :] - centroids[j, :]) ** 2)
            if dist < min_dist:
                min_dist = dist
                idx[i] = j
    return idx


initial_centroids = np.array([[0, 1], [2, 2]])
idx = find_closest_centroids(data, initial_centroids)
# -
PL4/python/PECL4.ipynb